diff --git a/salt/airgap/init.sls b/salt/airgap/init.sls
deleted file mode 100644
index 818bb3a3b..000000000
--- a/salt/airgap/init.sls
+++ /dev/null
@@ -1,71 +0,0 @@
-{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls in allowed_states %}
-
-{% set MANAGER = salt['grains.get']('master') %}
-airgapyum:
- file.managed:
- - name: /etc/yum/yum.conf
- - source: salt://airgap/files/yum.conf
-
-airgap_repo:
- pkgrepo.managed:
- - humanname: Airgap Repo
- - baseurl: https://{{ MANAGER }}/repo
- - gpgcheck: 0
- - sslverify: 0
-
-agbase:
- file.absent:
- - name: /etc/yum.repos.d/CentOS-Base.repo
-
-agcr:
- file.absent:
- - name: /etc/yum.repos.d/CentOS-CR.repo
-
-agdebug:
- file.absent:
- - name: /etc/yum.repos.d/CentOS-Debuginfo.repo
-
-agfasttrack:
- file.absent:
- - name: /etc/yum.repos.d/CentOS-fasttrack.repo
-
-agmedia:
- file.absent:
- - name: /etc/yum.repos.d/CentOS-Media.repo
-
-agsources:
- file.absent:
- - name: /etc/yum.repos.d/CentOS-Sources.repo
-
-agvault:
- file.absent:
- - name: /etc/yum.repos.d/CentOS-Vault.repo
-
-agkernel:
- file.absent:
- - name: /etc/yum.repos.d/CentOS-x86_64-kernel.repo
-
-agepel:
- file.absent:
- - name: /etc/yum.repos.d/epel.repo
-
-agtesting:
- file.absent:
- - name: /etc/yum.repos.d/epel-testing.repo
-
-agssrepo:
- file.absent:
- - name: /etc/yum.repos.d/saltstack.repo
-
-agwazrepo:
- file.absent:
- - name: /etc/yum.repos.d/wazuh.repo
-
-{% else %}
-
-{{sls}}_state_not_allowed:
- test.fail_without_changes:
- - name: {{sls}}_state_not_allowed
-
-{% endif %}
\ No newline at end of file
diff --git a/salt/common/files/99-reserved-ports.conf b/salt/common/files/99-reserved-ports.conf
index a846341a5..82eb03f79 100644
--- a/salt/common/files/99-reserved-ports.conf
+++ b/salt/common/files/99-reserved-ports.conf
@@ -1 +1 @@
-net.ipv4.ip_local_reserved_ports=55000,57314
+net.ipv4.ip_local_reserved_ports=55000,57314,47760-47860
\ No newline at end of file
diff --git a/salt/common/init.sls b/salt/common/init.sls
index 40df1230c..6d2a9aea7 100644
--- a/salt/common/init.sls
+++ b/salt/common/init.sls
@@ -49,6 +49,11 @@ sosaltstackperms:
- gid: 939
- dir_mode: 770
+so_log_perms:
+ file.directory:
+ - name: /opt/so/log
+ - dir_mode: 755
+
# Create a state directory
statedir:
file.directory:
@@ -64,21 +69,6 @@ salttmp:
- group: 939
- makedirs: True
-# Install epel
-{% if grains['os'] == 'CentOS' %}
-repair_yumdb:
- cmd.run:
- - name: 'mv -f /var/lib/rpm/__db* /tmp && yum clean all'
- - onlyif:
- - 'yum check-update 2>&1 | grep "Error: rpmdb open failed"'
-
-epel:
- pkg.installed:
- - skip_suggestions: True
- - pkgs:
- - epel-release
-{% endif %}
-
# Install common packages
{% if grains['os'] != 'CentOS' %}
commonpkgs:
@@ -238,6 +228,30 @@ commonlogrotateconf:
- month: '*'
- dayweek: '*'
+# Create the status directory
+sostatusdir:
+ file.directory:
+ - name: /opt/so/log/sostatus
+ - user: 0
+ - group: 0
+ - makedirs: True
+
+sostatus_log:
+ file.managed:
+ - name: /opt/so/log/sostatus/status.log
+ - mode: 644
+
+# Install sostatus check cron
+'/usr/sbin/so-status -q; echo $? > /opt/so/log/sostatus/status.log 2>&1':
+ cron.present:
+ - user: root
+ - minute: '*/1'
+ - hour: '*'
+ - daymonth: '*'
+ - month: '*'
+ - dayweek: '*'
+
+
{% if role in ['eval', 'manager', 'managersearch', 'standalone'] %}
# Lock permissions on the backup directory
backupdir:
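A quick way to confirm the new sostatus cron entry is working is to read back the log it writes; the file should contain a single exit code from so-status -q (0 when everything reports OK). This is only a sketch of a manual check, assuming so-status is deployed to /usr/sbin as the cron line above expects:

    crontab -l -u root | grep sostatus
    cat /opt/so/log/sostatus/status.log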
@@ -274,9 +288,10 @@ docker:
- file: docker_daemon
# Reserve OS ports for Docker proxy in case boot settings are not already applied/present
+# 55000 = Wazuh, 57314 = Strelka, 47760-47860 = Zeek
dockerapplyports:
cmd.run:
- - name: if [ ! -s /etc/sysctl.d/99-reserved-ports.conf ]; then sysctl -w net.ipv4.ip_local_reserved_ports="55000,57314"; fi
+ - name: if [ ! -s /etc/sysctl.d/99-reserved-ports.conf ]; then sysctl -w net.ipv4.ip_local_reserved_ports="55000,57314,47760-47860"; fi
# Reserve OS ports for Docker proxy
dockerreserveports:
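To verify that the expanded reservation (including the 47760-47860 range noted above for Zeek) is active without a reboot, the kernel setting can be read and reapplied by hand. A rough check, assuming the sysctl drop-in path used above:

    sysctl net.ipv4.ip_local_reserved_ports
    cat /etc/sysctl.d/99-reserved-ports.conf
    sysctl -p /etc/sysctl.d/99-reserved-ports.conf   # reapply if the file was just updated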
diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common
index 676b908ce..97e61e6e2 100755
--- a/salt/common/tools/sbin/so-common
+++ b/salt/common/tools/sbin/so-common
@@ -162,6 +162,23 @@ get_random_value() {
head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
}
+gpg_rpm_import() {
+ if [[ "$OS" == "centos" ]]; then
+ if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
+ local RPMKEYSLOC="../salt/repo/client/files/centos/keys"
+ else
+ local RPMKEYSLOC="$UPDATEDIR/salt/repo/client/files/centos/keys"
+ fi
+
+ RPMKEYS=('RPM-GPG-KEY-EPEL-7' 'GPG-KEY-WAZUH' 'docker.pub' 'SALTSTACK-GPG-KEY.pub' 'securityonion.pub')
+
+ for RPMKEY in "${RPMKEYS[@]}"; do
+ rpm --import $RPMKEYSLOC/$RPMKEY
+ echo "Imported $RPMKEY"
+ done
+ fi
+}
+
header() {
printf '%s\n' "" "$banner" " $*" "$banner"
}
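After gpg_rpm_import runs, the imported keys appear as gpg-pubkey packages in the RPM database. A minimal spot check on a CentOS node, assuming the key files listed in RPMKEYS were present under RPMKEYSLOC:

    rpm -q gpg-pubkey --qf '%{NAME}-%{VERSION}-%{RELEASE}  %{SUMMARY}\n'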
@@ -419,6 +436,20 @@ valid_proxy() {
[[ $has_prefix == true ]] && [[ $valid_url == true ]] && return 0 || return 1
}
+valid_ntp_list() {
+ local string=$1
+ local ntp_arr
+ IFS="," read -r -a ntp_arr <<< "$string"
+
+ for ntp in "${ntp_arr[@]}"; do
+ if ! valid_ip4 "$ntp" && ! valid_hostname "$ntp" && ! valid_fqdn "$ntp"; then
+ return 1
+ fi
+ done
+
+ return 0
+}
+
valid_string() {
local str=$1
local min_length=${2:-1}
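valid_ntp_list accepts a comma-separated mix of IPv4 addresses, hostnames, and FQDNs, and rejects the whole list if any single entry fails. A small usage sketch, assuming so-common (and the valid_ip4/valid_hostname/valid_fqdn helpers it relies on) has been sourced:

    . /usr/sbin/so-common
    valid_ntp_list "0.pool.ntp.org,10.10.10.10" && echo "NTP list accepted"
    valid_ntp_list "0.pool.ntp.org,has space" || echo "NTP list rejected"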
diff --git a/salt/common/tools/sbin/so-docker-prune b/salt/common/tools/sbin/so-docker-prune
index 5a56f506d..f6c043ef3 100755
--- a/salt/common/tools/sbin/so-docker-prune
+++ b/salt/common/tools/sbin/so-docker-prune
@@ -60,15 +60,19 @@ def main(quiet):
no_prunable = True
for t_list in grouped_tag_lists:
try:
- # Keep the 2 most current images
+ # Group tags by version, in case multiple images exist with the same version string
t_list.sort(key=lambda x: Version(get_image_version(x)), reverse=True)
- if len(t_list) <= 2:
+ grouped_t_list = [ list(it) for _,it in groupby(t_list, lambda x: get_image_version(x)) ]
+
+ # Keep the 2 most current version groups
+ if len(grouped_t_list) <= 2:
continue
else:
no_prunable = False
- for tag in t_list[2:]:
- if not quiet: print(f'Removing image {tag}')
- client.images.remove(tag)
+ for group in grouped_t_list[2:]:
+ for tag in group:
+ if not quiet: print(f'Removing image {tag}')
+ client.images.remove(tag)
except InvalidVersion as e:
print(f'so-{get_so_image_basename(t_list[0])}: {e.args[0]}', file=sys.stderr)
exit(1)
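The grouping change means images are pruned per version string rather than per tag, so two tags carrying the same version count as one group; groupby only works here because the list is sorted by the same key first, and it assumes itertools.groupby is imported at the top of the script (not shown in this hunk). A rough way to preview what the pruner will consider is to list local Security Onion image tags sorted by version, newest first; the grep pattern is only an assumption about the image naming:

    docker images --format '{{.Repository}}:{{.Tag}}' | grep 'so-' | sort -t: -k2 -Vr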
diff --git a/salt/common/tools/sbin/so-elasticsearch-indices-list b/salt/common/tools/sbin/so-elasticsearch-indices-list
new file mode 100755
index 000000000..c9df67a25
--- /dev/null
+++ b/salt/common/tools/sbin/so-elasticsearch-indices-list
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.

+{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
+
+. /usr/sbin/so-common
+
+curl -s -k -L https://{{ NODEIP }}:9200/_cat/indices?pretty
diff --git a/salt/common/tools/sbin/so-elasticsearch-pipeline-view b/salt/common/tools/sbin/so-elasticsearch-pipeline-view
new file mode 100755
index 000000000..04901e122
--- /dev/null
+++ b/salt/common/tools/sbin/so-elasticsearch-pipeline-view
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
+
+. /usr/sbin/so-common
+
+if [ "$1" == "" ]; then
+ curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq .
+else
+ curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq .
+fi
diff --git a/salt/common/tools/sbin/so-elasticsearch-shards-list b/salt/common/tools/sbin/so-elasticsearch-shards-list
new file mode 100755
index 000000000..9d28ed95b
--- /dev/null
+++ b/salt/common/tools/sbin/so-elasticsearch-shards-list
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
+
+. /usr/sbin/so-common
+
+curl -s -k -L https://{{ NODEIP }}:9200/_cat/shards?pretty
diff --git a/salt/common/tools/sbin/so-elasticsearch-template-remove b/salt/common/tools/sbin/so-elasticsearch-template-remove
new file mode 100755
index 000000000..f7c3e6812
--- /dev/null
+++ b/salt/common/tools/sbin/so-elasticsearch-template-remove
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
+
+. /usr/sbin/so-common
+
+curl -s -k -L -XDELETE https://{{ NODEIP }}:9200/_template/$1
diff --git a/salt/common/tools/sbin/so-elasticsearch-template-view b/salt/common/tools/sbin/so-elasticsearch-template-view
new file mode 100755
index 000000000..c9f3ec199
--- /dev/null
+++ b/salt/common/tools/sbin/so-elasticsearch-template-view
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
+
+. /usr/sbin/so-common
+
+if [ "$1" == "" ]; then
+ curl -s -k -L https://{{ NODEIP }}:9200/_template/* | jq .
+else
+ curl -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq .
+fi
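These helper scripts wrap simple Elasticsearch REST calls, so once rendered and deployed they can be exercised directly from the manager. Example invocations (the pipeline and template names below are placeholders; so-elasticsearch-template-remove deletes whatever template name it is given, so use it with care):

    so-elasticsearch-indices-list
    so-elasticsearch-shards-list
    so-elasticsearch-pipeline-view win.eventlogs
    so-elasticsearch-template-view so-common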
diff --git a/salt/common/tools/sbin/so-index-list b/salt/common/tools/sbin/so-index-list
index dcfebbf58..cf9232150 100755
--- a/salt/common/tools/sbin/so-index-list
+++ b/salt/common/tools/sbin/so-index-list
@@ -15,4 +15,4 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-curl -X GET -k -L https://localhost:9200/_cat/indices?v
+curl -X GET -k -L "https://localhost:9200/_cat/indices?v&s=index"
diff --git a/salt/common/tools/sbin/so-kibana-space-defaults b/salt/common/tools/sbin/so-kibana-space-defaults
old mode 100644
new mode 100755
diff --git a/salt/common/tools/sbin/so-logstash-events b/salt/common/tools/sbin/so-logstash-events
new file mode 100755
index 000000000..817cafb72
--- /dev/null
+++ b/salt/common/tools/sbin/so-logstash-events
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
+
+. /usr/sbin/so-common
+
+if [ "$1" == "" ]; then
+ for i in $(curl -s -L http://{{ NODEIP }}:9600/_node/stats | jq .pipelines | jq '. | to_entries | .[].key' | sed 's/\"//g'); do echo ${i^}:; curl -s -L http://{{ NODEIP }}:9600/_node/stats | jq .pipelines.$i.events; done
+else
+ curl -s -L http://{{ NODEIP }}:9600/_node/stats | jq .pipelines.$1.events
+fi
diff --git a/salt/common/tools/sbin/so-logstash-pipeline-stats b/salt/common/tools/sbin/so-logstash-pipeline-stats
new file mode 100755
index 000000000..b82a125d2
--- /dev/null
+++ b/salt/common/tools/sbin/so-logstash-pipeline-stats
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
+
+. /usr/sbin/so-common
+
+if [ "$1" == "" ]; then
+ curl -s -L http://{{ NODEIP }}:9600/_node/stats | jq .pipelines
+else
+ curl -s -L http://{{ NODEIP }}:9600/_node/stats | jq .pipelines.$1
+fi
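Similarly, the Logstash helpers read the node stats API on port 9600; run them with no argument for all pipelines, or pass a single pipeline name. The pipeline name below is only an example:

    so-logstash-pipeline-stats
    so-logstash-events manager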
diff --git a/salt/common/tools/sbin/so-playbook-sync b/salt/common/tools/sbin/so-playbook-sync
index 250e4a3ad..a76d398cb 100755
--- a/salt/common/tools/sbin/so-playbook-sync
+++ b/salt/common/tools/sbin/so-playbook-sync
@@ -17,4 +17,8 @@
. /usr/sbin/so-common
+# Check to see if we are already running
+IS_RUNNING=$(ps aux | pgrep -f "so-playbook-sync" | wc -l)
+[ "$IS_RUNNING" -gt 2 ] && echo "$(date) - Multiple Playbook Sync processes already running...exiting." && exit 0
+
docker exec so-soctopus python3 playbook_play-sync.py
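The guard exits when more than two matching processes are found (the current invocation plus at least one prior run). A quick way to see the count it reacts to, assuming procps-ng pgrep:

    pgrep -fc "so-playbook-sync"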
diff --git a/salt/common/tools/sbin/so-raid-status b/salt/common/tools/sbin/so-raid-status
index d55d158fe..11909e012 100755
--- a/salt/common/tools/sbin/so-raid-status
+++ b/salt/common/tools/sbin/so-raid-status
@@ -66,11 +66,13 @@ mkdir -p /opt/so/log/raid
{%- if grains['sosmodel'] in ['SOSMN', 'SOSSNNV'] %}
#check_boss_raid
check_software_raid
-echo "osraid=$BOSSRAID nsmraid=$SWRAID" > /opt/so/log/raid/status.log
+#echo "osraid=$BOSSRAID nsmraid=$SWRAID" > /opt/so/log/raid/status.log
+echo "osraid=1 nsmraid=$SWRAID" > /opt/so/log/raid/status.log
{%- elif grains['sosmodel'] in ['SOS1000F', 'SOS1000', 'SOSSN7200', 'SOS10K', 'SOS4000'] %}
#check_boss_raid
check_lsi_raid
-echo "osraid=$BOSSRAID nsmraid=$LSIRAID" > /opt/so/log/raid/status.log
+#echo "osraid=$BOSSRAID nsmraid=$LSIRAID" > /opt/so/log/raid/status.log
+echo "osraid=1 nsmraid=$LSIRAID" > /opt/so/log/raid/status.log
{%- else %}
exit 0
{%- endif %}
diff --git a/salt/common/tools/sbin/so-sensor-clean b/salt/common/tools/sbin/so-sensor-clean
index 63f102f0c..e62c3c4da 100755
--- a/salt/common/tools/sbin/so-sensor-clean
+++ b/salt/common/tools/sbin/so-sensor-clean
@@ -115,7 +115,7 @@ clean() {
}
# Check to see if we are already running
-IS_RUNNING=$(ps aux | grep "so-sensor-clean" | grep -v grep | wc -l)
+IS_RUNNING=$(ps aux | pgrep -f "so-sensor-clean" | wc -l)
[ "$IS_RUNNING" -gt 2 ] && echo "$(date) - $IS_RUNNING sensor clean script processes running...exiting." >>$LOG && exit 0
if [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; then
diff --git a/salt/common/tools/sbin/so-ssh-harden b/salt/common/tools/sbin/so-ssh-harden
index 1fd7d58d9..2a057ff5e 100755
--- a/salt/common/tools/sbin/so-ssh-harden
+++ b/salt/common/tools/sbin/so-ssh-harden
@@ -4,90 +4,184 @@
if [[ $1 =~ ^(-q|--quiet) ]]; then
quiet=true
+elif [[ $1 =~ ^(-v|--verbose) ]]; then
+ verbose=true
fi
+sshd_config=/etc/ssh/sshd_config
+temp_config=/tmp/sshd_config
+
before=
after=
reload_required=false
+change_header_printed=false
-print_sshd_t() {
+check_sshd_t() {
local string=$1
- local state=$2
- echo "${state}:"
local grep_out
grep_out=$(sshd -T | grep "^${string}")
- if [[ $state == "Before" ]]; then
- before=$grep_out
+ before=$grep_out
+}
+
+print_diff() {
+ local diff
+ diff=$(diff -dbB <(echo $before) <(echo $after) | awk 'NR>1')
+
+ if [[ -n $diff ]]; then
+ if [[ $change_header_printed == false ]]; then
+ printf '%s\n' '' "Changes" '-------' ''
+ change_header_printed=true
+ fi
+ echo -e "$diff\n"
+ fi
+}
+
+replace_or_add() {
+ local type=$1
+ local string=$2
+ if grep -q "$type" $temp_config; then
+ sed -i "/$type .*/d" $temp_config
+ fi
+ printf "%s\n\n" "$string" >> $temp_config
+ reload_required=true
+}
+
+test_config() {
+ local msg
+ msg=$(sshd -t -f $temp_config)
+ local ret=$?
+
+ if [[ -n $msg ]]; then
+ echo "Error found in temp sshd config:"
+ echo $msg
+ fi
+
+ return $ret
+}
+
+main() {
+ if ! [[ $quiet ]]; then echo "Copying current config to $temp_config"; fi
+ cp $sshd_config $temp_config
+
+ # Add newline to ssh for legibility
+ echo "" >> $temp_config
+
+ # Ciphers
+ check_sshd_t "ciphers"
+
+ local bad_ciphers=(
+ "3des-cbc"
+ "aes128-cbc"
+ "aes192-cbc"
+ "aes256-cbc"
+ "arcfour"
+ "arcfour128"
+ "arcfour256"
+ "blowfish-cbc"
+ "cast128-cbc"
+ )
+
+ local cipher_string=$before
+ for cipher in "${bad_ciphers[@]}"; do
+ cipher_string=$(echo "$cipher_string" | sed "s/${cipher}\(,\|\$\)//g" | sed 's/,$//')
+ done
+
+ after=$cipher_string
+
+ if [[ $verbose ]]; then print_diff; fi
+
+ if [[ $before != "$after" ]]; then
+ replace_or_add "ciphers" "$cipher_string" && test_config || exit 1
+ fi
+
+ # KexAlgorithms
+ check_sshd_t "kexalgorithms"
+
+ local bad_kexalgs=(
+ "diffie-hellman-group-exchange-sha1"
+ "diffie-hellman-group-exchange-sha256"
+ "diffie-hellman-group1-sha1"
+ "diffie-hellman-group14-sha1"
+ "ecdh-sha2-nistp256"
+ "ecdh-sha2-nistp521"
+ "ecdh-sha2-nistp384"
+ )
+
+ local kexalg_string=$before
+ for kexalg in "${bad_kexalgs[@]}"; do
+ kexalg_string=$(echo "$kexalg_string" | sed "s/${kexalg}\(,\|\$\)//g" | sed 's/,$//')
+ done
+
+ after=$kexalg_string
+
+ if [[ $verbose ]]; then print_diff; fi
+
+ if [[ $before != "$after" ]]; then
+ replace_or_add "kexalgorithms" "$kexalg_string" && test_config || exit 1
+ fi
+
+ # Macs
+ check_sshd_t "macs"
+
+ local bad_macs=(
+ "hmac-sha2-512"
+ "umac-128@openssh.com"
+ "hmac-sha2-256"
+ "umac-64@openssh.com"
+ "hmac-sha1"
+ "hmac-sha1-etm@openssh.com"
+ "umac-64-etm@openssh.com"
+ )
+
+ local macs_string=$before
+ for mac in "${bad_macs[@]}"; do
+ macs_string=$(echo "$macs_string" | sed "s/${mac}\(,\|\$\)//g" | sed 's/,$//')
+ done
+
+ after=$macs_string
+
+ if [[ $verbose ]]; then print_diff; fi
+
+ if [[ $before != "$after" ]]; then
+ replace_or_add "macs" "$macs_string" && test_config || exit 1
+ fi
+
+ # HostKeyAlgorithms
+ check_sshd_t "hostkeyalgorithms"
+
+ local optional_suffix_regex_hka="\(-cert-v01@openssh.com\)\?"
+ local bad_hostkeyalg_list=(
+ "ecdsa-sha2-nistp256"
+ "ecdsa-sha2-nistp384"
+ "ecdsa-sha2-nistp521"
+ "ssh-rsa"
+ "ssh-dss"
+ )
+
+ local hostkeyalg_string=$before
+ for alg in "${bad_hostkeyalg_list[@]}"; do
+ hostkeyalg_string=$(echo "$hostkeyalg_string" | sed "s/${alg}${optional_suffix_regex_hka}\(,\|\$\)//g" | sed 's/,$//')
+ done
+
+ after=$hostkeyalg_string
+
+ if [[ $verbose ]]; then print_diff; fi
+
+ if [[ $before != "$after" ]]; then
+ replace_or_add "hostkeyalgorithms" "$hostkeyalg_string" && test_config || exit 1
+ fi
+
+ if [[ $reload_required == true ]]; then
+ mv -f $temp_config $sshd_config
+ if ! [[ $quiet ]]; then echo "Reloading sshd to load config changes"; fi
+ systemctl reload sshd
+ echo "[ WARNING ] Any new ssh sessions will need to remove and reaccept the host key fingerprint for this server before reconnecting."
else
- after=$grep_out
- fi
-
- echo $grep_out
-}
-
-print_msg() {
- local msg=$1
- if ! [[ $quiet ]]; then
- printf "%s\n" \
- "----" \
- "$msg" \
- "----" \
- ""
+ if ! [[ $quiet ]]; then echo "No changes made to temp file, cleaning up"; fi
+ rm -f $temp_config
fi
}
-if ! [[ $quiet ]]; then print_sshd_t "ciphers" "Before"; fi
-sshd -T | grep "^ciphers" | sed -e "s/\(3des-cbc\|aes128-cbc\|aes192-cbc\|aes256-cbc\|arcfour\|arcfour128\|arcfour256\|blowfish-cbc\|cast128-cbc\|rijndael-cbc@lysator.liu.se\)\,\?//g" >> /etc/ssh/sshd_config
-if ! [[ $quiet ]]; then
- print_sshd_t "ciphers" "After"
- echo ""
-fi
-
-if [[ $before != $after ]]; then
- reload_required=true
-fi
-
-if ! [[ $quiet ]]; then print_sshd_t "kexalgorithms" "Before"; fi
-sshd -T | grep "^kexalgorithms" | sed -e "s/\(diffie-hellman-group14-sha1\|ecdh-sha2-nistp256\|diffie-hellman-group-exchange-sha256\|diffie-hellman-group1-sha1\|diffie-hellman-group-exchange-sha1\|ecdh-sha2-nistp521\|ecdh-sha2-nistp384\)\,\?//g" >> /etc/ssh/sshd_config
-if ! [[ $quiet ]]; then
- print_sshd_t "kexalgorithms" "After"
- echo ""
-fi
-
-if [[ $before != $after ]]; then
- reload_required=true
-fi
-
-if ! [[ $quiet ]]; then print_sshd_t "macs" "Before"; fi
-sshd -T | grep "^macs" | sed -e "s/\(hmac-sha2-512,\|umac-128@openssh.com,\|hmac-sha2-256,\|umac-64@openssh.com,\|hmac-sha1,\|hmac-sha1-etm@openssh.com,\|umac-64-etm@openssh.com,\|hmac-sha1\)//g" >> /etc/ssh/sshd_config
-if ! [[ $quiet ]]; then
- print_sshd_t "macs" "After"
- echo ""
-fi
-
-if [[ $before != $after ]]; then
- reload_required=true
-fi
-
-if ! [[ $quiet ]]; then print_sshd_t "hostkeyalgorithms" "Before"; fi
-sshd -T | grep "^hostkeyalgorithms" | sed "s|ecdsa-sha2-nistp256,||g" | sed "s|ssh-rsa,||g" >> /etc/ssh/sshd_config
-if ! [[ $quiet ]]; then
- print_sshd_t "hostkeyalgorithms" "After"
- echo ""
-fi
-
-if [[ $before != $after ]]; then
- reload_required=true
-fi
-
-if [[ $reload_required == true ]]; then
- print_msg "Reloading sshd to load config changes..."
- systemctl reload sshd
-fi
-
-{% if grains['os'] != 'CentOS' %}
-print_msg "[ WARNING ] Any new ssh sessions will need to remove and reaccept the ECDSA key for this server before reconnecting."
-{% endif %}
-
+main
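After the rewrite, the effective algorithm lists can be confirmed straight from sshd, and the script can be re-run verbosely to show its before/after diff output. A manual verification sketch, assuming the tool is deployed to /usr/sbin like the other sbin scripts:

    sshd -T | grep -E '^(ciphers|kexalgorithms|macs|hostkeyalgorithms)'
    /usr/sbin/so-ssh-harden --verbose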
diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup
index 6ff298770..2a1ddab1c 100755
--- a/salt/common/tools/sbin/soup
+++ b/salt/common/tools/sbin/soup
@@ -24,6 +24,7 @@ INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'})
DEFAULT_SALT_DIR=/opt/so/saltstack/default
BATCHSIZE=5
SOUP_LOG=/root/soup.log
+WHATWOULDYOUSAYYAHDOHERE=soup
add_common() {
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
@@ -229,6 +230,13 @@ masterunlock() {
fi
}
+preupgrade_changes_2.3.50_repo() {
+ # We made repo changes in 2.3.50 and this prepares for that on upgrade
+ echo "Checking to see if 2.3.50 repo changes are needed."
+
+ [[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_2.3.3X_to_2.3.50_repo
+}
+
preupgrade_changes() {
# This function is to add any new pillar items if needed.
echo "Checking to see if changes are needed."
@@ -238,6 +246,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" =~ rc.3 ]] && rc3_to_2.3.0
[[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_2.3.0_to_2.3.20
[[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_2.3.2X_to_2.3.30
+ [[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_2.3.3X_to_2.3.50
}
postupgrade_changes() {
@@ -408,6 +417,36 @@ up_2.3.2X_to_2.3.30() {
sed -i "/^strelka:/a \\ repos: \n - https://github.com/Neo23x0/signature-base" /opt/so/saltstack/local/pillar/global.sls;
fi
check_log_size_limit
+ INSTALLEDVERSION=2.3.30
+}
+
+up_2.3.3X_to_2.3.50_repo() {
+ echo "Performing 2.3.50 repo actions."
+ if [[ "$OS" == "centos" ]]; then
+ # Import GPG Keys
+ gpg_rpm_import
+
+ if [ $is_airgap -eq 1 ]; then
+ echo "Deleting unneeded repo files."
+ DELREPOS=('CentOS-Base' 'CentOS-CR' 'CentOS-Debuginfo' 'docker-ce' 'CentOS-fasttrack' 'CentOS-Media' 'CentOS-Sources' 'CentOS-Vault' 'CentOS-x86_64-kernel' 'epel' 'epel-testing' 'saltstack' 'wazuh')
+
+ for DELREPO in "${DELREPOS[@]}"; do
+ if [[ -f "/etc/yum.repos.d/$DELREPO.repo" ]]; then
+ echo "Deleting $DELREPO.repo"
+ rm -f "/etc/yum.repos.d/$DELREPO.repo"
+ fi
+ done
+
+ # Copy the new repo file if not airgap
+ cp $UPDATE_DIR/salt/repo/client/files/centos/securityonion.repo /etc/yum.repos.d/
+ yum clean all
+ yum repolist
+ fi
+ fi
+}
+
+up_2.3.3X_to_2.3.50() {
+ INSTALLEDVERSION=2.3.50
}
verify_upgradespace() {
@@ -502,22 +541,18 @@ upgrade_salt() {
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# If CentOS
- if [ "$OS" == "centos" ]; then
+ if [[ $OS == 'centos' ]]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
echo "Updating Salt packages and restarting services."
echo ""
- if [ $is_airgap -eq 0 ]; then
- sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable "$NEWSALTVERSION"
- else
- sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
- fi
+ sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable "$NEWSALTVERSION"
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
- elif [ "$OS" == "ubuntu" ]; then
+ elif [[ $OS == 'ubuntu' ]]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
@@ -628,6 +663,7 @@ else
update_registry
update_docker_containers "soup"
fi
+
echo ""
echo "Stopping Salt Minion service."
systemctl stop salt-minion
@@ -638,6 +674,8 @@ echo "Stopping Salt Master service."
systemctl stop salt-master
echo ""
+preupgrade_changes_2.3.50_repo
+
# Does salt need upgraded. If so update it.
if [ "$UPGRADESALT" == "1" ]; then
echo "Upgrading Salt"
@@ -652,7 +690,8 @@ fi
echo "Checking if Salt was upgraded."
echo ""
# Check that Salt was upgraded
-if [[ $(salt --versions-report | grep Salt: | awk {'print $2'}) != "$NEWSALTVERSION" ]]; then
+SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk {'print $2'})
+if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
echo "Once the issue is resolved, run soup again."
echo "Exiting."
diff --git a/salt/curator/files/bin/so-curator-closed-delete-delete b/salt/curator/files/bin/so-curator-closed-delete-delete
index 58433ee1a..9cc94833c 100755
--- a/salt/curator/files/bin/so-curator-closed-delete-delete
+++ b/salt/curator/files/bin/so-curator-closed-delete-delete
@@ -34,7 +34,7 @@ overlimit() {
closedindices() {
- INDICES=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
+ INDICES=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
[ $? -eq 1 ] && return false
echo ${INDICES} | grep -q -E "(logstash-|so-)"
}
@@ -49,12 +49,12 @@ while overlimit && closedindices; do
# First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed.
# Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
# Finally, select the first entry in that sorted list.
- OLDEST_INDEX=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)
+ OLDEST_INDEX=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)
# Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
- curl -XDELETE -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
+ curl -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
# Finally, write a log entry that says we deleted it.
echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG}
-done
\ No newline at end of file
+done
diff --git a/salt/elastalert/files/modules/so/playbook-es.py b/salt/elastalert/files/modules/so/playbook-es.py
index c10a80f2c..ab2327ab7 100644
--- a/salt/elastalert/files/modules/so/playbook-es.py
+++ b/salt/elastalert/files/modules/so/playbook-es.py
@@ -17,7 +17,7 @@ class PlaybookESAlerter(Alerter):
def alert(self, matches):
for match in matches:
today = strftime("%Y.%m.%d", gmtime())
- timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime())
+ timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
headers = {"Content-Type": "application/json"}
payload = {"rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
url = f"https://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/"
diff --git a/salt/elasticsearch/files/ingest/osquery.query_result b/salt/elasticsearch/files/ingest/osquery.query_result
index 9bb381946..a58df1315 100644
--- a/salt/elasticsearch/files/ingest/osquery.query_result
+++ b/salt/elasticsearch/files/ingest/osquery.query_result
@@ -9,6 +9,7 @@
{ "rename": { "if": "!(ctx.error?.eventdata_parsing == true)", "field": "unparsed.EventData", "target_field": "winlog.event_data", "ignore_missing": true, "ignore_failure": true } },
{ "rename": { "field": "winlog.source", "target_field": "winlog.channel", "ignore_missing": true } },
{ "rename": { "field": "winlog.eventid", "target_field": "winlog.event_id", "ignore_missing": true } },
+ { "rename": { "field": "winlog.datetime", "target_field": "winlog.systemTime", "ignore_missing": true } },
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational' && ctx.containsKey('winlog')", "name":"win.eventlogs" } },
{ "set": { "field": "event.module", "value": "osquery", "override": false } },
diff --git a/salt/elasticsearch/files/ingest/win.eventlogs b/salt/elasticsearch/files/ingest/win.eventlogs
index 2644be7a2..a6ef87256 100644
--- a/salt/elasticsearch/files/ingest/win.eventlogs
+++ b/salt/elasticsearch/files/ingest/win.eventlogs
@@ -4,8 +4,8 @@
{ "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } },
{ "set": { "if": "ctx.winlog?.channel != null", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } },
{ "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } },
- { "rename": { "if": "ctx.winlog?.systemTime != null", "field": "@timestamp", "target_field": "ingest.timestamp", "ignore_missing": true } },
- { "set": { "if": "ctx.winlog?.systemTime != null", "field": "@timestamp", "value": "{{winlog.systemTime}}", "override": true } },
+ { "rename": { "if": "ctx.winlog?.systemTime != null", "field": "@timestamp", "target_field": "event.ingested", "ignore_missing": true } },
+ { "date": { "if": "ctx.winlog?.systemTime != null", "field": "winlog.systemTime", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSSSSS'Z'","yyyy-MM-dd'T'HH:mm:ss.SSSSSSS'Z'"] } },
{ "set": { "field": "event.code", "value": "{{winlog.event_id}}", "override": true } },
{ "set": { "field": "event.category", "value": "host", "override": true } },
{ "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_failure": true, "ignore_missing": true } },
diff --git a/salt/elasticsearch/templates/so/so-common-template.json b/salt/elasticsearch/templates/so/so-common-template.json
index ebf123fed..c1f0a6755 100644
--- a/salt/elasticsearch/templates/so/so-common-template.json
+++ b/salt/elasticsearch/templates/so/so-common-template.json
@@ -267,9 +267,14 @@
},
"ingest":{
"type":"object",
- "dynamic": true
+ "dynamic": true,
+ "properties":{
+ "timestamp":{
+ "type":"date"
+ }
+ }
},
- "intel":{
+ "intel":{
"type":"object",
"dynamic": true,
"properties":{
diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml
index c680d61c1..0f7c9c778 100644
--- a/salt/filebeat/etc/filebeat.yml
+++ b/salt/filebeat/etc/filebeat.yml
@@ -493,12 +493,13 @@ setup.template.enabled: false
# append ?pretty to the URL.
# Defines if the HTTP endpoint is enabled.
-#http.enabled: false
+http.enabled: true
# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
-#http.host: localhost
+http.host: 0.0.0.0
# Port on which the HTTP endpoint will bind. Default is 5066.
+http.port: 5066
queue.mem.events: {{ FBMEMEVENTS }}
queue.mem.flush.min_events: {{ FBMEMFLUSHMINEVENTS }}
diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls
index 339d307ee..64cdc47fc 100644
--- a/salt/filebeat/init.sls
+++ b/salt/filebeat/init.sls
@@ -74,6 +74,7 @@ so-filebeat:
- port_bindings:
- 0.0.0.0:514:514/udp
- 0.0.0.0:514:514/tcp
+ - 0.0.0.0:5066:5066/tcp
- watch:
- file: /opt/so/conf/filebeat/etc/filebeat.yml
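With the HTTP monitoring endpoint enabled and the port published, Filebeat's internal stats become reachable from the host. A quick check, assuming jq is available:

    curl -s http://localhost:5066/stats | jq .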
diff --git a/salt/firewall/map.jinja b/salt/firewall/map.jinja
index 2df668a07..496e6f568 100644
--- a/salt/firewall/map.jinja
+++ b/salt/firewall/map.jinja
@@ -18,14 +18,18 @@
{# This block translate the portgroups defined in the pillar to what is defined my portgroups.yaml and portgroups.local.yaml #}
{% if salt['pillar.get']('firewall:assigned_hostgroups:chain') %}
+ {% set translated_pillar_assigned_hostgroups = {'chain': {}} %}
{% for chain, hg in salt['pillar.get']('firewall:assigned_hostgroups:chain').items() %}
{% for pillar_hostgroup, pillar_portgroups in salt['pillar.get']('firewall:assigned_hostgroups:chain')[chain].hostgroups.items() %}
- {% do translated_pillar_assigned_hostgroups.update({"chain": {chain: {"hostgroups": {pillar_hostgroup: {"portgroups": []}}}}}) %}
+ {% if translated_pillar_assigned_hostgroups.chain[chain] is defined %}
+ {% do translated_pillar_assigned_hostgroups.chain[chain].hostgroups.update({pillar_hostgroup: {"portgroups": []}}) %}
+ {% else %}
+ {% do translated_pillar_assigned_hostgroups.chain.update({chain: {"hostgroups": {pillar_hostgroup: {"portgroups": []}}}}) %}
+ {% endif %}
{% for pillar_portgroup in pillar_portgroups.portgroups %}
{% set pillar_portgroup = pillar_portgroup.split('.') | last %}
{% do translated_pillar_assigned_hostgroups.chain[chain].hostgroups[pillar_hostgroup].portgroups.append(defined_portgroups[pillar_portgroup]) %}
-
{% endfor %}
{% endfor %}
{% endfor %}
@@ -39,7 +43,6 @@
{% set assigned_hostgroups = default_assigned_hostgroups.role[role] %}
{% endif %}
-
{% if translated_pillar_assigned_hostgroups %}
{% do salt['defaults.merge'](assigned_hostgroups, translated_pillar_assigned_hostgroups, merge_lists=True, in_place=True) %}
{% endif %}
\ No newline at end of file
diff --git a/salt/firewall/portgroups.yaml b/salt/firewall/portgroups.yaml
index 55a09c6bf..1386267f5 100644
--- a/salt/firewall/portgroups.yaml
+++ b/salt/firewall/portgroups.yaml
@@ -18,6 +18,9 @@ firewall:
beats_5644:
tcp:
- 5644
+ beats_5066:
+ tcp:
+ - 5066
cortex:
tcp:
- 9001
diff --git a/salt/influxdb/etc/influxdb.conf b/salt/influxdb/etc/influxdb.conf
index 86c1ccfe8..9d89ca774 100644
--- a/salt/influxdb/etc/influxdb.conf
+++ b/salt/influxdb/etc/influxdb.conf
@@ -233,7 +233,7 @@
# enabled = true
# Determines whether the Flux query endpoint is enabled.
- # flux-enabled = false
+ flux-enabled = true
# The bind address used by the HTTP service.
# bind-address = ":8086"
diff --git a/salt/manager/files/acng/acng.conf b/salt/manager/files/acng/acng.conf
index a37d898af..3492cf111 100644
--- a/salt/manager/files/acng/acng.conf
+++ b/salt/manager/files/acng/acng.conf
@@ -20,6 +20,7 @@ Remap-npm: registry.npmjs.org
Remap-node: nodejs.org
Remap-apache: file:apache_mirrors ; file:backends_apache.us
Remap-salt: repo.saltstack.com; https://repo.saltstack.com
+Remap-securityonion: http://repocache.securityonion.net ; file:securityonion
# Remap-secdeb: security.debian.org
ReportPage: acng-report.html
# SocketPath:/var/run/apt-cacher-ng/socket
@@ -79,7 +80,7 @@ RedirMax: 6
VfileUseRangeOps: 0
# PassThroughPattern: private-ppa\.launchpad\.net:443$
# PassThroughPattern: .* # this would allow CONNECT to everything
-PassThroughPattern: (download\.docker\.com:443|mirrors\.fedoraproject\.org:443|packages\.wazuh\.com:443|repo\.saltstack\.com:443|yum\.dockerproject\.org:443|download\.docker\.com:443|registry\.npmjs\.org:443|registry\.yarnpkg\.com:443)$ # yarn/npm pkg, cant to http :/
+PassThroughPattern: (repo\.securityonion\.net:443|download\.docker\.com:443|mirrors\.fedoraproject\.org:443|packages\.wazuh\.com:443|repo\.saltstack\.com:443|yum\.dockerproject\.org:443|download\.docker\.com:443|registry\.npmjs\.org:443|registry\.yarnpkg\.com:443)$ # yarn/npm pkg, cant to http :/
# ResponseFreezeDetectTime: 500
# ReuseConnections: 1
# PipelineDepth: 255
diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf
index 25e8bc11f..ea820442b 100644
--- a/salt/nginx/etc/nginx.conf
+++ b/salt/nginx/etc/nginx.conf
@@ -157,7 +157,7 @@ http {
ssl_prefer_server_ciphers on;
ssl_protocols TLSv1.2;
- location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
+ location ~* (^/login/.*|^/js/.*|^/css/.*|^/images/.*) {
proxy_pass http://{{ manager_ip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
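The broadened regex now proxies everything under /login/ (not just the bare path) to the SOC web UI on port 9822. A simple probe through nginx from the manager itself:

    curl -sk -o /dev/null -w '%{http_code}\n' https://localhost/login/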
diff --git a/salt/airgap/files/yum.conf b/salt/repo/client/files/centos/airgap/yum.conf
similarity index 100%
rename from salt/airgap/files/yum.conf
rename to salt/repo/client/files/centos/airgap/yum.conf
diff --git a/salt/repo/client/files/centos/keys/GPG-KEY-WAZUH b/salt/repo/client/files/centos/keys/GPG-KEY-WAZUH
new file mode 100644
index 000000000..b424ccfae
--- /dev/null
+++ b/salt/repo/client/files/centos/keys/GPG-KEY-WAZUH
@@ -0,0 +1,52 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v2.0.22 (GNU/Linux)
+
+mQINBFeeyYwBEACyf4VwV8c2++J5BmCl6ofLCtSIW3UoVrF4F+P19k/0ngnSfjWb
+8pSWB11HjZ3Mr4YQeiD7yY06UZkrCXk+KXDlUjMK3VOY7oNPkqzNaP6+8bDwj4UA
+hADMkaXBvWooGizhCoBtDb1bSbHKcAnQ3PTdiuaqF5bcyKk8hv939CHulL2xH+BP
+mmTBi+PM83pwvR+VRTOT7QSzf29lW1jD79v4rtXHJs4KCz/amT/nUm/tBpv3q0sT
+9M9rH7MTQPdqvzMl122JcZST75GzFJFl0XdSHd5PAh2mV8qYak5NYNnwA41UQVIa
++xqhSu44liSeZWUfRdhrQ/Nb01KV8lLAs11Sz787xkdF4ad25V/Rtg/s4UXt35K3
+klGOBwDnzPgHK/OK2PescI5Ve1z4x1C2bkGze+gk/3IcfGJwKZDfKzTtqkZ0MgpN
+7RGghjkH4wpFmuswFFZRyV+s7jXYpxAesElDSmPJ0O07O4lQXQMROE+a2OCcm0eF
+3+Cr6qxGtOp1oYMOVH0vOLYTpwOkAM12/qm7/fYuVPBQtVpTojjV5GDl2uGq7p0o
+h9hyWnLeNRbAha0px6rXcF9wLwU5n7mH75mq5clps3sP1q1/VtP/Fr84Lm7OGke4
+9eD+tPNCdRx78RNWzhkdQxHk/b22LCn1v6p1Q0qBco9vw6eawEkz1qwAjQARAQAB
+tDFXYXp1aC5jb20gKFdhenVoIFNpZ25pbmcgS2V5KSA8c3VwcG9ydEB3YXp1aC5j
+b20+iQI9BBMBCAAnAhsDBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheABQJZHNOBBQkU
+SgzvAAoJEJaz7l8pERFF6xUP/3SbcmrI/u7a2EqZ0GxwQ/LRkPzWkJRnozCtNYHD
+ZjiZgSB/+77hkPS0tsBK/GXFLKfJAuf13XFrCvEuI4Q/pLOCCKIGumKXItUIwJBD
+HiEmVt/XxIijmlF7O1jcWqE/5CQXofjr03WMx+qzNabIwU/6dTKZN4FrR1jDk7yS
+6FYBsbhVcSoqSpGYx7EcuK3c3sKKtnbacK2Sw3K9n8Wdj+EK83cbpMg8D/efVRqv
+xypeCeojtY10y4bmugEwMYPgFkrSbicuiZc8NA8qhvFp6JFRq/uL0PGACyg05wB3
+S9U4wvSkmlo2/G74awna22UlaoYmSSz3UZdpWd2zBxflx17948QfTqyhO6bM8qLz
+dSyR6/6olAcR1N+PBup8PoMdBte4ul/hJp8WIviW0AxJUTZSbVj5v/t43QAKEpCE
+IMHvkK8PRHz/9kMd/2xN7LgMtihCrGZOnzErkjhlZvmiJ6kcJoD7ywzFnfJrntOU
+DjNb3eqUFSEwmhD60Hd2OCkfmiV7NEE/YTd9B72NSwzj4Za/JUdlF64LMeIiHbYp
+Lh7P+mR+lMJf/SWsQmlyuiQ2u8SY2aDFvzBS9WtpwiznuUdrbRN87+TYLSVqDifj
+Ea3zOnzLaLYbOr6LHz1xbhAvInv7KLobgiw1E4WnBNWN8xVwVJLKNE7wV88k43XV
+3L/RuQINBFeeyYwBEADD1Y3zW5OrnYZ6ghTd5PXDAMB8Z1ienmnb2IUzLM+i0yE2
+TpKSP/XYCTBhFa390rYgFO2lbLDVsiz7Txd94nHrdWXGEQfwrbxsvdlLLWk7iN8l
+Fb4B60OfRi3yoR96a/kIPNa0x26+n79LtDuWZ/DTq5JSHztdd9F1sr3h8i5zYmtv
+luj99ZorpwYejbBVUm0+gP0ioaXM37uO56UFVQk3po9GaS+GtLnlgoE5volgNYyO
+rkeIua4uZVsifREkHCKoLJip6P7S3kTyfrpiSLhouEZ7kV1lbMbFgvHXyjm+/AIx
+HIBy+H+e+HNt5gZzTKUJsuBjx44+4jYsOR67EjOdtPOpgiuJXhedzShEO6rbu/O4
+wM1rX45ZXDYa2FGblHCQ/VaS0ttFtztk91xwlWvjTR8vGvp5tIfCi+1GixPRQpbN
+Y/oq8Kv4A7vB3JlJscJCljvRgaX0gTBzlaF6Gq0FdcWEl5F1zvsWCSc/Fv5WrUPY
+5mG0m69YUTeVO6cZS1aiu9Qh3QAT/7NbUuGXIaAxKnu+kkjLSz+nTTlOyvbG7BVF
+a6sDmv48Wqicebkc/rCtO4g8lO7KoA2xC/K/6PAxDrLkVyw8WPsAendmezNfHU+V
+32pvWoQoQqu8ysoaEYc/j9fN4H3mEBCN3QUJYCugmHP0pu7VtpWwwMUqcGeUVwAR
+AQABiQIlBBgBCAAPAhsMBQJZHNOaBQkUSg0HAAoJEJaz7l8pERFFhpkQAJ09mjjp
+n9f18JGSMzP41fVucPuLBZ5XJL/hy2boII1FvgfmOETzNxLPblHdkJVjZS5iMrhL
+EJ1jv+GQDtf68/0jO+HXuQIBmUJ53YwbuuQlLWH7CI2AxlSAKAn2kOApWMKsjnAv
+JwS3eNGukOKWRfEKTqz2Vwi1H7M7ppypZ9keoyAoSIWb61gm7rXbfT+tVBetHfrU
+EM5vz3AS3pJk6Yfqn10IZfiexXmsBD+SpJBNzMBsznCcWO2y4qZNLjFferBoizvV
+34UnZyd1bkSN0T/MKp8sgJwqDJBS72tH6ZIM8NNoy29aPDkeaa8XlhkWiBdRizqL
+BcxrV/1n3xdzfY9FX6s4KGudo+gYsVpY0mrpZU8jG8YUNLDXQTXnRo4CQOtRJJbA
+RFDoZfsDqToZftuEhIsk+MaKlyXoA0eIYqGe6lXa/jEwvViqLYubCNLu0+kgNQ3v
+hKF8Pf7eXFDAePw7guuvDvBOMQqBCaKCxsz1HoKRNYBEdUYrEQBJnX235Q4IsdI/
+GcQ/dvERJXaDCG8EPhnwc517EMUJDiJ1CxT4+VMHphmFbiVqmctz0upIj+D037Xk
+CcgxNte6LZorGRZ/l1MYINliGJKtCCFK7XGVPKiJ8zyGSyPj1FfwtBy5hUX3aQtm
+bvP0H2BRCKoelsbRENu58BkU6YhiUry7pVul
+=SJij
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/salt/repo/client/files/centos/keys/RPM-GPG-KEY-EPEL-7 b/salt/repo/client/files/centos/keys/RPM-GPG-KEY-EPEL-7
new file mode 100644
index 000000000..f205ede46
--- /dev/null
+++ b/salt/repo/client/files/centos/keys/RPM-GPG-KEY-EPEL-7
@@ -0,0 +1,29 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+mQINBFKuaIQBEAC1UphXwMqCAarPUH/ZsOFslabeTVO2pDk5YnO96f+rgZB7xArB
+OSeQk7B90iqSJ85/c72OAn4OXYvT63gfCeXpJs5M7emXkPsNQWWSju99lW+AqSNm
+jYWhmRlLRGl0OO7gIwj776dIXvcMNFlzSPj00N2xAqjMbjlnV2n2abAE5gq6VpqP
+vFXVyfrVa/ualogDVmf6h2t4Rdpifq8qTHsHFU3xpCz+T6/dGWKGQ42ZQfTaLnDM
+jToAsmY0AyevkIbX6iZVtzGvanYpPcWW4X0RDPcpqfFNZk643xI4lsZ+Y2Er9Yu5
+S/8x0ly+tmmIokaE0wwbdUu740YTZjCesroYWiRg5zuQ2xfKxJoV5E+Eh+tYwGDJ
+n6HfWhRgnudRRwvuJ45ztYVtKulKw8QQpd2STWrcQQDJaRWmnMooX/PATTjCBExB
+9dkz38Druvk7IkHMtsIqlkAOQMdsX1d3Tov6BE2XDjIG0zFxLduJGbVwc/6rIc95
+T055j36Ez0HrjxdpTGOOHxRqMK5m9flFbaxxtDnS7w77WqzW7HjFrD0VeTx2vnjj
+GqchHEQpfDpFOzb8LTFhgYidyRNUflQY35WLOzLNV+pV3eQ3Jg11UFwelSNLqfQf
+uFRGc+zcwkNjHh5yPvm9odR1BIfqJ6sKGPGbtPNXo7ERMRypWyRz0zi0twARAQAB
+tChGZWRvcmEgRVBFTCAoNykgPGVwZWxAZmVkb3JhcHJvamVjdC5vcmc+iQI4BBMB
+AgAiBQJSrmiEAhsPBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRBqL66iNSxk
+5cfGD/4spqpsTjtDM7qpytKLHKruZtvuWiqt5RfvT9ww9GUUFMZ4ZZGX4nUXg49q
+ixDLayWR8ddG/s5kyOi3C0uX/6inzaYyRg+Bh70brqKUK14F1BrrPi29eaKfG+Gu
+MFtXdBG2a7OtPmw3yuKmq9Epv6B0mP6E5KSdvSRSqJWtGcA6wRS/wDzXJENHp5re
+9Ism3CYydpy0GLRA5wo4fPB5uLdUhLEUDvh2KK//fMjja3o0L+SNz8N0aDZyn5Ax
+CU9RB3EHcTecFgoy5umRj99BZrebR1NO+4gBrivIfdvD4fJNfNBHXwhSH9ACGCNv
+HnXVjHQF9iHWApKkRIeh8Fr2n5dtfJEF7SEX8GbX7FbsWo29kXMrVgNqHNyDnfAB
+VoPubgQdtJZJkVZAkaHrMu8AytwT62Q4eNqmJI1aWbZQNI5jWYqc6RKuCK6/F99q
+thFT9gJO17+yRuL6Uv2/vgzVR1RGdwVLKwlUjGPAjYflpCQwWMAASxiv9uPyYPHc
+ErSrbRG0wjIfAR3vus1OSOx3xZHZpXFfmQTsDP7zVROLzV98R3JwFAxJ4/xqeON4
+vCPFU6OsT3lWQ8w7il5ohY95wmujfr6lk89kEzJdOTzcn7DBbUru33CQMGKZ3Evt
+RjsC7FDbL017qxS+ZVA/HGkyfiu4cpgV8VUnbql5eAZ+1Ll6Dw==
+=hdPa
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/salt/repo/client/files/centos/keys/SALTSTACK-GPG-KEY.pub b/salt/repo/client/files/centos/keys/SALTSTACK-GPG-KEY.pub
new file mode 100644
index 000000000..14bd7d98c
--- /dev/null
+++ b/salt/repo/client/files/centos/keys/SALTSTACK-GPG-KEY.pub
@@ -0,0 +1,31 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v2
+
+mQENBFOpvpgBCADkP656H41i8fpplEEB8IeLhugyC2rTEwwSclb8tQNYtUiGdna9
+m38kb0OS2DDrEdtdQb2hWCnswxaAkUunb2qq18vd3dBvlnI+C4/xu5ksZZkRj+fW
+tArNR18V+2jkwcG26m8AxIrT+m4M6/bgnSfHTBtT5adNfVcTHqiT1JtCbQcXmwVw
+WbqS6v/LhcsBE//SHne4uBCK/GHxZHhQ5jz5h+3vWeV4gvxS3Xu6v1IlIpLDwUts
+kT1DumfynYnnZmWTGc6SYyIFXTPJLtnoWDb9OBdWgZxXfHEcBsKGha+bXO+m2tHA
+gNneN9i5f8oNxo5njrL8jkCckOpNpng18BKXABEBAAG0MlNhbHRTdGFjayBQYWNr
+YWdpbmcgVGVhbSA8cGFja2FnaW5nQHNhbHRzdGFjay5jb20+iQE4BBMBAgAiBQJT
+qb6YAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAOCKFJ3le/vhkqB/0Q
+WzELZf4d87WApzolLG+zpsJKtt/ueXL1W1KA7JILhXB1uyvVORt8uA9FjmE083o1
+yE66wCya7V8hjNn2lkLXboOUd1UTErlRg1GYbIt++VPscTxHxwpjDGxDB1/fiX2o
+nK5SEpuj4IeIPJVE/uLNAwZyfX8DArLVJ5h8lknwiHlQLGlnOu9ulEAejwAKt9CU
+4oYTszYM4xrbtjB/fR+mPnYh2fBoQO4d/NQiejIEyd9IEEMd/03AJQBuMux62tjA
+/NwvQ9eqNgLw9NisFNHRWtP4jhAOsshv1WW+zPzu3ozoO+lLHixUIz7fqRk38q8Q
+9oNR31KvrkSNrFbA3D89uQENBFOpvpgBCADJ79iH10AfAfpTBEQwa6vzUI3Eltqb
+9aZ0xbZV8V/8pnuU7rqM7Z+nJgldibFk4gFG2bHCG1C5aEH/FmcOMvTKDhJSFQUx
+uhgxttMArXm2c22OSy1hpsnVG68G32Nag/QFEJ++3hNnbyGZpHnPiYgej3FrerQJ
+zv456wIsxRDMvJ1NZQB3twoCqwapC6FJE2hukSdWB5yCYpWlZJXBKzlYz/gwD/Fr
+GL578WrLhKw3UvnJmlpqQaDKwmV2s7MsoZogC6wkHE92kGPG2GmoRD3ALjmCvN1E
+PsIsQGnwpcXsRpYVCoW7e2nW4wUf7IkFZ94yOCmUq6WreWI4NggRcFC5ABEBAAGJ
+AR8EGAECAAkFAlOpvpgCGwwACgkQDgihSd5Xv74/NggA08kEdBkiWWwJZUZEy7cK
+WWcgjnRuOHd4rPeT+vQbOWGu6x4bxuVf9aTiYkf7ZjVF2lPn97EXOEGFWPZeZbH4
+vdRFH9jMtP+rrLt6+3c9j0M8SIJYwBL1+CNpEC/BuHj/Ra/cmnG5ZNhYebm76h5f
+T9iPW9fFww36FzFka4VPlvA4oB7ebBtquFg3sdQNU/MmTVV4jPFWXxh4oRDDR+8N
+1bcPnbB11b5ary99F/mqr7RgQ+YFF0uKRE3SKa7a+6cIuHEZ7Za+zhPaQlzAOZlx
+fuBmScum8uQTrEF5+Um5zkwC7EXTdH1co/+/V/fpOtxIg4XO4kcugZefVm5ERfVS
+MA==
+=dtMN
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/salt/repo/client/files/centos/keys/docker.pub b/salt/repo/client/files/centos/keys/docker.pub
new file mode 100644
index 000000000..1967cbf01
--- /dev/null
+++ b/salt/repo/client/files/centos/keys/docker.pub
@@ -0,0 +1,28 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFit5IEBEADDt86QpYKz5flnCsOyZ/fk3WwBKxfDjwHf/GIflo+4GWAXS7wJ
+1PSzPsvSDATV10J44i5WQzh99q+lZvFCVRFiNhRmlmcXG+rk1QmDh3fsCCj9Q/yP
+w8jn3Hx0zDtz8PIB/18ReftYJzUo34COLiHn8WiY20uGCF2pjdPgfxE+K454c4G7
+gKFqVUFYgPug2CS0quaBB5b0rpFUdzTeI5RCStd27nHCpuSDCvRYAfdv+4Y1yiVh
+KKdoe3Smj+RnXeVMgDxtH9FJibZ3DK7WnMN2yeob6VqXox+FvKYJCCLkbQgQmE50
+uVK0uN71A1mQDcTRKQ2q3fFGlMTqJbbzr3LwnCBE6hV0a36t+DABtZTmz5O69xdJ
+WGdBeePCnWVqtDb/BdEYz7hPKskcZBarygCCe2Xi7sZieoFZuq6ltPoCsdfEdfbO
++VBVKJnExqNZCcFUTEnbH4CldWROOzMS8BGUlkGpa59Sl1t0QcmWlw1EbkeMQNrN
+spdR8lobcdNS9bpAJQqSHRZh3cAM9mA3Yq/bssUS/P2quRXLjJ9mIv3dky9C3udM
++q2unvnbNpPtIUly76FJ3s8g8sHeOnmYcKqNGqHq2Q3kMdA2eIbI0MqfOIo2+Xk0
+rNt3ctq3g+cQiorcN3rdHPsTRSAcp+NCz1QF9TwXYtH1XV24A6QMO0+CZwARAQAB
+tCtEb2NrZXIgUmVsZWFzZSAoQ0UgcnBtKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
+BBMBCgAhBQJYrep4AhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEMUv62ti
+Hp816C0P/iP+1uhSa6Qq3TIc5sIFE5JHxOO6y0R97cUdAmCbEqBiJHUPNQDQaaRG
+VYBm0K013Q1gcJeUJvS32gthmIvhkstw7KTodwOM8Kl11CCqZ07NPFef1b2SaJ7l
+TYpyUsT9+e343ph+O4C1oUQw6flaAJe+8ATCmI/4KxfhIjD2a/Q1voR5tUIxfexC
+/LZTx05gyf2mAgEWlRm/cGTStNfqDN1uoKMlV+WFuB1j2oTUuO1/dr8mL+FgZAM3
+ntWFo9gQCllNV9ahYOON2gkoZoNuPUnHsf4Bj6BQJnIXbAhMk9H2sZzwUi9bgObZ
+XO8+OrP4D4B9kCAKqqaQqA+O46LzO2vhN74lm/Fy6PumHuviqDBdN+HgtRPMUuao
+xnuVJSvBu9sPdgT/pR1N9u/KnfAnnLtR6g+fx4mWz+ts/riB/KRHzXd+44jGKZra
+IhTMfniguMJNsyEOO0AN8Tqcl0eRBxcOArcri7xu8HFvvl+e+ILymu4buusbYEVL
+GBkYP5YMmScfKn+jnDVN4mWoN1Bq2yMhMGx6PA3hOvzPNsUoYy2BwDxNZyflzuAi
+g59mgJm2NXtzNbSRJbMamKpQ69mzLWGdFNsRd4aH7PT7uPAURaf7B5BVp3UyjERW
+5alSGnBqsZmvlRnVH5BDUhYsWZMPRQS9rRr4iGW0l+TH+O2VJ8aQ
+=0Zqq
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/salt/repo/client/files/centos/keys/securityonion.pub b/salt/repo/client/files/centos/keys/securityonion.pub
new file mode 100644
index 000000000..15be14ca9
--- /dev/null
+++ b/salt/repo/client/files/centos/keys/securityonion.pub
@@ -0,0 +1,52 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBF7rzwEBEADBg87uJhnC3Ls7s60hbHGaywGrPtbz2WuYA/ev3YS3X7WS75p8
+PGlzTWUCujx0pEHbK2vYfExl3zksZ8ZmLyZ9VB3oSLiWBzJgKAeB7YCFEo8te+eE
+P2Z+8c+kX4eOV+2waxZyewA2TipSkhWgStSI4Ow8SyVUcUWA3hCw7mo2duNVi7KO
+C3vvI3wzirH+8/XIGo+lWTg6yYlSxdf+0xWzYvV2QCMpwzJfARw6GGXtfCZw/zoO
+o4+YPsiyztQdyI1y+g3Fbesl65E36DelbyP+lYd2VecX8ELEv0wlKCgHYlk6lc+n
+qnOotVjWbsyXuFfo06PHUd6O9n3nmo0drC6kmXGw1e8hu0t8VcGfMTKS/hszwVUY
+bHS6kbfsOoAb6LXPWKfqxk/BdreLXmcHHz88DimS3OS0JufkcmkjxEzSFRL0kb2h
+QVb1SATrbx+v2RWQXvi9sLCjT2fdOiwi1Tgc84orc7A1C3Jwu353YaX9cV+n5uyG
+OZ2AULZ5z2h13sVuiZAwfyyFs/O0CJ783hFA2TNPnyNGAgw/kaIo7nNRnggtndBo
+oQzVS+BHiFx98IF4zDqmF2r2+jOCjxSrw8KnZBe4bgXFtl89DmjoejGvWDnu2MVM
+pZDEs1DcOxHBQmTCWMIYLyNKG0xW6diyWBxEIaa7YgrP6kA+RaDfZ/xXPwARAQAB
+tD9TZWN1cml0eSBPbmlvbiBTb2x1dGlvbnMsIExMQyA8aW5mb0BzZWN1cml0eW9u
+aW9uc29sdXRpb25zLmNvbT6JAlQEEwEKAD4WIQTIBKk9Nr4Mcz6hlkR8EGC3/lBw
+EwUCXuvPAQIbAwUJEswDAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRB8EGC3
+/lBwExB1D/42xIDGU2XFNFyTU+ZqzDA8qNC9hEKjLeizbeM8RIm3xO+3p7SdqbuJ
+7pA8gk0RiHuILb+Ba1xiSh/w/W2bOxQhsXuWHih2z3W1tI+hu6RQhIm4e6CIHHf7
+Vzj4RSvHOVS0AzITUwkHjv0x0Z8zVBPJfEHKkK2x03BqP1o12rd7n2ZMrSfN6sED
+fUwOJLDjthShtyLSPBVG8j7T5cfSCPSLhfVOKPQVcI1sSir7RLeyxt1v1kzjQdaA
++znxO8EgfZJN93wzfBrAGcVT8KmpmgwR6p46m20wJXyZC9DZxJ0o1y3toVWTC+kP
+Qj1ROPivySVn10rBoOJk8HteyhW07gTcydq+noKHV7SqJ1899xRAYP7rDCfI9iMW
+Nn22ZDLnAkIcbNR7JLJCHwsZH/Umo9KO/dIccIqVQel3UCCYZcWTZW0VkcjqVKRa
+eK+JQGaJPrBAoxIG5/sMlbk2sINSubNWlcbH6kM0V8NVwdPiOO9xLmp2hI4ICxE3
+M+O2HCNX4QYzVizzTFxEvW3ieLa4nePQ8J6lvMI2oLkFP7xHoFluvZnuwfNvoEy0
+RnlHExN1UQTUvcbCxIbzjaJ4HJXilWHjgmGaVQO1S7AYskWnNWQ7uJvxnuZBNNwm
+pIvwYEZp23fYaWl/xKqnmPMy2ADjROBKlCm7L+Ntq1r7ELGW5ZCTobkCDQRe688B
+ARAA22GzdkSAo+mwJ2S1RbJ1G20tFnLsG/NC8iMN3lEh/PSmyPdB7mBtjZ+HPDzF
+VSznXZdr3LItBBQOli2hVIj1lZBY7+s2ZufV3TFFwselUwT3b1g1KMkopD95Ckf8
+WhLbSz2yqgrvcEvbB0HFX/ZEsHGqIz2kLacixjwXXLWOMQ2LNbeW1f5zQkBnaNNQ
+/4njzTj68OxnvfplNYNJqi2pZGb2UqarYX04FqKNuocN8E7AC9FQdBXylmVctw9T
+pQVwfCI76bTe6vPWb+keb6UNN1jyXVnhIQ3Fv5sFBsmgXf/hO8tqCotrKjEiK2/i
+RkvFeqsGMXreCgYg9zW4k+DcJtVa+Q8juGOjElrubY3Ua9mCusx3vY4QYSWxQ5Ih
+k1lXiUcM5Rt38lfpKHRJ5Pd4Y5xlWSQfZ7nmzbf/GzJQz+rWrA0X6Oc6cDOPLNXK
+w1dAygre4f2bsp5kHQt6NMefxeNTDmi+4R62K0tb40f5q0Vxz8qdyD48bBsbULNx
+kb6mjOAD+FNkfNXcGeuTq9oRnjx8i93mhYsIP5LFNDXS/zSP1nv0ZUFeIlGQGjV9
+1wOvT454qkI9sKiVFtd4FrNKZJbKszxxDm+DPfB5j+hRC4oeEJ7w+sVyh3EawtfM
+V7Mwj8i+7c3YUCravXBhSwG7SCTggFUgA8lMr8oWVgCATYsAEQEAAYkCPAQYAQoA
+JhYhBMgEqT02vgxzPqGWRHwQYLf+UHATBQJe688BAhsMBQkSzAMAAAoJEHwQYLf+
+UHATTtwQAJiztPW68ykifpFdwYFp1VC7c+uGLhWBqjDY9NSUKNC9caR7bV0cnNu8
+07UG6j18gCB2GSkukXjOR/oTj6rNcW/WouPYfQOrw7+M2Ya8M8iq+E/HOXaXB3b4
+FeCcB0UuwfcHHd2KbXrRHA+9GNpmuOcfTCdsPpIr41Xg4QltATDEt/FrzuKspXg4
+vUKDXgfnbj7y0JcJM2FfcwWGlnAG5MMRyjJQAleGdiidX/9WxgJ4Mweq4qJM0jr3
+Qsrc9VuzxsLr85no3Hn5UYVgT7bBZ59HUbQoi775m78MxN3mWUSdcyLQKovI+YXr
+tshTxWIf/2Ovdzt6Wq1WWXOGGuK1qgdPJTFWrlh3amFdb70zR1p6A/Lthd7Zty+n
+QjRZRQo5jBSnYtjhMrZP6rxM3QqnQ0frEKK9HfDYONk1Bw18CUtdwFGb9OMregLR
+IjvNLp9coSh5yYAepZyUGEPRET0GsmVw2trQF0uyMSkQfiq2zjPto6WWbsmrrbLr
+cfZ/wnBw1FoNEd51U54euo9yvOgOVtJGvqLgHNwB8574FhQhoWAMhyizqdgeEt26
+m3FXecUNKL/AK71/l04vor+/WsXe8uhDg3O84qeYa9wgd8LZZVmGZJDosSwqYjtb
+LdNNm+v60Zo6rFWSREegqi/nRTTDdxdW99ybjlh+mpbq3xavyFXF
+=bhkm
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/salt/repo/client/files/centos/securityonion.repo b/salt/repo/client/files/centos/securityonion.repo
new file mode 100644
index 000000000..53788f272
--- /dev/null
+++ b/salt/repo/client/files/centos/securityonion.repo
@@ -0,0 +1,71 @@
+[base]
+name=CentOS-$releasever - Base
+baseurl=https://repo.securityonion.net/file/securityonion-repo/base/
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
+
+#released updates
+[updates]
+name=CentOS-$releasever - Updates
+baseurl=https://repo.securityonion.net/file/securityonion-repo/updates/
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
+
+#additional packages that may be useful
+[extras]
+name=CentOS-$releasever - Extras
+baseurl=https://repo.securityonion.net/file/securityonion-repo/extras/
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
+
+#additional packages that extend functionality of existing packages
+[centosplus]
+name=CentOS-$releasever - Plus
+baseurl=https://repo.securityonion.net/file/securityonion-repo/centosplus/
+gpgcheck=1
+enabled=0
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
+
+[epel]
+name=Extra Packages for Enterprise Linux 7 - $basearch
+baseurl=https://repo.securityonion.net/file/securityonion-repo/epel/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
+
+[docker-ce-stable]
+name=Docker CE Stable - $basearch
+baseurl=https://repo.securityonion.net/file/securityonion-repo/docker-ce-stable
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/docker.pub
+
+[saltstack]
+name=SaltStack repo for RHEL/CentOS $releasever PY3
+baseurl=https://repo.securityonion.net/file/securityonion-repo/saltstack3003/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/SALTSTACK-GPG-KEY.pub
+
+[saltstack3003]
+name=SaltStack repo for RHEL/CentOS $releasever PY3
+baseurl=https://repo.securityonion.net/file/securityonion-repo/saltstack3003/
+enabled=1
+gpgcheck=1
+gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/SALTSTACK-GPG-KEY.pub
+
+[wazuh_repo]
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=https://repo.securityonion.net/file/securityonion-repo/wazuh_repo/
+protect=1
+
+[wazuh4_repo]
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=https://repo.securityonion.net/file/securityonion-repo/wazuh4_repo/
+protect=1
\ No newline at end of file
diff --git a/salt/repo/client/files/centos/securityonioncache.repo b/salt/repo/client/files/centos/securityonioncache.repo
new file mode 100644
index 000000000..d683ab410
--- /dev/null
+++ b/salt/repo/client/files/centos/securityonioncache.repo
@@ -0,0 +1,71 @@
+[base]
+name=CentOS-$releasever - Base
+baseurl=http://repocache.securityonion.net/file/securityonion-repo/base/
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
+
+#released updates
+[updates]
+name=CentOS-$releasever - Updates
+baseurl=http://repocache.securityonion.net/file/securityonion-repo/updates/
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
+
+#additional packages that may be useful
+[extras]
+name=CentOS-$releasever - Extras
+baseurl=http://repocache.securityonion.net/file/securityonion-repo/extras/
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
+
+#additional packages that extend functionality of existing packages
+[centosplus]
+name=CentOS-$releasever - Plus
+baseurl=http://repocache.securityonion.net/file/securityonion-repo/centosplus/
+gpgcheck=1
+enabled=0
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
+
+[epel]
+name=Extra Packages for Enterprise Linux 7 - $basearch
+baseurl=http://repocache.securityonion.net/file/securityonion-repo/epel/
+enabled=1
+gpgcheck=1
+gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/RPM-GPG-KEY-EPEL-7
+
+[docker-ce-stable]
+name=Docker CE Stable - $basearch
+baseurl=http://repocache.securityonion.net/file/securityonion-repo/docker-ce-stable
+enabled=1
+gpgcheck=1
+gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/docker.pub
+
+[saltstack]
+name=SaltStack repo for RHEL/CentOS $releasever PY3
+baseurl=http://repocache.securityonion.net/file/securityonion-repo/saltstack3003/
+enabled=1
+gpgcheck=1
+gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/SALTSTACK-GPG-KEY.pub
+
+[saltstack3003]
+name=SaltStack repo for RHEL/CentOS $releasever PY3
+baseurl=http://repocache.securityonion.net/file/securityonion-repo/saltstack3003/
+enabled=1
+gpgcheck=1
+gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/SALTSTACK-GPG-KEY.pub
+
+[wazuh_repo]
+gpgcheck=1
+gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=http://repocache.securityonion.net/file/securityonion-repo/wazuh_repo/
+protect=1
+
+[wazuh4_repo]
+gpgcheck=1
+gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=http://repocache.securityonion.net/file/securityonion-repo/wazuh4_repo/
+protect=1
\ No newline at end of file
diff --git a/salt/yum/etc/yum.conf.jinja b/salt/repo/client/files/centos/yum.conf.jinja
similarity index 100%
rename from salt/yum/etc/yum.conf.jinja
rename to salt/repo/client/files/centos/yum.conf.jinja
diff --git a/salt/repo/client/init.sls b/salt/repo/client/init.sls
new file mode 100644
index 000000000..5567caac2
--- /dev/null
+++ b/salt/repo/client/init.sls
@@ -0,0 +1,81 @@
+{% from 'repo/client/map.jinja' import ABSENTFILES with context %}
+{% from 'repo/client/map.jinja' import REPOPATH with context %}
+{% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
+{% set managerupdates = salt['pillar.get']('global:managerupdate', '0') %}
+{% set role = grains.id.split('_') | last %}
+
+# from airgap state
+{% if ISAIRGAP and grains.os == 'CentOS' %}
+{% set MANAGER = salt['grains.get']('master') %}
+airgapyum:
+ file.managed:
+ - name: /etc/yum/yum.conf
+ - source: salt://repo/client/files/centos/airgap/yum.conf
+
+airgap_repo:
+ pkgrepo.managed:
+ - humanname: Airgap Repo
+ - baseurl: https://{{ MANAGER }}/repo
+ - gpgcheck: 1
+ - sslverify: 0
+{% endif %}
+
+# from airgap and common
+{% if ABSENTFILES|length > 0 %}
+ {% for file in ABSENTFILES %}
+{{ file }}:
+ file.absent:
+ - name: {{ REPOPATH }}{{ file }}
+ - onchanges_in:
+ - module: cleanyum
+ {% endfor %}
+{% endif %}
+
+# from common state
+# Remove default Repos
+{% if grains['os'] == 'CentOS' %}
+repair_yumdb:
+ cmd.run:
+ - name: 'mv -f /var/lib/rpm/__db* /tmp && yum clean all'
+ - onlyif:
+ - 'yum check-update 2>&1 | grep "Error: rpmdb open failed"'
+
+crsynckeys:
+ file.recurse:
+ - name: /etc/pki/rpm-gpg
+ - source: salt://repo/client/files/centos/keys/
+
+{% if not ISAIRGAP %}
+crsecurityonionrepo:
+ file.managed:
+ {% if role in ['eval', 'standalone', 'import', 'manager', 'managersearch'] or managerupdates == 0 %}
+ - name: /etc/yum.repos.d/securityonion.repo
+ - source: salt://repo/client/files/centos/securityonion.repo
+ {% else %}
+ - name: /etc/yum.repos.d/securityonioncache.repo
+ - source: salt://repo/client/files/centos/securityonioncache.repo
+ {% endif %}
+ - mode: 644
+
+yumconf:
+ file.managed:
+ - name: /etc/yum.conf
+ - source: salt://repo/client/files/centos/yum.conf.jinja
+ - mode: 644
+ - template: jinja
+{% endif %}
+
+cleanyum:
+ module.run:
+ - pkg.clean_metadata: []
+ - onchanges:
+{% if ISAIRGAP %}
+ - file: airgapyum
+ - pkgrepo: airgap_repo
+{% else %}
+ - file: crsecurityonionrepo
+ - file: yumconf
+{% endif %}
+
+{% endif %}
+
diff --git a/salt/repo/client/map.jinja b/salt/repo/client/map.jinja
new file mode 100644
index 000000000..ccfa1eae2
--- /dev/null
+++ b/salt/repo/client/map.jinja
@@ -0,0 +1,25 @@
+{% if grains.os == 'CentOS' %}
+
+ {% set REPOPATH = '/etc/yum.repos.d/' %}
+ {% set ABSENTFILES = [
+ 'CentOS-Base.repo',
+ 'CentOS-CR.repo',
+ 'CentOS-Debuginfo.repo',
+ 'CentOS-fasttrack.repo',
+ 'CentOS-Media.repo',
+ 'CentOS-Sources.repo',
+ 'CentOS-Vault.repo',
+ 'CentOS-x86_64-kernel.repo',
+ 'epel.repo',
+ 'epel-testing.repo',
+ 'saltstack.repo',
+ 'wazuh.repo'
+ ]
+ %}
+
+{% elif grains.os == 'Ubuntu' %}
+
+ {% set REPOPATH = '/etc/apt/sources.list.d/' %}
+ {% set ABSENTFILES = [] %}
+
+{% endif %}
\ No newline at end of file
diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja
index 7ef63bd68..5d6d980be 100644
--- a/salt/salt/map.jinja
+++ b/salt/salt/map.jinja
@@ -3,28 +3,19 @@
{% if grains.os == 'Ubuntu' %}
{% set SPLITCHAR = '+' %}
+ {% set SALTNOTHELD = salt['cmd.run']('apt-mark showhold | grep -q salt- ; echo $?', python_shell=True) | int %}
{% else %}
{% set SPLITCHAR = '-' %}
+ {% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt- ; echo $?', python_shell=True) | int %}
{% endif %}
{% set INSTALLEDSALTVERSION = salt['pkg.version']('salt-minion').split(SPLITCHAR)[0] %}
-{% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
-
-{% if grains.os|lower == 'ubuntu' %}
- {% set COMMON = 'salt-common' %}
-{% elif grains.os|lower in ['centos', 'redhat'] %}
- {% set COMMON = 'salt' %}
-{% endif %}
{% if grains.saltversion|string != SALTVERSION|string %}
{% if grains.os|lower in ['centos', 'redhat'] %}
- {% if ISAIRGAP is sameas true %}
- {% set UPGRADECOMMAND = 'yum clean all && yum versionlock delete "salt-*" && /usr/sbin/bootstrap-salt.sh -X -s 120 -r -F -x python3 stable ' ~ SALTVERSION ~ ' && yum versionlock add "salt-*"' %}
- {% else %}
- {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && /usr/sbin/bootstrap-salt.sh -X -s 120 -F -x python3 stable ' ~ SALTVERSION ~ ' && yum versionlock add "salt-*"' %}
- {% endif %}
+ {% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -X -s 120 -r -F -x python3 stable ' ~ SALTVERSION ~ ' && yum versionlock add "salt-*"' %}
{% elif grains.os|lower == 'ubuntu' %}
- {% set UPGRADECOMMAND = 'apt-mark unhold salt-common && apt-mark unhold salt-minion && /usr/sbin/bootstrap-salt.sh -X -s 120 -F -x python3 stable ' ~ SALTVERSION ~ ' && apt-mark hold salt-common && apt-mark hold salt-minion' %}
+ {% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -X -s 120 -F -x python3 stable ' ~ SALTVERSION ~ ' && apt-mark hold salt-common && apt-mark hold salt-minion' %}
{% endif %}
{% else %}
{% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %}
diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml
index 2b50b517b..8588af65c 100644
--- a/salt/salt/master.defaults.yaml
+++ b/salt/salt/master.defaults.yaml
@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
master:
- version: 3002.5
\ No newline at end of file
+ version: 3003
\ No newline at end of file
diff --git a/salt/salt/master.sls b/salt/salt/master.sls
index 3c23bbb36..8b2b6c7d0 100644
--- a/salt/salt/master.sls
+++ b/salt/salt/master.sls
@@ -1,17 +1,16 @@
+{% from 'salt/map.jinja' import SALTNOTHELD %}
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
-{% from 'salt/map.jinja' import COMMON with context %}
-
include:
- salt.minion
-salt_master_package:
- pkg.installed:
- - pkgs:
- - {{ COMMON }}
- - salt-master
- - hold: True
+{% if SALTNOTHELD == 1 %}
+hold_salt_master_package:
+ module.run:
+ - pkg.hold:
+ - name: salt-master
+{% endif %}
salt_master_service:
service.running:
diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml
index e6b1303ed..560493bed 100644
--- a/salt/salt/minion.defaults.yaml
+++ b/salt/salt/minion.defaults.yaml
@@ -2,5 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
minion:
- version: 3002.5
- check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
\ No newline at end of file
+ version: 3003
+ check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
+ service_start_delay: 30 # in seconds.
\ No newline at end of file
diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls
index 47cd179ac..2ac66c81a 100644
--- a/salt/salt/minion.sls
+++ b/salt/salt/minion.sls
@@ -1,10 +1,22 @@
-{% from 'salt/map.jinja' import COMMON with context %}
{% from 'salt/map.jinja' import UPGRADECOMMAND with context %}
{% from 'salt/map.jinja' import SALTVERSION %}
{% from 'salt/map.jinja' import INSTALLEDSALTVERSION %}
+{% from 'salt/map.jinja' import SALTNOTHELD %}
+{% import_yaml 'salt/minion.defaults.yaml' as SALTMINION %}
+{% set service_start_delay = SALTMINION.salt.minion.service_start_delay %}
include:
- salt
+ - systemd.reload
+
+{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
+
+{% if SALTNOTHELD == 0 %}
+unhold_salt_packages:
+ module.run:
+ - pkg.unhold:
+ - name: 'salt-*'
+{% endif %}
install_salt_minion:
cmd.run:
@@ -13,15 +25,16 @@ install_salt_minion:
exec 1>&- # close stdout
exec 2>&- # close stderr
nohup /bin/sh -c '{{ UPGRADECOMMAND }}' &
- - onlyif: test "{{INSTALLEDSALTVERSION}}" != "{{SALTVERSION}}"
+{% endif %}
-salt_minion_package:
- pkg.installed:
- - pkgs:
- - {{ COMMON }}
- - salt-minion
- - hold: True
- - onlyif: test "{{INSTALLEDSALTVERSION}}" == "{{SALTVERSION}}"
+{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
+
+{% if SALTNOTHELD == 1 %}
+hold_salt_packages:
+ module.run:
+ - pkg.hold:
+ - name: 'salt-*'
+{% endif %}
set_log_levels:
file.append:
@@ -32,6 +45,20 @@ set_log_levels:
- listen_in:
- service: salt_minion_service
+salt_minion_service_unit_file:
+ file.managed:
+ - name: /etc/systemd/system/multi-user.target.wants/salt-minion.service
+ - source: salt://salt/service/salt-minion.service.jinja
+ - template: jinja
+ - defaults:
+ service_start_delay: {{ service_start_delay }}
+ - onchanges_in:
+ - module: systemd_reload
+ - listen_in:
+ - service: salt_minion_service
+{% endif %}
+
+# this has to be outside the if statement above since there are _in calls to this state
salt_minion_service:
service.running:
- name: salt-minion
diff --git a/salt/salt/service/salt-minion.service.jinja b/salt/salt/service/salt-minion.service.jinja
new file mode 100644
index 000000000..c7bae0bc2
--- /dev/null
+++ b/salt/salt/service/salt-minion.service.jinja
@@ -0,0 +1,15 @@
+[Unit]
+Description=The Salt Minion
+Documentation=man:salt-minion(1) file:///usr/share/doc/salt/html/contents.html https://docs.saltstack.com/en/latest/contents.html
+After=network.target salt-master.service
+
+[Service]
+KillMode=process
+Type=notify
+NotifyAccess=all
+LimitNOFILE=8192
+ExecStart=/usr/bin/salt-minion
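+# Delay minion startup; the sleep value comes from the salt:minion:service_start_delay pillar, falling back to the rendered default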
+ExecStartPre=/bin/sleep {{ salt['pillar.get']('salt:minion:service_start_delay', service_start_delay) }}
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json
index 23b967b04..378d42373 100644
--- a/salt/sensoroni/files/sensoroni.json
+++ b/salt/sensoroni/files/sensoroni.json
@@ -1,5 +1,6 @@
{%- set URLBASE = salt['pillar.get']('global:url_base') %}
-{%- set DESCRIPTION = salt['pillar.get']('sensoroni:node_description') %}
+{%- set DESCRIPTION = salt['pillar.get']('sensoroni:node_description', '') %}
+{%- set MODEL = salt['grains.get']('sosmodel', '') %}
{%- set ADDRESS = salt['pillar.get']('sensoroni:node_address') %}
{%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') %}
{%- set CHECKININTERVALMS = salt['pillar.get']('sensoroni:node_checkin_interval_ms', 10000) %}
@@ -9,7 +10,7 @@
{%- else %}
{%- set STENODEFAULT = False %}
{%- endif %}
-{%- set STENOENABLED = salt['pillar.get']('steno:enabled', STENODEFAULT) %}
+{%- set STENOENABLED = salt['pillar.get']('steno:enabled', STENODEFAULT) %}
{
"logFilename": "/opt/sensoroni/logs/sensoroni.log",
"logLevel":"info",
@@ -17,6 +18,7 @@
"role": "{{ grains.role }}",
"description": "{{ DESCRIPTION }}",
"address": "{{ ADDRESS }}",
+ "model": "{{ MODEL }}",
"pollIntervalMs": {{ CHECKININTERVALMS if CHECKININTERVALMS else 10000 }},
"serverUrl": "https://{{ URLBASE }}/sensoroniagents",
"verifyCert": false,
diff --git a/salt/soc/files/soc/alerts.actions.json b/salt/soc/files/soc/alerts.actions.json
index 364c59d27..c0543d8fc 100644
--- a/salt/soc/files/soc/alerts.actions.json
+++ b/salt/soc/files/soc/alerts.actions.json
@@ -1,7 +1,7 @@
[
{ "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "target": "",
"links": [
- "/#/hunt?q=\"{value}\" | groupby event.module event.dataset"
+ "/#/hunt?q=\"{value|escape}\" | groupby event.module event.dataset"
]},
{ "name": "actionCorrelate", "description": "actionCorrelateHelp", "icon": "fab fa-searchengin", "target": "",
"links": [
diff --git a/salt/soc/files/soc/banner.md b/salt/soc/files/soc/banner.md
new file mode 100644
index 000000000..e69de29bb
diff --git a/salt/soc/files/soc/changes.json b/salt/soc/files/soc/changes.json
deleted file mode 100644
index dbc7b4061..000000000
--- a/salt/soc/files/soc/changes.json
+++ /dev/null
@@ -1,49 +0,0 @@
-{
- "title": "Security Onion 2.3.40 is here!",
- "changes": [
- { "summary": "FEATURE: Add option for HTTP Method Specification/POST to Hunt/Alerts Actions #2904" },
- { "summary": "FEATURE: Add option to configure proxy for various tools used during setup + persist the proxy configuration #529" },
- { "summary": "FEATURE: Alerts/Hunt - Provide method for base64-encoding pivot value #1749" },
- { "summary": "FEATURE: Allow users to customize links in SOC #1248" },
- { "summary": "FEATURE: Display user who requested PCAP in SOC #2775" },
- { "summary": "FEATURE: Make SOC browser app connection timeouts adjustable #2408" },
- { "summary": "FEATURE: Move to FleetDM #3483" },
- { "summary": "FEATURE: Reduce field cache expiration from 1d to 5m, and expose value as a salt pillar #3537" },
- { "summary": "FEATURE: Refactor docker_clean salt state to use loop w/ inspection instead of hardcoded image list #3113" },
- { "summary": "FEATURE: Run so-ssh-harden during setup #1932" },
- { "summary": "FEATURE: SOC should only display links to tools that are enabled #1643" },
- { "summary": "FEATURE: Update Sigmac Osquery Field Mappings #3137" },
- { "summary": "FEATURE: User must accept the Elastic licence during setup #3233" },
- { "summary": "FEATURE: soup should output more guidance for distributed deployments at the end #3340" },
- { "summary": "FEATURE: soup should provide some initial information and then prompt the user to continue #3486" },
- { "summary": "FIX: Add cronjob for so-suricata-eve-clean script #3515" },
- { "summary": "FIX: Change Elasticsearch heap formula #1686" },
- { "summary": "FIX: Create a post install version loop in soup #3102" },
- { "summary": "FIX: Custom Kibana settings are not being applied properly on upgrades #3254" },
- { "summary": "FIX: Hunt query issues with quotes #3320" },
- { "summary": "FIX: IP Addresses don't work with .security #3327" },
- { "summary": "FIX: Improve DHCP leases query in Hunt #3395" },
- { "summary": "FIX: Improve Setup verbiage #3422" },
- { "summary": "FIX: Improve Suricata DHCP logging and parsing #3397" },
- { "summary": "FIX: Keep RELATED,ESTABLISHED rules at the top of iptables chains #3288" },
- { "summary": "FIX: Populate http.status_message field #3408" },
- { "summary": "FIX: Remove 'types removal' deprecation messages from elastic log. #3345" },
- { "summary": "FIX: Reword + fix formatting on ES data storage prompt #3205" },
- { "summary": "FIX: SMTP shoud read SNMP on Kibana SNMP view #3413" },
- { "summary": "FIX: Sensors can temporarily show offline while processing large PCAP jobs #3279" },
- { "summary": "FIX: Soup should log to the screen as well as to a file #3467" },
- { "summary": "FIX: Strelka port 57314 not immediately relinquished upon restart #3457" },
- { "summary": "FIX: Switch SOC to pull from fieldcaps API due to field caching changes in Kibana 7.11 #3502" },
- { "summary": "FIX: Syntax error in /etc/sysctl.d/99-reserved-ports.conf #3308" },
- { "summary": "FIX: Telegraf hardcoded to use https and is not aware of elasticsearch features #2061" },
- { "summary": "FIX: Zeek Index Close and Delete Count for curator #3274" },
- { "summary": "FIX: so-cortex-user-add and so-cortex-user-enable use wrong pillar value for api key #3388" },
- { "summary": "FIX: so-rule does not completely apply change #3289" },
- { "summary": "FIX: soup should recheck disk space after it tries to clean up. #3235" },
- { "summary": "UPGRADE: Elastic 7.11.2 #3389" },
- { "summary": "UPGRADE: Suricata 6.0.2 #3217" },
- { "summary": "UPGRADE: Zeek 4 #3216" },
- { "summary": "UPGRADE: Zeek container to use Python 3 #1113" },
- { "summary": "UPGRADE: docker-ce to latest #3493" }
- ]
-}
\ No newline at end of file
diff --git a/salt/soc/files/soc/custom.js b/salt/soc/files/soc/custom.js
index b23b7c36b..575e019a7 100644
--- a/salt/soc/files/soc/custom.js
+++ b/salt/soc/files/soc/custom.js
@@ -17,8 +17,5 @@
suggested to avoid and/or minimize the extent of any
content placed here so that upgrading to newer version of
Security Onion do not become a burden.
-
- Example:
-
- i18n.translations["en-US"].loginHeader = "Unauthorized use of this computer system is prohibited...";
+
*/
diff --git a/salt/soc/files/soc/hunt.actions.json b/salt/soc/files/soc/hunt.actions.json
index 364c59d27..c0543d8fc 100644
--- a/salt/soc/files/soc/hunt.actions.json
+++ b/salt/soc/files/soc/hunt.actions.json
@@ -1,7 +1,7 @@
[
{ "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "target": "",
"links": [
- "/#/hunt?q=\"{value}\" | groupby event.module event.dataset"
+ "/#/hunt?q=\"{value|escape}\" | groupby event.module event.dataset"
]},
{ "name": "actionCorrelate", "description": "actionCorrelateHelp", "icon": "fab fa-searchengin", "target": "",
"links": [
diff --git a/salt/soc/files/soc/hunt.queries.json b/salt/soc/files/soc/hunt.queries.json
index 840b4b373..93295364d 100644
--- a/salt/soc/files/soc/hunt.queries.json
+++ b/salt/soc/files/soc/hunt.queries.json
@@ -34,7 +34,7 @@
{ "name": "HTTP", "description": "HTTP grouped by status code and message", "query": "event.dataset:http | groupby http.status_code http.status_message"},
{ "name": "HTTP", "description": "HTTP grouped by method and user agent", "query": "event.dataset:http | groupby http.method http.useragent"},
{ "name": "HTTP", "description": "HTTP grouped by virtual host", "query": "event.dataset:http | groupby http.virtual_host"},
- { "name": "HTTP", "description": "HTTP with exe downloads", "query": "event.dataset:http AND file.resp_mime_types:dosexec | groupby http.virtual_host"},
+ { "name": "HTTP", "description": "HTTP with exe downloads", "query": "event.dataset:http AND (file.resp_mime_types:dosexec OR file.resp_mime_types:executable) | groupby http.virtual_host"},
{ "name": "Intel", "description": "Intel framework hits grouped by indicator", "query": "event.dataset:intel | groupby intel.indicator.keyword"},
{ "name": "IRC", "description": "IRC grouped by command", "query": "event.dataset:irc | groupby irc.command.type"},
{ "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.dataset:kerberos | groupby kerberos.service"},
diff --git a/salt/soc/files/soc/motd.md b/salt/soc/files/soc/motd.md
new file mode 100644
index 000000000..295329f39
--- /dev/null
+++ b/salt/soc/files/soc/motd.md
@@ -0,0 +1,25 @@
+## Getting Started
+
+New to Security Onion 2? Check out the [Online Help](/docs/) and [Cheatsheet](/docs/cheatsheet.pdf) to learn how to best utilize Security Onion to hunt for evil! Find them in the upper-right menu.
+
+If you're ready to dive in, take a look at the [Alerts](/#/alerts) interface to see what Security Onion has detected so far. Or navigate to the [Hunt](/#/hunt) interface to hunt for evil that the alerts might have missed!
+
+## What's New
+
+The release notes have moved to the upper-right menu. Click on the [What's New](/docs/#document-release-notes) menu option to find all the latest fixes and features in this version of Security Onion!
+
+## Customize This Space
+
+Make this area your own by customizing this content, which is stored in the `motd.md` file and uses the common Markdown (.md) format. Visit [markdownguide.org](https://www.markdownguide.org/) to learn more about the simple Markdown format.
+
+To customize this content, log in to the manager via SSH and execute the following command:
+
+```bash
+cp -f /opt/so/saltstack/default/salt/soc/files/soc/motd.md /opt/so/saltstack/local/salt/soc/files/soc/motd.md
+```
+
+Now, edit the new file as desired. Finally, run this command:
+
+```bash
+salt-call state.apply soc queue=True
+```
diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json
index 6e2850aca..6f1c3a6da 100644
--- a/salt/soc/files/soc/soc.json
+++ b/salt/soc/files/soc/soc.json
@@ -53,6 +53,17 @@
"cacheMs": {{ ES_FIELDCAPS_CACHE }},
"verifyCert": false
},
+ "influxdb": {
+{%- if grains['role'] in ['so-import'] %}
+ "hostUrl": "",
+{%- else %}
+ "hostUrl": "https://{{ MANAGERIP }}:8086",
+{%- endif %}
+ "token": "",
+ "org": "",
+ "bucket": "telegraf",
+ "verifyCert": false
+ },
"sostatus": {
"refreshIntervalMs": 30000,
"offlineThresholdMs": 900000
diff --git a/salt/soc/init.sls b/salt/soc/init.sls
index d31898e72..18fda41da 100644
--- a/salt/soc/init.sls
+++ b/salt/soc/init.sls
@@ -35,10 +35,19 @@ socconfig:
- mode: 600
- template: jinja
-socchanges:
+socmotd:
file.managed:
- - name: /opt/so/conf/soc/changes.json
- - source: salt://soc/files/soc/changes.json
+ - name: /opt/so/conf/soc/motd.md
+ - source: salt://soc/files/soc/motd.md
+ - user: 939
+ - group: 939
+ - mode: 600
+ - template: jinja
+
+socbanner:
+ file.managed:
+ - name: /opt/so/conf/soc/banner.md
+ - source: salt://soc/files/soc/banner.md
- user: 939
- group: 939
- mode: 600
@@ -61,7 +70,8 @@ so-soc:
- binds:
- /nsm/soc/jobs:/opt/sensoroni/jobs:rw
- /opt/so/conf/soc/soc.json:/opt/sensoroni/sensoroni.json:ro
- - /opt/so/conf/soc/changes.json:/opt/sensoroni/html/changes.json:ro
+ - /opt/so/conf/soc/motd.md:/opt/sensoroni/html/motd.md:ro
+ - /opt/so/conf/soc/banner.md:/opt/sensoroni/html/login/banner.md:ro
- /opt/so/conf/soc/custom.js:/opt/sensoroni/html/js/custom.js:ro
- /opt/so/log/soc/:/opt/sensoroni/logs/:rw
{%- if salt['pillar.get']('nodestab', {}) %}
diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf
index c9941c3e1..b6ee45e74 100644
--- a/salt/soctopus/files/SOCtopus.conf
+++ b/salt/soctopus/files/SOCtopus.conf
@@ -8,8 +8,8 @@
[es]
es_url = https://{{MANAGER}}:9200
es_ip = {{MANAGER}}
-es_user = YOURESUSER
-es_pass = YOURESPASS
+es_user =
+es_pass =
es_index_pattern = so-*
es_verifycert = no
diff --git a/salt/systemd/reload.sls b/salt/systemd/reload.sls
new file mode 100644
index 000000000..ff2185539
--- /dev/null
+++ b/salt/systemd/reload.sls
@@ -0,0 +1,3 @@
+systemd_reload:
+ module.run:
+ - service.systemctl_reload: []
\ No newline at end of file
diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf
index f6bcbdaf5..a7be4d8a2 100644
--- a/salt/telegraf/etc/telegraf.conf
+++ b/salt/telegraf/etc/telegraf.conf
@@ -17,6 +17,7 @@
{% set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') %}
{% set HELIX_API_KEY = salt['pillar.get']('fireeye:helix:api_key', '') %}
{% set UNIQUEID = salt['pillar.get']('sensor:uniqueid', '') %}
+{%- set TRUE_CLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
# Global tags can be specified here in key="value" format.
[global_tags]
@@ -614,18 +615,29 @@
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
+{% if TRUE_CLUSTER %}
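+# With elasticsearch:true_cluster enabled, only the manager polls Elasticsearch and gathers cluster-wide health and stats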
+ {% if grains.role == 'so-manager' %}
+[[inputs.elasticsearch]]
+ servers = ["https://{{ MANAGER }}:9200"]
+ insecure_skip_verify = true
+ local = false
+ cluster_health = true
+ cluster_stats = true
+ {% endif %}
+
+{% else %}
# # Read stats from one or more Elasticsearch servers or clusters
-{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
+ {% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
[[inputs.elasticsearch]]
servers = ["https://{{ MANAGER }}:9200"]
insecure_skip_verify = true
-{% elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
+ {% elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
[[inputs.elasticsearch]]
servers = ["https://{{ NODEIP }}:9200"]
insecure_skip_verify = true
+ {% endif %}
{% endif %}
-
#
# ## Timeout for HTTP requests to the elastic search server(s)
# http_timeout = "5s"
@@ -663,14 +675,45 @@
# # Read metrics from one or more commands that can output to stdout
+[[inputs.exec]]
+ commands = [
+ "/scripts/sostatus.sh"
+ ]
+ data_format = "influx"
+ timeout = "15s"
+ interval = "60s"
+
+
# ## Commands array
-{% if grains['role'] in ['so-manager', 'so-managersearch'] %}
+{% if grains['role'] in ['so-manager'] %}
+[[inputs.exec]]
+ commands = [
+ "/scripts/redis.sh",
+ "/scripts/influxdbsize.sh",
+ "/scripts/raid.sh",
+ "/scripts/beatseps.sh"
+ ]
+ data_format = "influx"
+ ## Timeout for each command to complete.
+ timeout = "15s"
+{% elif grains['role'] in ['so-managersearch'] %}
[[inputs.exec]]
commands = [
"/scripts/redis.sh",
"/scripts/influxdbsize.sh",
"/scripts/eps.sh",
- "/scripts/raid.sh"
+ "/scripts/raid.sh",
+ "/scripts/beatseps.sh"
+ ]
+ data_format = "influx"
+ ## Timeout for each command to complete.
+ timeout = "15s"
+{% elif grains['role'] in ['so-node'] %}
+[[inputs.exec]]
+ commands = [
+ "/scripts/eps.sh",
+ "/scripts/raid.sh",
+ "/scripts/beatseps.sh"
]
data_format = "influx"
## Timeout for each command to complete.
@@ -686,7 +729,8 @@
"/scripts/zeekcaptureloss.sh",
{% endif %}
"/scripts/oldpcap.sh",
- "/scripts/raid.sh"
+ "/scripts/raid.sh",
+ "/scripts/beatseps.sh"
]
data_format = "influx"
timeout = "15s"
@@ -702,7 +746,8 @@
{% endif %}
"/scripts/oldpcap.sh",
"/scripts/eps.sh",
- "/scripts/raid.sh"
+ "/scripts/raid.sh",
+ "/scripts/beatseps.sh"
]
data_format = "influx"
timeout = "15s"
@@ -720,7 +765,8 @@
{% endif %}
"/scripts/oldpcap.sh",
"/scripts/eps.sh",
- "/scripts/raid.sh"
+ "/scripts/raid.sh",
+ "/scripts/beatseps.sh"
]
data_format = "influx"
timeout = "15s"
@@ -737,7 +783,8 @@
{% endif %}
"/scripts/oldpcap.sh",
"/scripts/influxdbsize.sh",
- "/scripts/raid.sh"
+ "/scripts/raid.sh",
+ "/scripts/beatseps.sh"
]
data_format = "influx"
timeout = "15s"
diff --git a/salt/telegraf/init.sls b/salt/telegraf/init.sls
index 2814eb159..cea4d3f45 100644
--- a/salt/telegraf/init.sls
+++ b/salt/telegraf/init.sls
@@ -72,6 +72,8 @@ so-telegraf:
- /opt/so/conf/telegraf/scripts:/scripts:ro
- /opt/so/log/stenographer:/var/log/stenographer:ro
- /opt/so/log/suricata:/var/log/suricata:ro
+ - /opt/so/log/raid:/var/log/raid:ro
+ - /opt/so/log/sostatus:/var/log/sostatus:ro
- watch:
- file: tgrafconf
- file: tgrafsyncscripts
diff --git a/salt/telegraf/scripts/beatseps.sh b/salt/telegraf/scripts/beatseps.sh
new file mode 100644
index 000000000..5e8256c22
--- /dev/null
+++ b/salt/telegraf/scripts/beatseps.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
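+# Exit if another copy of this script is already running (replaces the pid lockfile approach previously used by these telegraf scripts)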
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
+
+if [ ! "$THEGREP" ]; then
+
+ PREVCOUNTFILE='/tmp/beatseps.txt'
+ EVENTCOUNTCURRENT="$(curl -s localhost:5066/stats | jq '.libbeat.output.events.acked')"
+ FAILEDEVENTCOUNT="$(curl -s localhost:5066/stats | jq '.libbeat.output.events.failed')"
+
+ if [ ! -z "$EVENTCOUNTCURRENT" ]; then
+
+ if [ -f "$PREVCOUNTFILE" ]; then
+ EVENTCOUNTPREVIOUS=`cat $PREVCOUNTFILE`
+ else
+ echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
+ exit 0
+ fi
+
+ echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
+ # the division by 30 is because the agent interval is 30 seconds
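+    # e.g. 3000 events acked since the previous 30-second run -> reports 100 eps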
+ EVENTS=$(((EVENTCOUNTCURRENT - EVENTCOUNTPREVIOUS)/30))
+ if [ "$EVENTS" -lt 0 ]; then
+ EVENTS=0
+ fi
+
+ echo "fbstats eps=${EVENTS%%.*},failed=$FAILEDEVENTCOUNT"
+ fi
+
+else
+ exit 0
+fi
+
diff --git a/salt/telegraf/scripts/checkfiles.sh b/salt/telegraf/scripts/checkfiles.sh
index c84b6bec9..5c6ab56c1 100644
--- a/salt/telegraf/scripts/checkfiles.sh
+++ b/salt/telegraf/scripts/checkfiles.sh
@@ -15,15 +15,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-APP=checkfiles
-lf=/tmp/$APP-pidLockFile
-# create empty lock file if none exists
-cat /dev/null >> $lf
-read lastPID < $lf
-# if lastPID is not null and a process with that pid exists , exit
-[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
-echo $$ > $lf
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
-FILES=$(ls -1x /host/nsm/faf/complete/ | wc -l)
+if [ ! "$THEGREP" ]; then
-echo "faffiles files=$FILES"
+ FILES=$(ls -1x /host/nsm/strelka/unprocessed | wc -l)
+
+ echo "faffiles files=$FILES"
+else
+ exit 0
+fi
\ No newline at end of file
diff --git a/salt/telegraf/scripts/eps.sh b/salt/telegraf/scripts/eps.sh
index dcc4b9051..b497c2519 100644
--- a/salt/telegraf/scripts/eps.sh
+++ b/salt/telegraf/scripts/eps.sh
@@ -15,36 +15,32 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-APP=eps
-lf=/tmp/$APP-pidLockFile
-# create empty lock file if none exists
-cat /dev/null >> $lf
-read lastPID < $lf
-# if lastPID is not null and a process with that pid exists , exit
-[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
-echo $$ > $lf
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
-PREVCOUNTFILE='/tmp/eps.txt'
-EVENTCOUNTCURRENT="$(curl -s localhost:9600/_node/stats | jq '.events.in')"
+if [ ! "$THEGREP" ]; then
-if [ ! -z "$EVENTCOUNTCURRENT" ]; then
+ PREVCOUNTFILE='/tmp/eps.txt'
+ EVENTCOUNTCURRENT="$(curl -s localhost:9600/_node/stats | jq '.events.in')"
- if [ -f "$PREVCOUNTFILE" ]; then
- EVENTCOUNTPREVIOUS=`cat $PREVCOUNTFILE`
- else
- echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
+ if [ ! -z "$EVENTCOUNTCURRENT" ]; then
+
+ if [ -f "$PREVCOUNTFILE" ]; then
+ EVENTCOUNTPREVIOUS=`cat $PREVCOUNTFILE`
+ else
+ echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
+ exit 0
+ fi
+
+ echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
+ # the division by 30 is because the agent interval is 30 seconds
+ EVENTS=$(((EVENTCOUNTCURRENT - EVENTCOUNTPREVIOUS)/30))
+ if [ "$EVENTS" -lt 0 ]; then
+ EVENTS=0
+ fi
+
+ echo "consumptioneps eps=${EVENTS%%.*}"
+ fi
+else
exit 0
- fi
-
- echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
- # the division by 30 is because the agent interval is 30 seconds
- EVENTS=$(((EVENTCOUNTCURRENT - EVENTCOUNTPREVIOUS)/30))
- if [ "$EVENTS" -lt 0 ]; then
- EVENTS=0
- fi
-
- echo "esteps eps=${EVENTS%%.*}"
-
fi
-exit 0
diff --git a/salt/telegraf/scripts/helixeps.sh b/salt/telegraf/scripts/helixeps.sh
index be5aaa1d2..1411cc40b 100644
--- a/salt/telegraf/scripts/helixeps.sh
+++ b/salt/telegraf/scripts/helixeps.sh
@@ -15,35 +15,30 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-APP=helixeps
-lf=/tmp/$APP-pidLockFile
-# create empty lock file if none exists
-cat /dev/null >> $lf
-read lastPID < $lf
-# if lastPID is not null and a process with that pid exists , exit
-[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
-echo $$ > $lf
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
-PREVCOUNTFILE='/tmp/helixevents.txt'
-EVENTCOUNTCURRENT="$(curl -s localhost:9600/_node/stats | jq '.pipelines.helix.events.out')"
+if [ ! "$THEGREP" ]; then
-if [ ! -z "$EVENTCOUNTCURRENT" ]; then
+ PREVCOUNTFILE='/tmp/helixevents.txt'
+ EVENTCOUNTCURRENT="$(curl -s localhost:9600/_node/stats | jq '.pipelines.helix.events.out')"
+
+ if [ ! -z "$EVENTCOUNTCURRENT" ]; then
+
+ if [ -f "$PREVCOUNTFILE" ]; then
+ EVENTCOUNTPREVIOUS=`cat $PREVCOUNTFILE`
+ else
+ echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
+ exit 0
+ fi
- if [ -f "$PREVCOUNTFILE" ]; then
- EVENTCOUNTPREVIOUS=`cat $PREVCOUNTFILE`
- else
echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
+ EVENTS=$(((EVENTCOUNTCURRENT - EVENTCOUNTPREVIOUS)/30))
+ if [ "$EVENTS" -lt 0 ]; then
+ EVENTS=0
+ fi
+
+ echo "helixeps eps=${EVENTS%%.*}"
+ fi
+else
exit 0
- fi
-
- echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE
- EVENTS=$(((EVENTCOUNTCURRENT - EVENTCOUNTPREVIOUS)/30))
- if [ "$EVENTS" -lt 0 ]; then
- EVENTS=0
- fi
-
- echo "helixeps eps=${EVENTS%%.*}"
-
-fi
-
-exit 0
+fi
\ No newline at end of file
diff --git a/salt/telegraf/scripts/influxdbsize.sh b/salt/telegraf/scripts/influxdbsize.sh
index 9bab7815b..46e230a8a 100644
--- a/salt/telegraf/scripts/influxdbsize.sh
+++ b/salt/telegraf/scripts/influxdbsize.sh
@@ -15,15 +15,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-APP=influxsize
-lf=/tmp/$APP-pidLockFile
-# create empty lock file if none exists
-cat /dev/null >> $lf
-read lastPID < $lf
-# if lastPID is not null and a process with that pid exists , exit
-[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
-echo $$ > $lf
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
-INFLUXSIZE=$(du -s -k /host/nsm/influxdb | awk {'print $1'})
+if [ ! "$THEGREP" ]; then
-echo "influxsize kbytes=$INFLUXSIZE"
+ INFLUXSIZE=$(du -s -k /host/nsm/influxdb | awk {'print $1'})
+
+ echo "influxsize kbytes=$INFLUXSIZE"
+else
+ exit 0
+fi
\ No newline at end of file
diff --git a/salt/telegraf/scripts/oldpcap.sh b/salt/telegraf/scripts/oldpcap.sh
index 0557137e7..f23c0c83f 100644
--- a/salt/telegraf/scripts/oldpcap.sh
+++ b/salt/telegraf/scripts/oldpcap.sh
@@ -15,18 +15,16 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-APP=oldpcap
-lf=/tmp/$APP-pidLockFile
-# create empty lock file if none exists
-cat /dev/null >> $lf
-read lastPID < $lf
-# if lastPID is not null and a process with that pid exists , exit
-[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
-echo $$ > $lf
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
-# Get the data
-OLDPCAP=$(find /host/nsm/pcap -type f -exec stat -c'%n %Z' {} + | sort | grep -v "\." | head -n 1 | awk {'print $2'})
-DATE=$(date +%s)
-AGE=$(($DATE - $OLDPCAP))
+if [ ! "$THEGREP" ]; then
-echo "pcapage seconds=$AGE"
+ # Get the data
+ OLDPCAP=$(find /host/nsm/pcap -type f -exec stat -c'%n %Z' {} + | sort | grep -v "\." | head -n 1 | awk {'print $2'})
+ DATE=$(date +%s)
+ AGE=$(($DATE - $OLDPCAP))
+
+ echo "pcapage seconds=$AGE"
+else
+ exit 0
+fi
\ No newline at end of file
diff --git a/salt/telegraf/scripts/raid.sh b/salt/telegraf/scripts/raid.sh
index c53644889..03e309c38 100644
--- a/salt/telegraf/scripts/raid.sh
+++ b/salt/telegraf/scripts/raid.sh
@@ -15,19 +15,15 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-APP=raid
-lf=/tmp/$APP-pidLockFile
-# create empty lock file if none exists
-cat /dev/null >> $lf
-read lastPID < $lf
-# if lastPID is not null and a process with that pid exists , exit
-[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
-echo $$ > $lf
-RAIDLOG=/var/log/raid/status.log
-RAIDSTATUS=$(cat /var/log/raid/status.log)
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
-if [ -f "$RAIDLOG" ]; then
- echo "raid raidstatus=$RAIDSTATUS "
+if [ ! "$THEGREP" ]; then
+
+  RAIDLOG=/var/log/raid/status.log
+
+  if [ -f "$RAIDLOG" ]; then
+    RAIDSTATUS=$(cat "$RAIDLOG")
+    echo "raid $RAIDSTATUS"
+ else
+ exit 0
+ fi
else
exit 0
fi
diff --git a/salt/telegraf/scripts/redis.sh b/salt/telegraf/scripts/redis.sh
index 04079c63b..b448bba2d 100644
--- a/salt/telegraf/scripts/redis.sh
+++ b/salt/telegraf/scripts/redis.sh
@@ -15,17 +15,14 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
-APP=redis
-lf=/tmp/$APP-pidLockFile
-# create empty lock file if none exists
-cat /dev/null >> $lf
-read lastPID < $lf
-# if lastPID is not null and a process with that pid exists , exit
-[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
-echo $$ > $lf
+if [ ! "$THEGREP" ]; then
-UNPARSED=$(redis-cli llen logstash:unparsed | awk '{print $1}')
-PARSED=$(redis-cli llen logstash:parsed | awk '{print $1}')
+ UNPARSED=$(redis-cli llen logstash:unparsed | awk '{print $1}')
+ PARSED=$(redis-cli llen logstash:parsed | awk '{print $1}')
-echo "redisqueue unparsed=$UNPARSED,parsed=$PARSED"
+ echo "redisqueue unparsed=$UNPARSED,parsed=$PARSED"
+else
+ exit 0
+fi
diff --git a/salt/telegraf/scripts/sostatus.sh b/salt/telegraf/scripts/sostatus.sh
new file mode 100644
index 000000000..a7222b67d
--- /dev/null
+++ b/salt/telegraf/scripts/sostatus.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
+
+if [ ! "$THEGREP" ]; then
+
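+  # status.log holds the most recent so-status result; telegraf mounts /opt/so/log/sostatus into the container at /var/log/sostatus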
+  SOSTATUSLOG=/var/log/sostatus/status.log
+
+  if [ -f "$SOSTATUSLOG" ]; then
+    SOSTATUSSTATUS=$(cat "$SOSTATUSLOG")
+    echo "sostatus status=$SOSTATUSSTATUS"
+ else
+ exit 0
+ fi
+else
+ exit 0
+fi
diff --git a/salt/telegraf/scripts/stenoloss.sh b/salt/telegraf/scripts/stenoloss.sh
index ad88ccc8d..028637e16 100644
--- a/salt/telegraf/scripts/stenoloss.sh
+++ b/salt/telegraf/scripts/stenoloss.sh
@@ -15,31 +15,29 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-APP=stenoloss
-lf=/tmp/$APP-pidLockFile
-# create empty lock file if none exists
-cat /dev/null >> $lf
-read lastPID < $lf
-# if lastPID is not null and a process with that pid exists , exit
-[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
-echo $$ > $lf
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
-TSFILE=/var/log/telegraf/laststenodrop.log
-if [ -f "$TSFILE" ]; then
- LASTTS=$(cat $TSFILE)
+if [ ! "$THEGREP" ]; then
+
+ TSFILE=/var/log/telegraf/laststenodrop.log
+ if [ -f "$TSFILE" ]; then
+ LASTTS=$(cat $TSFILE)
+ else
+ LASTTS=0
+ fi
+
+ # Get the data
+ LOGLINE=$(tac /var/log/stenographer/stenographer.log | grep -m1 drop)
+ CURRENTTS=$(echo $LOGLINE | awk '{print $1}')
+
+ if [[ "$CURRENTTS" != "$LASTTS" ]]; then
+ DROP=$(echo $LOGLINE | awk '{print $14}' | awk -F "=" '{print $2}')
+ echo $CURRENTTS > $TSFILE
+ else
+ DROP=0
+ fi
+
+ echo "stenodrop drop=$DROP"
else
- LASTTS=0
-fi
-
-# Get the data
-LOGLINE=$(tac /var/log/stenographer/stenographer.log | grep -m1 drop)
-CURRENTTS=$(echo $LOGLINE | awk '{print $1}')
-
-if [[ "$CURRENTTS" != "$LASTTS" ]]; then
- DROP=$(echo $LOGLINE | awk '{print $14}' | awk -F "=" '{print $2}')
- echo $CURRENTTS > $TSFILE
-else
- DROP=0
-fi
-
-echo "stenodrop drop=$DROP"
\ No newline at end of file
+ exit 0
+fi
\ No newline at end of file
diff --git a/salt/telegraf/scripts/suriloss.sh b/salt/telegraf/scripts/suriloss.sh
index 08f8c23eb..2d0a56106 100644
--- a/salt/telegraf/scripts/suriloss.sh
+++ b/salt/telegraf/scripts/suriloss.sh
@@ -16,37 +16,33 @@
# along with this program. If not, see .
-APP=suriloss
-lf=/tmp/$APP-pidLockFile
-# create empty lock file if none exists
-cat /dev/null >> $lf
-read lastPID < $lf
-# if lastPID is not null and a process with that pid exists , exit
-[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
-echo $$ > $lf
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
-SURILOG=$(tac /var/log/suricata/stats.log | grep kernel | head -4)
-CHECKIT=$(echo $SURILOG | grep -o 'drop' | wc -l)
+if [ ! "$THEGREP" ]; then
-if [ $CHECKIT == 2 ]; then
- declare RESULT=($SURILOG)
+ SURILOG=$(tac /var/log/suricata/stats.log | grep kernel | head -4)
+ CHECKIT=$(echo $SURILOG | grep -o 'drop' | wc -l)
- CURRENTDROP=${RESULT[4]}
- PASTDROP=${RESULT[14]}
- DROPPED=$((CURRENTDROP - PASTDROP))
- if [ $DROPPED == 0 ]; then
- LOSS=0
- echo "suridrop drop=0"
- else
- CURRENTPACKETS=${RESULT[9]}
- PASTPACKETS=${RESULT[19]}
- TOTALCURRENT=$((CURRENTPACKETS + CURRENTDROP))
- TOTALPAST=$((PASTPACKETS + PASTDROP))
- TOTAL=$((TOTALCURRENT - TOTALPAST))
+ if [ $CHECKIT == 2 ]; then
+ declare RESULT=($SURILOG)
- LOSS=$(echo 4 k $DROPPED $TOTAL / p | dc)
- echo "suridrop drop=$LOSS"
- fi
+ CURRENTDROP=${RESULT[4]}
+ PASTDROP=${RESULT[14]}
+ DROPPED=$((CURRENTDROP - PASTDROP))
+ if [ $DROPPED == 0 ]; then
+ LOSS=0
+ echo "suridrop drop=0"
+ else
+ CURRENTPACKETS=${RESULT[9]}
+ PASTPACKETS=${RESULT[19]}
+ TOTALCURRENT=$((CURRENTPACKETS + CURRENTDROP))
+ TOTALPAST=$((PASTPACKETS + PASTDROP))
+ TOTAL=$((TOTALCURRENT - TOTALPAST))
+
+ LOSS=$(echo 4 k $DROPPED $TOTAL / p | dc)
+ echo "suridrop drop=$LOSS"
+ fi
+ fi
else
echo "suridrop drop=0"
fi
\ No newline at end of file
diff --git a/salt/telegraf/scripts/zeekcaptureloss.sh b/salt/telegraf/scripts/zeekcaptureloss.sh
index aa8a222a3..6cb2dd2e2 100644
--- a/salt/telegraf/scripts/zeekcaptureloss.sh
+++ b/salt/telegraf/scripts/zeekcaptureloss.sh
@@ -18,35 +18,33 @@
# This script returns the average of all the workers average capture loss to telegraf / influxdb in influx format include nanosecond precision timestamp
-APP=zeekcaploss
-lf=/tmp/$APP-pidLockFile
-# create empty lock file if none exists
-cat /dev/null >> $lf
-read lastPID < $lf
-# if lastPID is not null and a process with that pid exists , exit
-[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
-echo $$ > $lf
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
-if [ -d "/host/nsm/zeek/spool/logger" ]; then
- WORKERS={{ salt['pillar.get']('sensor:zeek_lbprocs', salt['pillar.get']('sensor:zeek_pins') | length) }}
- ZEEKLOG=/host/nsm/zeek/spool/logger/capture_loss.log
-elif [ -d "/host/nsm/zeek/spool/zeeksa" ]; then
- WORKERS=1
- ZEEKLOG=/host/nsm/zeek/spool/zeeksa/capture_loss.log
-else
- echo 'Zeek capture_loss.log not found' >/dev/stderr
- exit 2
-fi
+if [ ! "$THEGREP" ]; then
-LASTCAPTURELOSSLOG=/var/log/telegraf/lastcaptureloss.txt
-if [ -f "$ZEEKLOG" ]; then
- CURRENTTS=$(tail -1 $ZEEKLOG | jq .ts | sed 's/"//g')
- if [ -f "$LASTCAPTURELOSSLOG" ]; then
- LASTTS=$(cat $LASTCAPTURELOSSLOG)
- if [[ "$LASTTS" != "$CURRENTTS" ]]; then
- LOSS=$(tail -$WORKERS $ZEEKLOG | awk -F, '{print $NF}' | sed 's/}//' | awk -v WORKERS=$WORKERS -F: '{LOSS += $2 / WORKERS} END { print LOSS}')
- echo "zeekcaptureloss loss=$LOSS"
+ if [ -d "/host/nsm/zeek/spool/logger" ]; then
+ WORKERS={{ salt['pillar.get']('sensor:zeek_lbprocs', salt['pillar.get']('sensor:zeek_pins') | length) }}
+ ZEEKLOG=/host/nsm/zeek/spool/logger/capture_loss.log
+ elif [ -d "/host/nsm/zeek/spool/zeeksa" ]; then
+ WORKERS=1
+ ZEEKLOG=/host/nsm/zeek/spool/zeeksa/capture_loss.log
+ else
+ echo 'Zeek capture_loss.log not found' >/dev/stderr
+ exit 2
fi
- fi
- echo "$CURRENTTS" > $LASTCAPTURELOSSLOG
+
+ LASTCAPTURELOSSLOG=/var/log/telegraf/lastcaptureloss.txt
+ if [ -f "$ZEEKLOG" ]; then
+ CURRENTTS=$(tail -1 $ZEEKLOG | jq .ts | sed 's/"//g')
+ if [ -f "$LASTCAPTURELOSSLOG" ]; then
+ LASTTS=$(cat $LASTCAPTURELOSSLOG)
+ if [[ "$LASTTS" != "$CURRENTTS" ]]; then
+ LOSS=$(tail -$WORKERS $ZEEKLOG | awk -F, '{print $NF}' | sed 's/}//' | awk -v WORKERS=$WORKERS -F: '{LOSS += $2 / WORKERS} END { print LOSS}')
+ echo "zeekcaptureloss loss=$LOSS"
+ fi
+ fi
+ echo "$CURRENTTS" > $LASTCAPTURELOSSLOG
+ fi
+else
+ exit 0
fi
diff --git a/salt/telegraf/scripts/zeekloss.sh b/salt/telegraf/scripts/zeekloss.sh
index 0c1a714ba..3dbd42833 100644
--- a/salt/telegraf/scripts/zeekloss.sh
+++ b/salt/telegraf/scripts/zeekloss.sh
@@ -17,34 +17,32 @@
# This script returns the packets dropped by Zeek, but it isn't a percentage. $LOSS * 100 would be the percentage
-APP=zeekloss
-lf=/tmp/$APP-pidLockFile
-# create empty lock file if none exists
-cat /dev/null >> $lf
-read lastPID < $lf
-# if lastPID is not null and a process with that pid exists , exit
-[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
-echo $$ > $lf
+THEGREP=$(ps -ef | grep $0 | grep -v $$ | grep -v grep)
-ZEEKLOG=$(tac /host/nsm/zeek/logs/packetloss.log | head -2)
-declare RESULT=($ZEEKLOG)
-CURRENTDROP=${RESULT[3]}
-# zeek likely not running if this is true
-if [[ $CURRENTDROP == "rcvd:" ]]; then
- CURRENTDROP=0
- PASTDROP=0
- DROPPED=0
+if [ ! "$THEGREP" ]; then
+
+ ZEEKLOG=$(tac /host/nsm/zeek/logs/packetloss.log | head -2)
+ declare RESULT=($ZEEKLOG)
+ CURRENTDROP=${RESULT[3]}
+ # zeek likely not running if this is true
+ if [[ $CURRENTDROP == "rcvd:" ]]; then
+ CURRENTDROP=0
+ PASTDROP=0
+ DROPPED=0
+ else
+ PASTDROP=${RESULT[9]}
+ DROPPED=$((CURRENTDROP - PASTDROP))
+ fi
+ if [[ "$DROPPED" -le 0 ]]; then
+ LOSS=0
+ echo "zeekdrop drop=0"
+ else
+ CURRENTPACKETS=${RESULT[5]}
+ PASTPACKETS=${RESULT[11]}
+ TOTAL=$((CURRENTPACKETS - PASTPACKETS))
+ LOSS=$(echo 4 k $DROPPED $TOTAL / p | dc)
+ echo "zeekdrop drop=$LOSS"
+ fi
else
- PASTDROP=${RESULT[9]}
- DROPPED=$((CURRENTDROP - PASTDROP))
-fi
-if [[ "$DROPPED" -le 0 ]]; then
- LOSS=0
- echo "zeekdrop drop=0"
-else
- CURRENTPACKETS=${RESULT[5]}
- PASTPACKETS=${RESULT[11]}
- TOTAL=$((CURRENTPACKETS - PASTPACKETS))
- LOSS=$(echo 4 k $DROPPED $TOTAL / p | dc)
- echo "zeekdrop drop=$LOSS"
+ exit 0
fi
\ No newline at end of file
diff --git a/salt/top.sls b/salt/top.sls
index 68c392c25..8a12aaa26 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -14,27 +14,21 @@
{% set CURATOR = salt['pillar.get']('curator:enabled', True) %}
{% set REDIS = salt['pillar.get']('redis:enabled', True) %}
{% set STRELKA = salt['pillar.get']('strelka:enabled', '0') %}
-{% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
{% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
{% set saltversion = saltversion.salt.minion.version %}
+{% set INSTALLEDSALTVERSION = grains.saltversion %}
base:
'not G@saltversion:{{saltversion}}':
- match: compound
- salt.minion-state-apply-test
- {% if ISAIRGAP is sameas true %}
- - airgap
- {% endif %}
+ - repo.client
- salt.minion
'G@os:CentOS and G@saltversion:{{saltversion}}':
- match: compound
- {% if ISAIRGAP is sameas true %}
- - airgap
- {% else %}
- - yum
- {% endif %}
+ - repo.client
- yum.packages
'* and G@saltversion:{{saltversion}}':
diff --git a/salt/yum/init.sls b/salt/yum/init.sls
deleted file mode 100644
index 339a6f2a7..000000000
--- a/salt/yum/init.sls
+++ /dev/null
@@ -1,17 +0,0 @@
-{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls in allowed_states %}
-
-yumconf:
- file.managed:
- - name: /etc/yum.conf
- - source: salt://yum/etc/yum.conf.jinja
- - mode: 644
- - template: jinja
-
-{% else %}
-
-{{sls}}_state_not_allowed:
- test.fail_without_changes:
- - name: {{sls}}_state_not_allowed
-
-{% endif %}
\ No newline at end of file
diff --git a/setup/automation/distributed-airgap-search b/setup/automation/distributed-airgap-search
index 7a0888fee..1acee9b1a 100644
--- a/setup/automation/distributed-airgap-search
+++ b/setup/automation/distributed-airgap-search
@@ -35,6 +35,7 @@ ADMINPASS2=onionuser
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-search
+INTERWEBS=AIRGAP
install_type=SEARCHNODE
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
diff --git a/setup/automation/distributed-airgap-sensor b/setup/automation/distributed-airgap-sensor
index 91b9c24a9..c8186bf8a 100644
--- a/setup/automation/distributed-airgap-sensor
+++ b/setup/automation/distributed-airgap-sensor
@@ -35,6 +35,7 @@ ZEEKVERSION=ZEEK
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-sensor
+INTERWEBS=AIRGAP
install_type=SENSOR
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
diff --git a/setup/automation/eval-net-centos b/setup/automation/eval-net-centos
index abd0c4765..82d2cc9ec 100644
--- a/setup/automation/eval-net-centos
+++ b/setup/automation/eval-net-centos
@@ -41,7 +41,7 @@ install_type=EVAL
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
-MANAGERUPDATES=1
+MANAGERUPDATES=0
# MDNS=
# MGATEWAY=
# MIP=
diff --git a/setup/automation/import-airgap b/setup/automation/import-airgap
index bfd0e3641..9c394ef2f 100644
--- a/setup/automation/import-airgap
+++ b/setup/automation/import-airgap
@@ -42,7 +42,7 @@ INTERWEBS=AIRGAP
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
-MANAGERUPDATES=1
+MANAGERUPDATES=0
# MDNS=
# MGATEWAY=
# MIP=
diff --git a/setup/automation/import-ami b/setup/automation/import-ami
index 88734c352..10758be9a 100644
--- a/setup/automation/import-ami
+++ b/setup/automation/import-ami
@@ -41,7 +41,7 @@ install_type=IMPORT
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
-MANAGERUPDATES=1
+MANAGERUPDATES=0
# MDNS=
# MGATEWAY=
# MIP=
diff --git a/setup/automation/import-iso b/setup/automation/import-iso
index 011623091..fbfdd364b 100644
--- a/setup/automation/import-iso
+++ b/setup/automation/import-iso
@@ -41,7 +41,7 @@ install_type=IMPORT
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
-MANAGERUPDATES=1
+MANAGERUPDATES=0
# MDNS=
# MGATEWAY=
# MIP=
diff --git a/setup/automation/import-net-centos b/setup/automation/import-net-centos
index 37ca6ac51..f6394bde1 100644
--- a/setup/automation/import-net-centos
+++ b/setup/automation/import-net-centos
@@ -41,7 +41,7 @@ install_type=IMPORT
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
-MANAGERUPDATES=1
+MANAGERUPDATES=0
# MDNS=
# MGATEWAY=
# MIP=
diff --git a/setup/so-functions b/setup/so-functions
index 1d8023a67..027b81171 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -44,6 +44,21 @@ logCmd() {
}
### End Logging Section ###
+airgap_repo() {
+ # Remove all the repo files
+ rm -rf /etc/yum.repos.d/*
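+  # Build /etc/yum.repos.d/airgap_repo.repo pointing yum at the manager's https /repo share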
+ echo "[airgap_repo]" > /etc/yum.repos.d/airgap_repo.repo
+ if $is_manager; then
+ echo "baseurl=https://$HOSTNAME/repo" >> /etc/yum.repos.d/airgap_repo.repo
+ else
+ echo "baseurl=https://$MSRV/repo" >> /etc/yum.repos.d/airgap_repo.repo
+ fi
+ echo "gpgcheck=1" >> /etc/yum.repos.d/airgap_repo.repo
+ echo "sslverify=0" >> /etc/yum.repos.d/airgap_repo.repo
+ echo "name=Airgap Repo" >> /etc/yum.repos.d/airgap_repo.repo
+ echo "enabled=1" >> /etc/yum.repos.d/airgap_repo.repo
+}
+
airgap_rules() {
# Copy the rules for suricata if using Airgap
mkdir -p /nsm/repo/rules
@@ -147,6 +162,25 @@ check_hive_init() {
docker rm so-thehive
}
+check_manager_state() {
+ echo "Checking state of manager services. This may take a moment..."
+ retry 2 15 "__check_so_status" >> $setup_log 2>&1 && retry 2 15 "__check_salt_master" >> $setup_log 2>&1 && return 0 || return 1
+}
+
+__check_so_status() {
+ local so_status_output
+ so_status_output=$($sshcmd -i /root/.ssh/so.key soremote@"$MSRV" cat /opt/so/log/sostatus/status.log)
+ [[ -z $so_status_output ]] && so_status_output=1
+ return $so_status_output
+}
+
+__check_salt_master() {
+  # systemctl is-active --quiet produces no output, so return the remote command's exit code instead of capturing stdout
+  $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" systemctl is-active --quiet salt-master
+  return $?
+}
+
check_network_manager_conf() {
local gmdconf="/usr/lib/NetworkManager/conf.d/10-globally-managed-devices.conf"
local nmconf="/etc/NetworkManager/NetworkManager.conf"
@@ -159,11 +193,6 @@ check_network_manager_conf() {
systemctl restart NetworkManager
} >> "$setup_log" 2>&1
fi
-
- #if test -f "$nmconf"; then
-# sed -i 's/managed=false/managed=true/g' "$nmconf" >> "$setup_log" 2>&1
-# systemctl restart NetworkManager >> "$setup_log" 2>&1
-# fi
if [[ ! -d "$preupdir" ]]; then
mkdir "$preupdir" >> "$setup_log" 2>&1
@@ -400,7 +429,7 @@ collect_hostname() {
if [[ $HOSTNAME == 'securityonion' ]]; then # Will only check HOSTNAME=securityonion once
if ! (whiptail_avoid_default_hostname); then
- whiptail_set_hostname
+ whiptail_set_hostname "$HOSTNAME"
fi
fi
@@ -486,6 +515,22 @@ collect_node_ls_pipeline_worker_count() {
done
}
+collect_ntp_servers() {
+ if whiptail_ntp_ask; then
+ [[ $is_airgap ]] && ntp_string=""
+ whiptail_ntp_servers "$ntp_string"
+
+ while ! valid_ntp_list "$ntp_string"; do
+ whiptail_invalid_input
+ whiptail_ntp_servers "$ntp_string"
+ done
+
+ IFS="," read -r -a ntp_servers <<< "$ntp_string" # Split string on commas into array
+ else
+ ntp_servers=()
+ fi
+}
+
collect_oinkcode() {
whiptail_oinkcode
@@ -537,7 +582,7 @@ collect_patch_schedule_name_import() {
collect_proxy() {
[[ -n $TESTING ]] && return
- collect_proxy_details
+ collect_proxy_details || return
while ! proxy_validate; do
if whiptail_invalid_proxy; then
collect_proxy_details no_ask
@@ -581,7 +626,9 @@ collect_proxy_details() {
else
so_proxy="$proxy_addr"
fi
- export proxy
+ export so_proxy
+ else
+ return 1
fi
}
@@ -702,6 +749,42 @@ configure_minion() {
} >> "$setup_log" 2>&1
}
+configure_ntp() {
+ local chrony_conf=/etc/chrony.conf
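+  # Illustrative result for ntp_servers=(0.pool.ntp.org 1.pool.ntp.org):
+  #   server 0.pool.ntp.org iburst
+  #   server 1.pool.ntp.org iburst
+  # followed by the driftfile/makestep/rtcsync/logdir options set below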
+
+ # Install chrony if it isn't already installed
+ if ! command -v chronyc &> /dev/null; then
+ yum -y install chrony
+ fi
+
+ [[ -f $chrony_conf ]] && mv $chrony_conf "$chrony_conf.bak"
+
+ printf '%s\n' "# NTP server list" > $chrony_conf
+
+ # Build list of servers
+ for addr in "${ntp_servers[@]}"; do
+ echo "server $addr iburst" >> $chrony_conf
+ done
+
+ printf '\n%s\n' "# Config options" >> $chrony_conf
+
+ printf '%s\n' \
+ 'driftfile /var/lib/chrony/drift' \
+ 'makestep 1.0 3' \
+ 'rtcsync' \
+ 'logdir /var/log/chrony' >> $chrony_conf
+
+ systemctl enable chronyd
+ systemctl restart chronyd
+
+ # Tell the chrony daemon to sync time & update the system time
+ # Since these commands only make a call to chronyd, wait after each command to make sure the changes are made
+ printf "Syncing chrony time to server: "
+ chronyc -a 'burst 4/4' && sleep 30
+ printf "Forcing chrony to update the time: "
+ chronyc -a makestep && sleep 30
+}
+
checkin_at_boot() {
local minion_config=/etc/salt/minion
@@ -950,7 +1033,7 @@ create_repo() {
detect_cloud() {
echo "Testing if setup is running on a cloud instance..." | tee -a "$setup_log"
- if ( curl --fail -s -m 5 http://169.254.169.254/latest/meta-data/instance-id > /dev/null ) || ( dmidecode -s bios-vendor | grep -q Google > /dev/null); then export is_cloud="true"; fi
+ if ( curl --fail -s -m 5 http://169.254.169.254/latest/meta-data/instance-id > /dev/null ) || ( dmidecode -s bios-vendor | grep -q Google > /dev/null) || [ -f /var/log/waagent.log ]; then export is_cloud="true"; fi
}
detect_os() {
@@ -1054,40 +1137,11 @@ disable_ipv6() {
} >> /etc/sysctl.conf
}
-#disable_misc_network_features() {
-# filter_unused_nics
-# if [ ${#filtered_nics[@]} -ne 0 ]; then
-# for unused_nic in "${filtered_nics[@]}"; do
-# if [ -n "$unused_nic" ]; then
-# echo "Disabling unused NIC: $unused_nic" >> "$setup_log" 2>&1
-#
-# # Disable DHCPv4/v6 and autoconnect
-# nmcli con mod "$unused_nic" \
-# ipv4.method disabled \
-# ipv6.method ignore \
-# connection.autoconnect "no" >> "$setup_log" 2>&1
-#
-# # Flush any existing IPs
-# ip addr flush "$unused_nic" >> "$setup_log" 2>&1
-# fi
-# done
-# fi
-# # Disable IPv6
-# {
-# echo "net.ipv6.conf.all.disable_ipv6 = 1"
-# echo "net.ipv6.conf.default.disable_ipv6 = 1"
-# echo "net.ipv6.conf.lo.disable_ipv6 = 1"
-# } >> /etc/sysctl.conf
-#}
-
docker_install() {
if [ $OS = 'centos' ]; then
{
yum clean expire-cache;
- if [[ ! $is_airgap ]]; then
- yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
- fi
if [[ ! $is_iso ]]; then
yum -y install docker-ce-20.10.5-3.el7 containerd.io-1.4.4-3.1.el7;
fi
@@ -1436,8 +1490,6 @@ install_cleanup() {
info "Removing so-setup permission entry from sudoers file"
sed -i '/so-setup/d' /etc/sudoers
fi
-
- so-ssh-harden -q
}
import_registry_docker() {
@@ -1539,8 +1591,7 @@ manager_pillar() {
printf '%s\n'\
" kratoskey: '$KRATOSKEY'"\
"" >> "$pillar_file"
-
- }
+}
manager_global() {
local global_pillar="$local_salt_dir/pillar/global.sls"
@@ -1564,7 +1615,6 @@ manager_global() {
"global:"\
" soversion: '$SOVERSION'"\
" hnmanager: '$HNMANAGER'"\
- " ntpserver: '$NTPSERVER'"\
" dockernet: '$DOCKERNET'"\
" mdengine: '$ZEEKVERSION'"\
" ids: '$NIDS'"\
@@ -1718,7 +1768,6 @@ manager_global() {
" bip: '$DOCKERBIP'"\
"redis_settings:"\
" redis_maxmemory: 812" >> "$global_pillar"
-
printf '%s\n' '----' >> "$setup_log" 2>&1
}
@@ -1781,6 +1830,19 @@ network_setup() {
} >> "$setup_log" 2>&1
}
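+# Write the collected NTP servers into this minion's pillar file so they are available to the Salt states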
+ntp_pillar() {
+ local pillar_file="$temp_install_dir"/pillar/minions/"$MINION_ID".sls
+
+ if [[ ${#ntp_servers[@]} -gt 0 ]]; then
+ printf '%s\n'\
+ "ntp:"\
+ " servers:" >> "$pillar_file"
+ for addr in "${ntp_servers[@]}"; do
+ printf '%s\n' " - '$addr'" >> "$pillar_file"
+ done
+ fi
+}
+
parse_install_username() {
# parse out the install username so things copy correctly
INSTALLUSERNAME=${SUDO_USER:-${USER}}
@@ -1829,12 +1891,13 @@ print_salt_state_apply() {
}
proxy_validate() {
+ echo "Testing proxy..."
local test_url="https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS"
- proxy_test_err=$(curl -sS "$test_url" --proxy "$so_proxy" 2>&1)
+ proxy_test_err=$(curl -sS "$test_url" --proxy "$so_proxy" --connect-timeout 5 2>&1) # set short connection timeout so user doesn't sit waiting for proxy test to timeout
local ret=$?
if [[ $ret != 0 ]]; then
- error "Could not reach $test_url using proxy $so_proxy"
+ error "Could not reach $test_url using proxy provided"
error "Received error: $proxy_test_err"
if [[ -n $TESTING ]]; then
error "Exiting setup"
@@ -1988,11 +2051,6 @@ saltify() {
# Install updates and Salt
if [ $OS = 'centos' ]; then
- set_progress_str 5 'Installing Salt repo'
- {
- sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.5/SALTSTACK-GPG-KEY.pub;
- cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo;
- } >> "$setup_log" 2>&1
set_progress_str 6 'Installing various dependencies'
if [[ ! $is_iso ]]; then
logCmd "yum -y install wget nmap-ncat"
@@ -2001,49 +2059,31 @@ saltify() {
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT')
reserve_group_ids >> "$setup_log" 2>&1
if [[ ! $is_iso ]]; then
- logCmd "yum -y install epel-release"
- logCmd "yum -y install sqlite argon2 curl mariadb-devel python3-pip"
- retry 50 10 "pip3 install --user influxdb" >> "$setup_log" 2>&1 || exit 1
+ logCmd "yum -y install sqlite argon2 curl mariadb-devel"
fi
# Download Ubuntu Keys in case manager updates = 1
mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
if [[ ! $is_airgap ]]; then
- logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub"
+ logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3003/SALTSTACK-GPG-KEY.pub"
logCmd "wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg"
logCmd "wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH"
- logCmd "cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo"
fi
set_progress_str 7 'Installing salt-master'
if [[ ! $is_iso ]]; then
- logCmd "yum -y install salt-master-3002.5"
+ logCmd "yum -y install salt-master-3003"
fi
systemctl enable salt-master >> "$setup_log" 2>&1
;;
*)
- if [ "$MANAGERUPDATES" = '1' ]; then
- {
- if [[ ! $is_airgap ]]; then
- # Create the GPG Public Key for the Salt Repo
- cp ./public_keys/salt.pem /etc/pki/rpm-gpg/saltstack-signing-key;
-
- # Copy repo files over
- cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo;
- else
- info "This is airgap"
- fi
- } >> "$setup_log" 2>&1
- fi
;;
esac
if [[ ! $is_airgap ]]; then
- cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo >> "$setup_log" 2>&1
yum clean expire-cache >> "$setup_log" 2>&1
fi
set_progress_str 8 'Installing salt-minion & python modules'
{
if [[ ! $is_iso ]]; then
- yum -y install epel-release
- yum -y install salt-minion-3002.5\
+ yum -y install salt-minion-3003\
python3\
python36-docker\
python36-dateutil\
@@ -2095,8 +2135,8 @@ saltify() {
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT' | 'HELIXSENSOR')
# Add saltstack repo(s)
- wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
- echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.5 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
+ wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3003/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
+ echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3003 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
# Add Docker repo
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1
@@ -2104,7 +2144,7 @@ saltify() {
# Get gpg keys
mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
- wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
+ wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3003/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1
@@ -2117,7 +2157,7 @@ saltify() {
set_progress_str 6 'Installing various dependencies'
retry 50 10 "apt-get -y install sqlite3 argon2 libssl-dev" >> "$setup_log" 2>&1 || exit 1
set_progress_str 7 'Installing salt-master'
- retry 50 10 "apt-get -y install salt-master=3002.5+ds-1" >> "$setup_log" 2>&1 || exit 1
+ retry 50 10 "apt-get -y install salt-master=3003+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-master" >> "$setup_log" 2>&1 || exit 1
;;
*)
@@ -2128,14 +2168,14 @@ saltify() {
echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1
- echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.5/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
+ echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3003/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
;;
esac
retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
set_progress_str 8 'Installing salt-minion & python modules'
- retry 50 10 "apt-get -y install salt-minion=3002.5+ds-1 salt-common=3002.5+ds-1" >> "$setup_log" 2>&1 || exit 1
+ retry 50 10 "apt-get -y install salt-minion=3003+ds-1 salt-common=3003+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
if [[ $OSVER != 'xenial' ]]; then
retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb" >> "$setup_log" 2>&1 || exit 1
@@ -2265,6 +2305,30 @@ secrets_pillar(){
fi
}
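+# Move the distro's existing yum repos aside and install the Security Onion repo (or the manager-hosted cache repo when MANAGERUPDATES=1 on a non-manager node)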
+securityonion_repo() {
+ # Remove all the current repos
+ if [[ "$OS" == "centos" ]]; then
+ if [[ "$INTERWEBS" == "AIRGAP" ]]; then
+ echo "This is airgap I don't need to add this repo"
+ else
+ mkdir -p /root/oldrepos
+ mv -v /etc/yum.repos.d/* /root/oldrepos/
+ ls -la /etc/yum.repos.d/
+ rm -rf /etc/yum.repos.d
+ yum clean all
+ yum repolist all
+ mkdir -p /etc/yum.repos.d
+ if [[ ! $is_manager && "$MANAGERUPDATES" == "1" ]]; then
+ cp -f ../salt/repo/client/files/centos/securityonioncache.repo /etc/yum.repos.d/
+ else
+ cp -f ../salt/repo/client/files/centos/securityonion.repo /etc/yum.repos.d/
+ fi
+ fi
+ else
+ echo "This is Ubuntu"
+ fi
+}
+
set_base_heapsizes() {
es_heapsize
ls_heapsize
@@ -2672,7 +2736,8 @@ update_sudoers() {
update_packages() {
if [ "$OS" = 'centos' ]; then
- yum -y update >> "$setup_log"
+ yum repolist >> /dev/null
+ yum -y update --exclude=salt* >> "$setup_log"
else
retry 50 10 "apt-get -y update" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y upgrade" >> "$setup_log" 2>&1 || exit 1
diff --git a/setup/so-preflight b/setup/so-preflight
index da25e6775..1ef840284 100644
--- a/setup/so-preflight
+++ b/setup/so-preflight
@@ -46,8 +46,8 @@ check_new_repos() {
if [[ $OS == 'centos' ]]; then
local repo_arr=(
"https://download.docker.com/linux/centos/docker-ce.repo"
- "https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.5/SALTSTACK-GPG-KEY.pub"
- "https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub"
+ "https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3003/SALTSTACK-GPG-KEY.pub"
+ "https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3003/SALTSTACK-GPG-KEY.pub"
"https://download.docker.com/linux/ubuntu/gpg"
"https://packages.wazuh.com/key/GPG-KEY-WAZUH"
"https://packages.wazuh.com/3.x/yum/"
@@ -59,7 +59,7 @@ check_new_repos() {
local repo_arr=(
"https://download.docker.com/linux/ubuntu/gpg"
"https://download.docker.com/linux/ubuntu"
- "https://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub"
+ "https://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3003/SALTSTACK-GPG-KEY.pub"
"https://packages.wazuh.com/key/GPG-KEY-WAZUH"
"https://packages.wazuh.com"
)
diff --git a/setup/so-setup b/setup/so-setup
index 82e414ca4..ad210048a 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -47,6 +47,7 @@ source ./so-variables
# Parse command line arguments
setup_type=$1
automation=$2
+WHATWOULDYOUSAYYAHDOHERE=setup
while [[ $# -gt 0 ]]; do
arg="$1"
@@ -167,10 +168,8 @@ set_ssh_cmds $automated
local_sbin="$(pwd)/../salt/common/tools/sbin"
export PATH=$PATH:$local_sbin
-set_network_dev_status_list
set_palette >> $setup_log 2>&1
-
# Kernel messages can overwrite whiptail screen #812
# https://github.com/Security-Onion-Solutions/securityonion/issues/812
dmesg -D
@@ -265,7 +264,7 @@ elif [ "$install_type" = 'ANALYST' ]; then
fi
# Check if this is an airgap install
-if [[ ( $is_manager || $is_import ) && $is_iso ]]; then
+if [[ $is_iso || $is_minion ]]; then
whiptail_airgap
if [[ "$INTERWEBS" == 'AIRGAP' ]]; then
is_airgap=true
@@ -291,13 +290,22 @@ if ! [[ -f $install_opt_file ]]; then
[[ -f $net_init_file ]] && whiptail_net_reinit && reinit_networking=true
- if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
+ if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
collect_hostname
+ fi
+
+ whiptail_node_description
+
+ if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
network_init_whiptail
else
source "$net_init_file"
fi
+ if [[ $is_minion ]] || [[ $reinit_networking ]] || [[ $is_iso ]] && ! [[ -f $net_init_file ]]; then
+ whiptail_management_interface_setup
+ fi
+
if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
network_init
fi
@@ -315,10 +323,6 @@ if ! [[ -f $install_opt_file ]]; then
[[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1
fi
- if [[ $is_minion ]] || [[ $reinit_networking ]] || [[ $is_iso ]] && ! [[ -f $net_init_file ]]; then
- whiptail_management_interface_setup
- fi
-
if [[ $is_minion ]]; then
add_mngr_ip_to_hosts
fi
@@ -334,7 +338,8 @@ if ! [[ -f $install_opt_file ]]; then
"MNIC=$MNIC" \
"HOSTNAME=$HOSTNAME" \
"MSRV=$MSRV" \
- "MSRVIP=$MSRVIP" > "$install_opt_file"
+ "MSRVIP=$MSRVIP" \
+ "NODE_DESCRIPTION=\"$NODE_DESCRIPTION\"" > "$install_opt_file"
[[ -n $so_proxy ]] && echo "so_proxy=$so_proxy" >> "$install_opt_file"
download_repo_tarball
exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}"
@@ -433,6 +438,7 @@ if [[ $is_helix ]]; then
fi
if [[ $is_helix || $is_sensor ]]; then
+ set_network_dev_status_list
whiptail_sensor_nics
fi
@@ -534,6 +540,8 @@ if [[ $is_sensor && ! $is_eval ]]; then
fi
fi
+[[ $is_iso ]] && collect_ntp_servers
+
if [[ $is_node && ! $is_eval ]]; then
whiptail_node_advanced
if [ "$NODESETUP" == 'NODEADVANCED' ]; then
@@ -550,7 +558,6 @@ if [[ $is_node && ! $is_eval ]]; then
LSPIPELINEWORKERS=$num_cpu_cores
LSPIPELINEBATCH=125
LSINPUTTHREADS=1
- LSPIPELINEBATCH=125
fi
fi
@@ -564,14 +571,19 @@ fi
if [[ $is_manager || $is_import ]]; then collect_so_allow; fi
-whiptail_make_changes
+# This block sets REDIRECTIT which is used by a function outside the below subshell
+set_redirect >> $setup_log 2>&1
+
+if [[ $is_minion ]] && ! check_manager_state; then
+ echo "Manager was not in a good state" >> "$setup_log" 2>&1
+ whiptail_manager_error
+fi
+
+whiptail_end_settings
# From here on changes will be made.
echo "1" > /root/accept_changes
-# This block sets REDIRECTIT which is used by a function outside the below subshell
-set_redirect >> $setup_log 2>&1
-
# Begin install
{
@@ -581,6 +593,8 @@ set_redirect >> $setup_log 2>&1
# Show initial progress message
set_progress_str 0 'Running initial configuration steps'
+ [[ ${#ntp_servers[@]} -gt 0 ]] && configure_ntp >> $setup_log 2>&1
+
reserve_ports
set_path
@@ -613,6 +627,8 @@ set_redirect >> $setup_log 2>&1
fi
host_pillar >> $setup_log 2>&1
+ ntp_pillar >> $setup_log 2>&1
+
if [[ $is_minion || $is_import ]]; then
set_updates >> $setup_log 2>&1
@@ -630,7 +646,14 @@ set_redirect >> $setup_log 2>&1
fi
set_progress_str 2 'Updating packages'
- update_packages >> $setup_log 2>&1
+ # Import the gpg keys
+ gpg_rpm_import >> $setup_log 2>&1
+ if [[ ! $is_airgap ]]; then
+ securityonion_repo >> $setup_log 2>&1
+ update_packages >> $setup_log 2>&1
+ else
+ airgap_repo >> $setup_log 2>&1
+ fi
if [[ $is_sensor || $is_helix || $is_import ]]; then
set_progress_str 3 'Generating sensor pillar'
@@ -888,6 +911,7 @@ set_redirect >> $setup_log 2>&1
set_progress_str 85 'Applying finishing touches'
filter_unused_nics >> $setup_log 2>&1
network_setup >> $setup_log 2>&1
+ so-ssh-harden >> $setup_log 2>&1
if [[ $is_manager || $is_import ]]; then
set_progress_str 87 'Adding user to SOC'
@@ -942,6 +966,7 @@ else
} | whiptail_gauge_post_setup "Running post-installation steps..."
whiptail_setup_complete
+ [[ $setup_type != 'iso' ]] && whiptail_ssh_warning
echo "Post-installation steps have completed." >> $setup_log 2>&1
fi
diff --git a/setup/so-variables b/setup/so-variables
index a2fdf03c6..676cba4f0 100644
--- a/setup/so-variables
+++ b/setup/so-variables
@@ -72,3 +72,6 @@ export install_opt_file
net_init_file=/root/net_init
export net_init_file
+
+ntp_string="0.pool.ntp.org,1.pool.ntp.org"
+export ntp_string
diff --git a/setup/so-whiptail b/setup/so-whiptail
index a0425b5af..6127a174a 100755
--- a/setup/so-whiptail
+++ b/setup/so-whiptail
@@ -19,13 +19,18 @@ whiptail_airgap() {
[ -n "$TESTING" ] && return
- INTERWEBS=$(whiptail --title "Security Onion Setup" --radiolist \
- "Choose your install conditions:" 20 75 4 \
- "STANDARD" "This manager has internet accesss" ON \
- "AIRGAP" "This manager does not have internet access" OFF 3>&1 1>&2 2>&3 )
+ local node_str='node'
+ [[ $is_manager || $is_import ]] && node_str='manager'
+
+ INTERWEBS=$(whiptail --title "Security Onion Setup" --menu \
+ "How should this $node_str be installed?" 10 60 2 \
+ "Standard " "This $node_str has internet accesss" \
+ "Airgap " "This $node_str does not have internet access" 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
+
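+ # Normalize the menu selection (e.g. "Standard ") to the uppercase values used elsewhere (STANDARD/AIRGAP)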
+ INTERWEBS=$(echo "${INTERWEBS^^}" | tr -d ' ')
}
whiptail_avoid_default_hostname() {
@@ -79,7 +84,7 @@ whiptail_bond_nics_mtu() {
whiptail_cancel() {
- whiptail --title "Security Onion Setup" --msgbox "Cancelling Setup. No changes have been made." 8 75
+ whiptail --title "Security Onion Setup" --msgbox "Cancelling Setup." 8 75
if [ -d "/root/installtmp" ]; then
{
echo "/root/installtmp exists";
@@ -88,7 +93,7 @@ whiptail_cancel() {
} >> $setup_log 2>&1
fi
- title "User cancelled setup, no changes made."
+ title "User cancelled setup."
exit
}
@@ -391,6 +396,7 @@ whiptail_dockernet_net() {
whiptail_check_exitstatus $exitstatus
}
+
whiptail_enable_components() {
[ -n "$TESTING" ] && return
@@ -423,6 +429,211 @@ whiptail_enable_components() {
done
}
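+# Build a summary of every setting collected during setup, show it for confirmation, and save it to /root/install_summary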
+whiptail_end_settings() {
+ [ -n "$TESTING" ] && return
+
+ # BASIC INFO (NETWORK, HOSTNAME, DESCRIPTION, ETC)
+
+ read -r -d '' end_msg <<- EOM
+ Node Type: $install_type
+ Hostname: $HOSTNAME
+ EOM
+
+ [[ -n $NODE_DESCRIPTION ]] && __append_end_msg "Description: $NODE_DESCRIPTION"
+
+ [[ $is_airgap ]] && __append_end_msg "Airgap: True"
+
+ if [[ $is_minion ]]; then
+ __append_end_msg "Manager Hostname: $MSRV"
+ __append_end_msg "Manager IP: $MSRVIP"
+ fi
+
+
+ [[ $is_iso ]] && __append_end_msg "Network: $address_type"
+
+ __append_end_msg "Management NIC: $MNIC"
+ __append_end_msg "Management IP: $MAINIP"
+
+ if [[ $address_type == 'STATIC' ]]; then
+ __append_end_msg "Gateway: $MGATEWAY"
+ __append_end_msg "DNS: $MDNS"
+ __append_end_msg "DNS Domain: $MSEARCH"
+ fi
+
+ if [[ -n $so_proxy ]]; then
+ __append_end_msg "Proxy:"
+ __append_end_msg " Server URL: $proxy_addr"
+ [[ -n $proxy_user ]] && __append_end_msg " User: $proxy_user"
+ else
+ __append_end_msg "Proxy: N/A"
+ fi
+
+ if [[ $is_sensor ]]; then
+ __append_end_msg "Bond NIC(s):"
+ for nic in "${BNICS[@]}"; do
+ __append_end_msg " - $nic"
+ done
+ [[ -n $MTU ]] && __append_end_msg "MTU: $MTU"
+ fi
+
+ local homenet_arr
+ if [[ -n $HNMANAGER ]]; then
+ __append_end_msg "Home Network(s):"
+ IFS="," read -r -a homenet_arr <<< "$HNMANAGER"
+ for net in "${homenet_arr[@]}"; do
+ __append_end_msg " - $net"
+ done
+ elif [[ -n $HNSENSOR ]]; then
+ __append_end_msg "Home Network(s):"
+ IFS="," read -r -a homenet_arr <<< "$HNSENSOR"
+ for net in "${homenet_arr[@]}"; do
+ __append_end_msg " - $net"
+ done
+ fi
+
+ [[ -n $REDIRECTIT ]] && __append_end_msg "Access URL: https://${REDIRECTIT}"
+
+ [[ -n $ALLOW_CIDR ]] && __append_end_msg "Allowed IP or Subnet: $ALLOW_CIDR"
+
+ [[ -n $WEBUSER ]] && __append_end_msg "Web User: $WEBUSER"
+
+ [[ -n $FLEETNODEUSER ]] && __append_end_msg "Fleet User: $FLEETNODEUSER"
+
+ if [[ $is_manager ]]; then
+ __append_end_msg "Enabled Optional Components:"
+ for component in "${COMPONENTS[@]}"; do
+ __append_end_msg " - $component"
+ done
+ fi
+
+ # METADATA / IDS
+
+ if [[ -n $ZEEKVERSION ]]; then
+ local md_tool_string=${ZEEKVERSION,,}
+ md_tool_string=${md_tool_string^}
+
+ __append_end_msg "Metadata Tool: $md_tool_string"
+ fi
+
+ [[ -n $RULESETUP ]] && __append_end_msg "IDS Ruleset: $RULESETUP"
+ [[ -n $OINKCODE ]] && __append_end_msg "Oinkcode: $OINKCODE"
+
+ # PATCH SCHEDULE
+
+ if [[ -n $PATCHSCHEDULENAME ]]; then
+ __append_end_msg "Patch Schedule:"
+ if [[ $PATCHSCHEDULENAME == 'auto' || $PATCHSCHEDULENAME == 'manual' ]]; then
+ __append_end_msg " Type: $PATCHSCHEDULENAME"
+ else
+ __append_end_msg " Name: $PATCHSCHEDULENAME"
+ fi
+ if [[ ${#PATCHSCHEDULEDAYS[@]} -gt 0 ]]; then
+ __append_end_msg " Day(s):"
+ for day in "${PATCHSCHEDULEDAYS[@]}"; do
+ __append_end_msg " - $day"
+ done
+ fi
+ if [[ ${#PATCHSCHEDULEHOURS[@]} -gt 0 ]]; then
+ __append_end_msg " Hours(s):"
+ for hour in "${PATCHSCHEDULEHOURS[@]}"; do
+ __append_end_msg " - $hour"
+ done
+ fi
+ fi
+
+ # MISC
+
+ [[ $is_helix ]] && __append_end_msg "Helix API key: $HELIXAPIKEY"
+ [[ -n $DOCKERNET ]] && __append_end_msg "Docker network: $DOCKERNET"
+ if [[ -n $MANAGERUPDATES ]]; then
+ __append_end_msg "OS Package Updates: Manager"
+ else
+ __append_end_msg "OS Package Updates: Open"
+ fi
+ if [[ ${#ntp_servers[@]} -gt 0 ]]; then
+ __append_end_msg "NTP Servers:"
+ for server in "${ntp_servers[@]}"; do
+ __append_end_msg " - $server"
+ done
+ fi
+
+ if [[ $NSMSETUP != 'ADVANCED' ]]; then
+ [[ -n $BASICZEEK ]] && __append_end_msg "Zeek Processes: $BASICZEEK"
+ [[ -n $BASICSURI ]] && __append_end_msg "Suricata Processes: $BASICSURI"
+ fi
+
+ # ADVANCED OR REGULAR
+
+ if [[ $NODESETUP == 'NODEADVANCED' ]]; then
+ __append_end_msg "Advanced Node Settings:"
+ __append_end_msg " Elasticsearch Heap Size: $NODE_ES_HEAP_SIZE"
+ __append_end_msg " Logstash Heap Size: $NODE_LS_HEAP_SIZE"
+ __append_end_msg " Logstash Worker Count: $LSPIPELINEWORKERS"
+ __append_end_msg " Logstash Batch Size: $LSPIPELINEBATCH"
+ __append_end_msg " Logstash Input Threads: $LSINPUTTHREADS"
+ __append_end_msg " Curator Day Cutoff: $CURCLOSEDAYS days"
+ __append_end_msg " Elasticsearch Storage Space: ${log_size_limit}GB"
+ else
+ __append_end_msg "Elasticsearch Heap Size: $NODE_ES_HEAP_SIZE"
+ __append_end_msg "Logstash Heap Size: $NODE_LS_HEAP_SIZE"
+ __append_end_msg "Logstash Worker Count: $LSPIPELINEWORKERS"
+ __append_end_msg "Logstash Batch Size: $LSPIPELINEBATCH"
+ __append_end_msg "Logstash Input Threads: $LSINPUTTHREADS"
+ __append_end_msg "Curator Close After: $CURCLOSEDAYS days"
+ __append_end_msg "Elasticsearch Storage Space: ${log_size_limit}GB"
+ fi
+
+
+ # ADVANCED
+ if [[ $MANAGERADV == 'ADVANCED' ]]; then
+ __append_end_msg "Advanced Manager Settings:"
+ [[ -n $ESCLUSTERNAME ]] && __append_end_msg " ES Cluster Name: $ESCLUSTERNAME"
+ if [[ ${#BLOGS[@]} -gt 0 ]]; then
+ __append_end_msg " Zeek Logs Enabled:"
+ for log in "${BLOGS[@]}"; do
+ __append_end_msg " - $log"
+ done
+ fi
+ fi
+
+ if [[ $NSMSETUP == 'ADVANCED' ]]; then
+ __append_end_msg "Advanced NSM Settings:"
+ if [[ ${#ZEEKPINS[@]} -gt 0 ]]; then
+ local zeek_pin_str
+ for core in "${ZEEKPINS[@]}"; do
+ zeek_pin_str="${zeek_pin_str}${core},"
+ done
+ zeek_pin_str=${zeek_pin_str%,}
+ __append_end_msg " Zeek Pinned Cores: ${zeek_pin_str}"
+ fi
+ if [[ ${#SURIPINS[@]} -gt 0 ]]; then
+ local suri_pin_str
+ for core in "${SURIPINS[@]}"; do
+ suri_pin_str="${suri_pin_str}${core},"
+ done
+ suri_pin_str=${suri_pin_str%,}
+ __append_end_msg " Suricata Pinned Cores: ${suri_pin_str}"
+ fi
+ fi
+
+ whiptail --title "The following options have been set, would you like to proceed?" --yesno "$end_msg" 24 75 --scrolltext
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+ echo "$end_msg" > /root/install_summary
+ printf '%s\n' 'Install summary:' "$end_msg" >> "$setup_log"
+}
+
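+# Append a single line to the running $end_msg summary by re-reading it through a heredoc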
+__append_end_msg() {
+ local newline=$1
+
+ read -r -d '' end_msg <<- EOM
+ $end_msg
+ $newline
+ EOM
+}
+
whiptail_eval_adv() {
[ -n "$TESTING" ] && return
@@ -934,6 +1145,22 @@ whiptail_manager_adv_service_zeeklogs() {
}
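+# Warn that the manager failed its status check and ask whether to continue anyway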
+whiptail_manager_error() {
+
+ [ -n "$TESTING" ] && return
+
+ local msg
+ read -r -d '' msg <<- EOM
+ Setup could not determine if the manager $MSRV is in a good state.
+
+ Continuing without verifying that all services on the manager are running may result in a failure.
+
+ Would you like to continue anyway?
+ EOM
+
+ whiptail --title "Security Onion Setup" --yesno "$msg" 13 75 || whiptail_check_exitstatus 1
+}
+
whiptail_manager_updates() {
[ -n "$TESTING" ] && return
@@ -1044,6 +1271,16 @@ whiptail_node_advanced() {
}
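+# Prompt for an optional free-form description of this node (stored in NODE_DESCRIPTION)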
+whiptail_node_description() {
+ [ -n "$TESTING" ] && return
+
+ NODE_DESCRIPTION=$(whiptail --title "Security Onion Setup" \
+ --inputbox "Enter a short description for the node or press ENTER to leave blank:" 10 75 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+}
+
whiptail_node_es_heap() {
[ -n "$TESTING" ] && return
@@ -1105,6 +1342,22 @@ whiptail_node_ls_pipeline_worker() {
}
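+# Ask whether the user wants to customize the NTP server list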
+whiptail_ntp_ask() {
+ [ -n "$TESTING" ] && return
+
+ whiptail --title "Security Onion Setup" --yesno "Would you like to configure ntp servers?" 7 44
+}
+
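+# Collect a comma-separated list of NTP servers, pre-populated with the current default passed as $1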
+whiptail_ntp_servers() {
+ [ -n "$TESTING" ] && return
+
+ ntp_string=$(whiptail --title "Security Onion Setup" \
+ --inputbox "Input the NTP server(s) you would like to use, separated by commas:" 8 75 "$1" 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+}
+
whiptail_oinkcode() {
[ -n "$TESTING" ] && return
@@ -1271,11 +1524,7 @@ whiptail_proxy_auth_pass() {
[ -n "$TESTING" ] && return
- if [[ $arg != 'confirm' ]]; then
- proxy_pass=$(whiptail --title "Security Onion Setup" --passwordbox "Please input the proxy password:" 8 60 3>&1 1>&2 2>&3)
- else
- proxy_pass_confirm=$(whiptail --title "Security Onion Setup" --passwordbox "Please confirm the proxy password:" 8 60 3>&1 1>&2 2>&3)
- fi
+ proxy_pass=$(whiptail --title "Security Onion Setup" --passwordbox "Please input the proxy password:" 8 60 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -1469,6 +1718,22 @@ whiptail_so_allow() {
whiptail_check_exitstatus $exitstatus
}
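+# Warn non-ISO installs that the SSH host key changes after hardening, so the next reconnect will prompt about a new key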
+whiptail_ssh_warning() {
+ [ -n "$TESTING" ] && return
+
+ local msg
+
+ read -r -d '' msg <<- EOM
+ NOTE: You will receive a warning upon SSH reconnect that the host key has changed.
+
+ This is expected due to hardening of the OpenSSH server config.
+
+ The host key algorithm will now be ED25519. Follow the instructions given by your SSH client to remove the old key fingerprint, then retry the connection.
+ EOM
+
+ whiptail --msgbox "$msg" 14 75
+}
+
whiptail_storage_requirements() {
local mount=$1
local current_val=$2
diff --git a/setup/yum_repos/saltstack.repo b/setup/yum_repos/saltstack.repo
deleted file mode 100644
index 0430a62b8..000000000
--- a/setup/yum_repos/saltstack.repo
+++ /dev/null
@@ -1,6 +0,0 @@
-[saltstack]
-name=SaltStack repo for RHEL/CentOS $releasever PY3
-baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.5/
-enabled=1
-gpgcheck=1
-gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.5/SALTSTACK-GPG-KEY.pub
\ No newline at end of file
diff --git a/setup/yum_repos/wazuh.repo b/setup/yum_repos/wazuh.repo
deleted file mode 100644
index ae462c62f..000000000
--- a/setup/yum_repos/wazuh.repo
+++ /dev/null
@@ -1,7 +0,0 @@
-[wazuh_repo]
-gpgcheck=1
-gpgkey=https://packages.wazuh.com/key/GPG-KEY-WAZUH
-enabled=1
-name=Wazuh repository
-baseurl=https://packages.wazuh.com/3.x/yum/
-protect=1