From e881f4c92b9d75e13f5b118a1d927cd6d682fdb0 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 20 Jul 2020 17:37:46 -0400 Subject: [PATCH 001/376] Increment VERSION for dev to 2.1.0-rc.2; Add more logging to troubleshoot automated setup not initiating post-installation steps --- VERSION | 2 +- setup/so-setup | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/VERSION b/VERSION index d0c10bc48..ac97ff772 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0.0-rc.1 \ No newline at end of file +2.1.0-rc.2 \ No newline at end of file diff --git a/setup/so-setup b/setup/so-setup index 6a432fc9d..31f917b16 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -632,6 +632,12 @@ if [[ -n $SO_ERROR ]]; then SKIP_REBOOT=1 whiptail_setup_failed else + echo "Successfully completed setup! Continuing with post-installation steps" >> $setup_log 2>&1 + echo "automated=$automated" >> $setup_log 2>&1 + echo "ALLOW_ROLE=$ALLOW_ROLE" >> $setup_log 2>&1 + echo "ALLOW_CIDR=$ALLOW_CIDR" >> $setup_log 2>&1 + echo "THEHIVE=$THEHIVE" >> $setup_log 2>&1 + { export percentage=95 # set to last percentage used in previous subshell if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then @@ -645,6 +651,7 @@ else } | whiptail_gauge_post_setup "Running post-installation steps..." whiptail_setup_complete + echo "Post-installation steps have completed." >> $setup_log 2>&1 fi install_cleanup >> $setup_log 2>&1 From 328146799438427dbb9eb7c658580771b5974c70 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 20 Jul 2020 20:26:35 -0400 Subject: [PATCH 002/376] When running in automated mode, cat all piped in input to setup log --- setup/so-setup | 5 ----- setup/so-whiptail | 10 ++++++---- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index 31f917b16..80d028662 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -633,11 +633,6 @@ if [[ -n $SO_ERROR ]]; then whiptail_setup_failed else echo "Successfully completed setup! 
Continuing with post-installation steps" >> $setup_log 2>&1 - echo "automated=$automated" >> $setup_log 2>&1 - echo "ALLOW_ROLE=$ALLOW_ROLE" >> $setup_log 2>&1 - echo "ALLOW_CIDR=$ALLOW_CIDR" >> $setup_log 2>&1 - echo "THEHIVE=$THEHIVE" >> $setup_log 2>&1 - { export percentage=95 # set to last percentage used in previous subshell if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then diff --git a/setup/so-whiptail b/setup/so-whiptail index f0f1fb7b5..faeb5f496 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -1098,11 +1098,13 @@ whiptail_so_allow() { whiptail_gauge_post_setup() { - [ -n "$TESTING" ] && return - - local msg=$1 + if [ -n "$TESTING" ] + cat >> $setup_log 2>&1 + else + local msg=$1 - whiptail --title "Security Onion Setup" --gauge "$msg" 6 60 96 + whiptail --title "Security Onion Setup" --gauge "$msg" 6 60 96 + fi } whiptail_strelka_rules() { From abc68c2efb096036e9388ca5ccce6b5eac6a0ce3 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 21 Jul 2020 08:51:46 -0400 Subject: [PATCH 003/376] Update VERIFY_ISO.md --- VERIFY_ISO.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERIFY_ISO.md b/VERIFY_ISO.md index 17fb42b78..abefebfc6 100644 --- a/VERIFY_ISO.md +++ b/VERIFY_ISO.md @@ -24,7 +24,7 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma Download the signature file for the ISO: ``` -https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.0.0-rc1.iso.sig +wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.0.0-rc1.iso.sig ``` Download the ISO image: From d2df405cf05fb7e2b953bf86a4cd687065175603 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 21 Jul 2020 11:07:01 -0400 Subject: [PATCH 004/376] so-import-pcap improvements: Ensure PCAP filenames with spaces are handled properly; Provide link directly to the imported logs, filtered by import ID; Require sudo access to run so-import-pcap --- salt/common/tools/sbin/so-import-pcap | 12 ++++++---- .../templates/so/so-common-template.json | 24 +++++++++++-------- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/salt/common/tools/sbin/so-import-pcap b/salt/common/tools/sbin/so-import-pcap index a45fe6777..2281ff943 100755 --- a/salt/common/tools/sbin/so-import-pcap +++ b/salt/common/tools/sbin/so-import-pcap @@ -20,6 +20,8 @@ {% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} {%- set MANAGERIP = salt['pillar.get']('static:managerip') -%} +. 
/usr/sbin/so-common + function usage { cat << EOF Usage: $0 [pcap-file-2] [pcap-file-N] @@ -32,13 +34,13 @@ EOF function pcapinfo() { PCAP=$1 ARGS=$2 - docker run --rm -v $PCAP:/input.pcap --entrypoint capinfos {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap $ARGS + docker run --rm -v "$PCAP:/input.pcap" --entrypoint capinfos {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap $ARGS } function pcapfix() { PCAP=$1 PCAP_OUT=$2 - docker run --rm -v $PCAP:/input.pcap -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1 + docker run --rm -v "$PCAP:/input.pcap" -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1 } function suricata() { @@ -57,7 +59,7 @@ function suricata() { -v /opt/so/conf/suricata/rules:/etc/suricata/rules:ro \ -v ${LOG_PATH}:/var/log/suricata/:rw \ -v ${NSM_PATH}/:/nsm/:rw \ - -v $PCAP:/input.pcap:ro \ + -v "$PCAP:/input.pcap:ro" \ -v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \ {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \ --runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1 @@ -76,7 +78,7 @@ function zeek() { -v $NSM_PATH/logs:/nsm/zeek/logs:rw \ -v $NSM_PATH/spool:/nsm/zeek/spool:rw \ -v $NSM_PATH/extracted:/nsm/zeek/extracted:rw \ - -v $PCAP:/input.pcap:ro \ + -v "$PCAP:/input.pcap:ro" \ -v /opt/so/conf/zeek/local.zeek:/opt/zeek/share/zeek/site/local.zeek:ro \ -v /opt/so/conf/zeek/node.cfg:/opt/zeek/etc/node.cfg:ro \ -v /opt/so/conf/zeek/zeekctl.cfg:/opt/zeek/etc/zeekctl.cfg:ro \ @@ -210,7 +212,7 @@ cat << EOF Import complete! You can use the following hyperlink to view data in the time range of your import. 
You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser: -https://{{ MANAGERIP }}/#/hunt?q=%2a%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM +https://{{ MANAGERIP }}/#/hunt?q=import.id:${HASH}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM or you can manually set your Time Range to be: From: $START_OLDEST To: $END_NEWEST diff --git a/salt/elasticsearch/templates/so/so-common-template.json b/salt/elasticsearch/templates/so/so-common-template.json index c0167198d..85a65fd6f 100644 --- a/salt/elasticsearch/templates/so/so-common-template.json +++ b/salt/elasticsearch/templates/so/so-common-template.json @@ -18,7 +18,7 @@ "@version":{ "type":"keyword" }, - "osquery":{ + "osquery":{ "type":"object", "dynamic":true }, @@ -85,7 +85,7 @@ "type":"object", "dynamic": true }, - "client":{ + "client":{ "type":"object", "dynamic": true }, @@ -177,6 +177,10 @@ "type":"object", "dynamic": true }, + "import":{ + "type":"object", + "dynamic": true + }, "ingest":{ "type":"object", "dynamic": true @@ -185,7 +189,7 @@ "type":"object", "dynamic": true }, - "irc":{ + "irc":{ "type":"object", "dynamic": true }, @@ -201,7 +205,7 @@ "type":"object", "dynamic": true }, - "message":{ + "message":{ "type":"text", "fields":{ "keyword":{ @@ -213,7 +217,7 @@ "type":"object", "dynamic": true }, - "mysql":{ + "mysql":{ "type":"object", "dynamic": true }, @@ -221,7 +225,7 @@ "type":"object", "dynamic": true }, - "notice":{ + "notice":{ "type":"object", "dynamic": true }, @@ -269,7 +273,7 @@ "type":"object", "dynamic": true }, - "request":{ + "request":{ "type":"object", "dynamic": true }, @@ -281,7 +285,7 @@ "type":"object", "dynamic": true }, - "scan":{ + "scan":{ "type":"object", "dynamic": true }, @@ -317,7 +321,7 @@ "type":"object", "dynamic": true }, - "source":{ + "source":{ "type":"object", "dynamic": true }, @@ -329,7 +333,7 @@ "type":"object", "dynamic": true }, - "syslog":{ + "syslog":{ "type":"object", "dynamic": true }, From d6f7dcb630f28f3de9d3ad6dedd8c0a8e2e44dec Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 21 Jul 2020 15:35:13 -0400 Subject: [PATCH 005/376] [refactor] Changes to storage requirements See #1047 --- setup/so-functions | 17 +++++++++++++++-- setup/so-variables | 17 ++++++++++++++++- setup/so-whiptail | 29 +++++++++++++++++++++++++---- 3 files changed, 56 insertions(+), 7 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 89d0fdc7a..27b99ef0f 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -411,15 +411,28 @@ check_requirements() { req_mem=12 req_cores=4 req_nics=2 - req_storage=100 elif [[ "$standalone_or_dist" == 'dist' ]]; then req_mem=8 req_cores=4 - req_storage=40 if [[ "$node_type" == 'sensor' ]]; then req_nics=2; else req_nics=1; fi if [[ "$node_type" == 'fleet' ]]; then req_mem=4; fi fi + if [[ -n $nsm_mount ]]; then + req_storage=100 + if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then + whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB" + fi + if (( $(echo "$free_space_nsm < $req_storage" | bc -l) )); then + whiptail_storage_requirements "/nsm" "${free_space_nsm} GB" "${req_storage} GB" + fi + else + req_storage=200 + if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then + whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB" + fi + fi + if 
(( $(echo "$free_space_root < $req_storage" | bc -l) )); then whiptail_requirements_error "disk space" "${free_space_root} GB" "${req_storage} GB" fi diff --git a/setup/so-variables b/setup/so-variables index 07f7aa71b..057c67ff2 100644 --- a/setup/so-variables +++ b/setup/so-variables @@ -28,9 +28,24 @@ mkdir -p /nsm filesystem_nsm=$(df /nsm | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }') export filesystem_nsm -free_space_root=$(df -Pk /nsm | sed 1d | grep -v used | awk '{ print $4 / 1048576 }' | awk '{ printf("%.0f", $1) }') +free_space_nsm=$(df -Pk /nsm | sed 1d | grep -v used | awk '{ print $4 / 1048576 }' | awk '{ printf("%.0f", $1) }') +export free_space_nsm + +free_space_root=$(df -Pk / | sed 1d | grep -v used | awk '{ print $4 / 1048576 }' | awk '{ printf("%.0f", $1) }') export free_space_root +readarray -t mountpoints <<< "$(lsblk -nlo MOUNTPOINT)" +readarray -t partitions <<< "$(lsblk -nlo NAME)" +index=0 +for item in "${mountpoints[@]}"; do + if [[ $item == '/' ]]; then export root_part="${partitions[$index]}" + elif [[ $item == '/nsm' ]]; then + export nsm_mount=1 + export nsm_part="${partitions[$index]}" + fi + ((index++)) +done + mkdir -p /root/installtmp/pillar/minions export temp_install_dir=/root/installtmp diff --git a/setup/so-whiptail b/setup/so-whiptail index faeb5f496..7c937ec4e 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -315,6 +315,27 @@ whiptail_requirements_error() { whiptail_check_exitstatus $exitstatus } +whiptail_storage_requirements() { + local mount=$1 + local current_val=$2 + local needed_val=$3 + + read -r -d '' message <<- EOM + Free space on mount point '${mount}' is currently ${current_val}. + + You need ${needed_val} to meet minimum requirements. + + Visit https://docs.securityonion.net/en/2.0/hardware.html for more information. + + Press YES to continue anyway, or press NO to cancel. + EOM + + whiptail \ + --title "Security Onion Setup" \ + --yesno "$message" \ + 14 75 +} + whiptail_invalid_pass_warning() { [ -n "$TESTING" ] && return @@ -1109,13 +1130,13 @@ whiptail_gauge_post_setup() { whiptail_strelka_rules() { - [ -n "$TESTING" ] && return + [ -n "$TESTING" ] && return - whiptail --title "Security Onion Setup" --yesno "Do you want to enable the default YARA rules for Strelka?" 8 75 + whiptail --title "Security Onion Setup" --yesno "Do you want to enable the default YARA rules for Strelka?" 8 75 - local exitstatus=$? + local exitstatus=$? 
- if [[ $exitstatus == 0 ]]; then export STRELKARULES=1; fi + if [[ $exitstatus == 0 ]]; then export STRELKARULES=1; fi } whiptail_suricata_pins() { From e43829b22cb3ca5a85be3c50dac802ce05d64486 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 21 Jul 2020 16:24:13 -0400 Subject: [PATCH 006/376] [fix] Add then to if statement --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index faeb5f496..e75df5b02 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -1098,7 +1098,7 @@ whiptail_so_allow() { whiptail_gauge_post_setup() { - if [ -n "$TESTING" ] + if [ -n "$TESTING" ]; then cat >> $setup_log 2>&1 else local msg=$1 From 003271127aae800f22a29b8fcc24bbba53c337b7 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 21 Jul 2020 16:32:28 -0400 Subject: [PATCH 007/376] [feat] Only check storage during setup on a network install --- setup/so-functions | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 27b99ef0f..498ff8682 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -418,20 +418,22 @@ check_requirements() { if [[ "$node_type" == 'fleet' ]]; then req_mem=4; fi fi - if [[ -n $nsm_mount ]]; then - req_storage=100 - if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then - whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB" - fi - if (( $(echo "$free_space_nsm < $req_storage" | bc -l) )); then - whiptail_storage_requirements "/nsm" "${free_space_nsm} GB" "${req_storage} GB" - fi - else - req_storage=200 - if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then - whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB" - fi - fi + if [[ $setup_type == 'network' ]]; then + if [[ -n $nsm_mount ]]; then + req_storage=100 + if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then + whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB" + fi + if (( $(echo "$free_space_nsm < $req_storage" | bc -l) )); then + whiptail_storage_requirements "/nsm" "${free_space_nsm} GB" "${req_storage} GB" + fi + else + req_storage=200 + if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then + whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB" + fi + fi + fi if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then whiptail_requirements_error "disk space" "${free_space_root} GB" "${req_storage} GB" From ddf0a5055edde9fd53419a954a08d3c52124b67e Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 21 Jul 2020 16:34:08 -0400 Subject: [PATCH 008/376] [fix] Exit on NO --- setup/so-whiptail | 3 +++ 1 file changed, 3 insertions(+) diff --git a/setup/so-whiptail b/setup/so-whiptail index f976d4a2f..d781de5e8 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -334,6 +334,9 @@ whiptail_storage_requirements() { --title "Security Onion Setup" \ --yesno "$message" \ 14 75 + + local exitstatus=$? 
+ whiptail_check_exitstatus $exitstatus } whiptail_invalid_pass_warning() { From 752d1bceb423238b612af691f99c7c1012194583 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 21 Jul 2020 16:36:37 -0400 Subject: [PATCH 009/376] [fix] Remove old storage space check --- setup/so-functions | 4 ---- 1 file changed, 4 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 498ff8682..4074f1701 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -435,10 +435,6 @@ check_requirements() { fi fi - if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then - whiptail_requirements_error "disk space" "${free_space_root} GB" "${req_storage} GB" - fi - if [[ $num_nics -lt $req_nics ]]; then whiptail_requirements_error "NICs" "$num_nics" "$req_nics" fi From f3c24f1f016ccbb8f2ae5c5fb53886346a07061f Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 21 Jul 2020 16:43:21 -0400 Subject: [PATCH 010/376] [fix] Add check for $TESTING --- setup/so-whiptail | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup/so-whiptail b/setup/so-whiptail index d781de5e8..5b201818e 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -320,6 +320,8 @@ whiptail_storage_requirements() { local current_val=$2 local needed_val=$3 + [ -n "$TESTING" ] && return + read -r -d '' message <<- EOM Free space on mount point '${mount}' is currently ${current_val}. From 9eeb527ea710413d87104304c18d88dbf3c06fea Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 23 Jul 2020 17:18:42 -0400 Subject: [PATCH 011/376] Include UTC parameter when providing a hyperlink to Hunt from so-import-pcap output --- salt/common/tools/sbin/so-import-pcap | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/so-import-pcap b/salt/common/tools/sbin/so-import-pcap index 2281ff943..aef6e98d8 100755 --- a/salt/common/tools/sbin/so-import-pcap +++ b/salt/common/tools/sbin/so-import-pcap @@ -212,9 +212,9 @@ cat << EOF Import complete! You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser: -https://{{ MANAGERIP }}/#/hunt?q=import.id:${HASH}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM +https://{{ MANAGERIP }}/#/hunt?q=import.id:${HASH}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC -or you can manually set your Time Range to be: +or you can manually set your Time Range to be (in UTC): From: $START_OLDEST To: $END_NEWEST Please note that it may take 30 seconds or more for events to appear in Onion Hunt. 
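---

The hyperlink in the patch above concatenates three URL-encoded pieces: a Hunt query pinned to the import ID, an absolute time range, and the newly added z=UTC zone parameter, presumably so the range is evaluated in UTC rather than in the browser's local time zone. A minimal shell sketch of that assembly follows; it is not part of any patch in this series, the variable values are hypothetical, and in the real script HASH and the date bounds are derived from the imported PCAP while MANAGERIP comes from the {{ MANAGERIP }} Jinja value:

    #!/bin/sh
    # Illustrative reconstruction only -- not taken from so-import-pcap.
    MANAGERIP="10.0.0.10"             # hypothetical manager address
    HASH="abc123"                     # hypothetical import ID
    START_OLDEST_SLASH="2020/07/20"   # hypothetical oldest capture date
    END_NEWEST_SLASH="2020/07/22"     # hypothetical newest capture date

    # Percent-encoding used below: %20 = space, %7C = |, %3A = :
    QUERY="import.id:${HASH}%20%7C%20groupby%20event.module%20event.dataset"
    RANGE="${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM"
    echo "https://${MANAGERIP}/#/hunt?q=${QUERY}&t=${RANGE}&z=UTC"

Echoed with real values, this reproduces the link printed at the end of an import; pasting it into a browser lands on Hunt with the query, time range, and UTC zone pre-filled.

---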
From 78491e1fc577eb8258372b12341eb77c60c5b652 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 24 Jul 2020 15:06:06 -0400 Subject: [PATCH 012/376] soup update salt on manager for centos - https://github.com/Security-Onion-Solutions/securityonion/issues/1091 --- pillar/salt/master.sls | 4 + pillar/salt/minion.sls | 4 + salt/common/tools/sbin/soup | 47 +- salt/salt/master.sls | 1 + salt/salt/minion.sls | 1 + salt/salt/scripts/bootstrap-salt.sh | 7856 +++++++++++++++++++++++++++ 6 files changed, 7912 insertions(+), 1 deletion(-) create mode 100644 pillar/salt/master.sls create mode 100644 pillar/salt/minion.sls create mode 100644 salt/salt/master.sls create mode 100644 salt/salt/minion.sls create mode 100644 salt/salt/scripts/bootstrap-salt.sh diff --git a/pillar/salt/master.sls b/pillar/salt/master.sls new file mode 100644 index 000000000..a34a96b9e --- /dev/null +++ b/pillar/salt/master.sls @@ -0,0 +1,4 @@ +#version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched +salt: + master: + version: 3001 \ No newline at end of file diff --git a/pillar/salt/minion.sls b/pillar/salt/minion.sls new file mode 100644 index 000000000..4978a4a73 --- /dev/null +++ b/pillar/salt/minion.sls @@ -0,0 +1,4 @@ +#version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched +salt: + minion: + version: 3001 \ No newline at end of file diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index b2b8cacc4..ca4bc518b 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -18,6 +18,7 @@ . /usr/sbin/so-common UPDATE_DIR=/tmp/sogh/securityonion INSTALLEDVERSION=$(cat /etc/soversion) +INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'}) default_salt_dir=/opt/so/saltstack/default manager_check() { @@ -154,8 +155,27 @@ upgrade_check() { if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then echo "You are already running the latest version of Security Onion." exit 0 + fi +} + +upgrade_check_salt() { + NEWSALTVERSION=$(grep version: $UPDATE_DIR/pillar/salt/master.sls | awk {'print $2'}) + if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then + echo "You are already running the correct version of Salt for Security Onion." 
else - echo "Performing Upgrade from $INSTALLEDVERSION to $NEWVERSION" + echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION" + echo "" + # If CentOS + echo "Removing yum versionlock for Salt" + echo "" + yum versionlock delete "salt-*" + echo "Updating Salt packages and restarting services" + echo "" + sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION" + echo "Applying yum versionlock for Salt" + echo "" + yum versionlock add "salt-*" + # Else do Ubuntu things fi } @@ -185,7 +205,21 @@ verify_latest_update_script echo "" echo "Let's see if we need to update" upgrade_check + + echo "" +echo "Performing Upgrade from $INSTALLEDVERSION to $NEWVERSION" +echo "" +echo "Stopping Salt Master service" +systemctl stop salt-master +echo "" +echo "Stopping Salt Minion service" +systemctl stop salt-minion +echo "" +echo "Checking for Salt updates" +upgrade_check_salt + + echo "Making pillar changes" pillar_changes echo "" @@ -200,6 +234,17 @@ copy_new_files echo "" echo "Updating version" update_version + + +echo "" +echo "Starting Salt Master service" +systemctl start salt-master +echo "" +echo "Starting Salt Minion service" +systemctl start salt-minion +echo "" + + echo "" echo "Running a highstate to complete upgrade" highstate diff --git a/salt/salt/master.sls b/salt/salt/master.sls new file mode 100644 index 000000000..69f6ad89a --- /dev/null +++ b/salt/salt/master.sls @@ -0,0 +1 @@ +#Future state for Salt masters \ No newline at end of file diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls new file mode 100644 index 000000000..c95ff10e8 --- /dev/null +++ b/salt/salt/minion.sls @@ -0,0 +1 @@ +#Future state for Salt minions \ No newline at end of file diff --git a/salt/salt/scripts/bootstrap-salt.sh b/salt/salt/scripts/bootstrap-salt.sh new file mode 100644 index 000000000..70241a041 --- /dev/null +++ b/salt/salt/scripts/bootstrap-salt.sh @@ -0,0 +1,7856 @@ +#!/bin/sh - + +# WARNING: Changes to this file in the salt repo will be overwritten! +# Please submit pull requests against the salt-bootstrap repo: +# https://github.com/saltstack/salt-bootstrap + +#====================================================================================================================== +# vim: softtabstop=4 shiftwidth=4 expandtab fenc=utf-8 spell spelllang=en cc=120 +#====================================================================================================================== +# +# FILE: bootstrap-salt.sh +# +# DESCRIPTION: Bootstrap Salt installation for various systems/distributions +# +# BUGS: https://github.com/saltstack/salt-bootstrap/issues +# +# COPYRIGHT: (c) 2012-2018 by the SaltStack Team, see AUTHORS.rst for more +# details. +# +# LICENSE: Apache 2.0 +# ORGANIZATION: SaltStack (saltstack.com) +# CREATED: 10/15/2012 09:49:37 PM WEST +#====================================================================================================================== +set -o nounset # Treat unset variables as an error + +__ScriptVersion="2020.06.23" +__ScriptName="bootstrap-salt.sh" + +__ScriptFullName="$0" +__ScriptArgs="$*" + +#====================================================================================================================== +# Environment variables taken into account. 
+#---------------------------------------------------------------------------------------------------------------------- +# * BS_COLORS: If 0 disables colour support +# * BS_PIP_ALLOWED: If 1 enable pip based installations(if needed) +# * BS_PIP_ALL: If 1 enable all python packages to be installed via pip instead of apt, requires setting virtualenv +# * BS_VIRTUALENV_DIR: The virtualenv to install salt into (shouldn't exist yet) +# * BS_ECHO_DEBUG: If 1 enable debug echo which can also be set by -D +# * BS_SALT_ETC_DIR: Defaults to /etc/salt (Only tweak'able on git based installations) +# * BS_SALT_CACHE_DIR: Defaults to /var/cache/salt (Only tweak'able on git based installations) +# * BS_KEEP_TEMP_FILES: If 1, don't move temporary files, instead copy them +# * BS_FORCE_OVERWRITE: Force overriding copied files(config, init.d, etc) +# * BS_UPGRADE_SYS: If 1 and an option, upgrade system. Default 0. +# * BS_GENTOO_USE_BINHOST: If 1 add `--getbinpkg` to gentoo's emerge +# * BS_SALT_MASTER_ADDRESS: The IP or DNS name of the salt-master the minion should connect to +# * BS_SALT_GIT_CHECKOUT_DIR: The directory where to clone Salt on git installations +#====================================================================================================================== + + +# Bootstrap script truth values +BS_TRUE=1 +BS_FALSE=0 + +# Default sleep time used when waiting for daemons to start, restart and checking for these running +__DEFAULT_SLEEP=3 + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __detect_color_support +# DESCRIPTION: Try to detect color support. +#---------------------------------------------------------------------------------------------------------------------- +_COLORS=${BS_COLORS:-$(tput colors 2>/dev/null || echo 0)} +__detect_color_support() { + # shellcheck disable=SC2181 + if [ $? -eq 0 ] && [ "$_COLORS" -gt 2 ]; then + RC='\033[1;31m' + GC='\033[1;32m' + BC='\033[1;34m' + YC='\033[1;33m' + EC='\033[0m' + else + RC="" + GC="" + BC="" + YC="" + EC="" + fi +} +__detect_color_support + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: echoerr +# DESCRIPTION: Echo errors to stderr. +#---------------------------------------------------------------------------------------------------------------------- +echoerror() { + printf "${RC} * ERROR${EC}: %s\\n" "$@" 1>&2; +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: echoinfo +# DESCRIPTION: Echo information to stdout. +#---------------------------------------------------------------------------------------------------------------------- +echoinfo() { + printf "${GC} * INFO${EC}: %s\\n" "$@"; +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: echowarn +# DESCRIPTION: Echo warning information to stdout. +#---------------------------------------------------------------------------------------------------------------------- +echowarn() { + printf "${YC} * WARN${EC}: %s\\n" "$@"; +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: echodebug +# DESCRIPTION: Echo debug information to stdout. 
+#---------------------------------------------------------------------------------------------------------------------- +echodebug() { + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + printf "${BC} * DEBUG${EC}: %s\\n" "$@"; + fi +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_command_exists +# DESCRIPTION: Check if a command exists. +#---------------------------------------------------------------------------------------------------------------------- +__check_command_exists() { + command -v "$1" > /dev/null 2>&1 +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_pip_allowed +# DESCRIPTION: Simple function to let the users know that -P needs to be used. +#---------------------------------------------------------------------------------------------------------------------- +__check_pip_allowed() { + if [ $# -eq 1 ]; then + _PIP_ALLOWED_ERROR_MSG=$1 + else + _PIP_ALLOWED_ERROR_MSG="pip based installations were not allowed. Retry using '-P'" + fi + + if [ "$_PIP_ALLOWED" -eq $BS_FALSE ]; then + echoerror "$_PIP_ALLOWED_ERROR_MSG" + __usage + exit 1 + fi +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_config_dir +# DESCRIPTION: Checks the config directory, retrieves URLs if provided. +#---------------------------------------------------------------------------------------------------------------------- +__check_config_dir() { + CC_DIR_NAME="$1" + CC_DIR_BASE=$(basename "${CC_DIR_NAME}") + + case "$CC_DIR_NAME" in + http://*|https://*) + __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}" + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + ftp://*) + __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}" + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + *://*) + echoerror "Unsupported URI scheme for $CC_DIR_NAME" + echo "null" + return + ;; + *) + if [ ! -e "${CC_DIR_NAME}" ]; then + echoerror "The configuration directory or archive $CC_DIR_NAME does not exist." 
+ echo "null" + return + fi + ;; + esac + + case "$CC_DIR_NAME" in + *.tgz|*.tar.gz) + tar -zxf "${CC_DIR_NAME}" -C /tmp + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tgz") + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.gz") + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + *.tbz|*.tar.bz2) + tar -xjf "${CC_DIR_NAME}" -C /tmp + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tbz") + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.bz2") + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + *.txz|*.tar.xz) + tar -xJf "${CC_DIR_NAME}" -C /tmp + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".txz") + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.xz") + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + esac + + echo "${CC_DIR_NAME}" +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_unparsed_options +# DESCRIPTION: Checks the placed after the install arguments +#---------------------------------------------------------------------------------------------------------------------- +__check_unparsed_options() { + shellopts="$1" + # grep alternative for SunOS + if [ -f /usr/xpg4/bin/grep ]; then + grep='/usr/xpg4/bin/grep' + else + grep='grep' + fi + unparsed_options=$( echo "$shellopts" | ${grep} -E '(^|[[:space:]])[-]+[[:alnum:]]' ) + if [ "$unparsed_options" != "" ]; then + __usage + echo + echoerror "options are only allowed before install arguments" + echo + exit 1 + fi +} + + +#---------------------------------------------------------------------------------------------------------------------- +# Handle command line arguments +#---------------------------------------------------------------------------------------------------------------------- +_KEEP_TEMP_FILES=${BS_KEEP_TEMP_FILES:-$BS_FALSE} +_TEMP_CONFIG_DIR="null" +_SALTSTACK_REPO_URL="https://github.com/saltstack/salt.git" +_SALT_REPO_URL=${_SALTSTACK_REPO_URL} +_DOWNSTREAM_PKG_REPO=$BS_FALSE +_TEMP_KEYS_DIR="null" +_SLEEP="${__DEFAULT_SLEEP}" +_INSTALL_MASTER=$BS_FALSE +_INSTALL_SYNDIC=$BS_FALSE +_INSTALL_MINION=$BS_TRUE +_INSTALL_CLOUD=$BS_FALSE +_VIRTUALENV_DIR=${BS_VIRTUALENV_DIR:-"null"} +_START_DAEMONS=$BS_TRUE +_DISABLE_SALT_CHECKS=$BS_FALSE +_ECHO_DEBUG=${BS_ECHO_DEBUG:-$BS_FALSE} +_CONFIG_ONLY=$BS_FALSE +_PIP_ALLOWED=${BS_PIP_ALLOWED:-$BS_FALSE} +_PIP_ALL=${BS_PIP_ALL:-$BS_FALSE} +_SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/etc/salt} +_SALT_CACHE_DIR=${BS_SALT_CACHE_DIR:-/var/cache/salt} +_PKI_DIR=${_SALT_ETC_DIR}/pki +_FORCE_OVERWRITE=${BS_FORCE_OVERWRITE:-$BS_FALSE} +_GENTOO_USE_BINHOST=${BS_GENTOO_USE_BINHOST:-$BS_FALSE} +_EPEL_REPO=${BS_EPEL_REPO:-epel} +_EPEL_REPOS_INSTALLED=$BS_FALSE +_UPGRADE_SYS=${BS_UPGRADE_SYS:-$BS_FALSE} +_INSECURE_DL=${BS_INSECURE_DL:-$BS_FALSE} +_CURL_ARGS=${BS_CURL_ARGS:-} +_FETCH_ARGS=${BS_FETCH_ARGS:-} +_GPG_ARGS=${BS_GPG_ARGS:-} +_WGET_ARGS=${BS_WGET_ARGS:-} +_SALT_MASTER_ADDRESS=${BS_SALT_MASTER_ADDRESS:-null} +_SALT_MINION_ID="null" +# _SIMPLIFY_VERSION is mostly used in Solaris based distributions +_SIMPLIFY_VERSION=$BS_TRUE +_LIBCLOUD_MIN_VERSION="0.14.0" +_EXTRA_PACKAGES="" +_HTTP_PROXY="" +_SALT_GIT_CHECKOUT_DIR=${BS_SALT_GIT_CHECKOUT_DIR:-/tmp/git/salt} +_NO_DEPS=$BS_FALSE +_FORCE_SHALLOW_CLONE=$BS_FALSE +_DISABLE_SSL=$BS_FALSE +_DISABLE_REPOS=$BS_FALSE +_CUSTOM_REPO_URL="null" +_CUSTOM_MASTER_CONFIG="null" +_CUSTOM_MINION_CONFIG="null" +_QUIET_GIT_INSTALLATION=$BS_FALSE +_REPO_URL="repo.saltstack.com" +_PY_EXE="" +_INSTALL_PY="$BS_FALSE" +_TORNADO_MAX_PY3_VERSION="5.0" +_POST_NEON_INSTALL=$BS_FALSE +_MINIMUM_PIP_VERSION="8.0.0" 
+_MINIMUM_SETUPTOOLS_VERSION="9.1" +_POST_NEON_PIP_INSTALL_ARGS="--prefix=/usr" + +# Defaults for install arguments +ITYPE="stable" + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __usage +# DESCRIPTION: Display usage information. +#---------------------------------------------------------------------------------------------------------------------- +__usage() { + cat << EOT + + Usage : ${__ScriptName} [options] [install-type-args] + + Installation types: + - stable Install latest stable release. This is the default + install type + - stable [branch] Install latest version on a branch. Only supported + for packages available at repo.saltstack.com + - stable [version] Install a specific version. Only supported for + packages available at repo.saltstack.com + - testing RHEL-family specific: configure EPEL testing repo + - git Install from the head of the master branch + - git [ref] Install from any git ref (such as a branch, tag, or + commit) + + Examples: + - ${__ScriptName} + - ${__ScriptName} stable + - ${__ScriptName} stable 2017.7 + - ${__ScriptName} stable 2017.7.2 + - ${__ScriptName} testing + - ${__ScriptName} git + - ${__ScriptName} git 2017.7 + - ${__ScriptName} git v2017.7.2 + - ${__ScriptName} git 06f249901a2e2f1ed310d58ea3921a129f214358 + + Options: + -h Display this message + -v Display script version + -n No colours + -D Show debug output + -c Temporary configuration directory + -g Salt Git repository URL. Default: ${_SALTSTACK_REPO_URL} + -w Install packages from downstream package repository rather than + upstream, saltstack package repository. This is currently only + implemented for SUSE. + -k Temporary directory holding the minion keys which will pre-seed + the master. + -s Sleep time used when waiting for daemons to start, restart and when + checking for the services running. Default: ${__DEFAULT_SLEEP} + -L Also install salt-cloud and required python-libcloud package + -M Also install salt-master + -S Also install salt-syndic + -N Do not install salt-minion + -X Do not start daemons after installation + -d Disables checking if Salt services are enabled to start on system boot. + You can also do this by touching /tmp/disable_salt_checks on the target + host. Default: \${BS_FALSE} + -P Allow pip based installations. On some distributions the required salt + packages or its dependencies are not available as a package for that + distribution. Using this flag allows the script to use pip as a last + resort method. NOTE: This only works for functions which actually + implement pip based installations. + -U If set, fully upgrade the system prior to bootstrapping Salt + -I If set, allow insecure connections while downloading any files. For + example, pass '--no-check-certificate' to 'wget' or '--insecure' to + 'curl'. On Debian and Ubuntu, using this option with -U allows obtaining + GnuPG archive keys insecurely if distro has changed release signatures. + -F Allow copied files to overwrite existing (config, init.d, etc) + -K If set, keep the temporary files in the temporary directories specified + with -c and -k + -C Only run the configuration function. Implies -F (forced overwrite). + To overwrite Master or Syndic configs, -M or -S, respectively, must + also be specified. Salt installation will be ommitted, but some of the + dependencies could be installed to write configuration with -j or -J. + -A Pass the salt-master DNS name or IP. 
This will be stored under + \${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf + -i Pass the salt-minion id. This will be stored under + \${BS_SALT_ETC_DIR}/minion_id + -p Extra-package to install while installing Salt dependencies. One package + per -p flag. You are responsible for providing the proper package name. + -H Use the specified HTTP proxy for all download URLs (including https://). + For example: http://myproxy.example.com:3128 + -b Assume that dependencies are already installed and software sources are + set up. If git is selected, git tree is still checked out as dependency + step. + -f Force shallow cloning for git installations. + This may result in an "n/a" in the version number. + -l Disable ssl checks. When passed, switches "https" calls to "http" where + possible. + -V Install Salt into virtualenv + (only available for Ubuntu based distributions) + -a Pip install all Python pkg dependencies for Salt. Requires -V to install + all pip pkgs into the virtualenv. + (Only available for Ubuntu based distributions) + -r Disable all repository configuration performed by this script. This + option assumes all necessary repository configuration is already present + on the system. + -R Specify a custom repository URL. Assumes the custom repository URL + points to a repository that mirrors Salt packages located at + repo.saltstack.com. The option passed with -R replaces the + "repo.saltstack.com". If -R is passed, -r is also set. Currently only + works on CentOS/RHEL and Debian based distributions. + -J Replace the Master config file with data passed in as a JSON string. If + a Master config file is found, a reasonable effort will be made to save + the file with a ".bak" extension. If used in conjunction with -C or -F, + no ".bak" file will be created as either of those options will force + a complete overwrite of the file. + -j Replace the Minion config file with data passed in as a JSON string. If + a Minion config file is found, a reasonable effort will be made to save + the file with a ".bak" extension. If used in conjunction with -C or -F, + no ".bak" file will be created as either of those options will force + a complete overwrite of the file. + -q Quiet salt installation from git (setup.py install -q) + -x Changes the Python version used to install Salt. + For CentOS 6 git installations python2.7 is supported. + Fedora git installation, CentOS 7, Debian 9, Ubuntu 16.04 and 18.04 support python3. + -y Installs a different python version on host. Currently this has only been + tested with CentOS 6 and is considered experimental. This will install the + ius repo on the box if disable repo is false. This must be used in conjunction + with -x . For example: + sh bootstrap.sh -P -y -x python2.7 git v2017.7.2 + The above will install python27 and install the git version of salt using the + python2.7 executable. This only works for git and pip installations. + +EOT +} # ---------- end of function __usage ---------- + + +while getopts ':hvnDc:g:Gyx:wk:s:MSNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aq' opt +do + case "${opt}" in + + h ) __usage; exit 0 ;; + v ) echo "$0 -- Version $__ScriptVersion"; exit 0 ;; + n ) _COLORS=0; __detect_color_support ;; + D ) _ECHO_DEBUG=$BS_TRUE ;; + c ) _TEMP_CONFIG_DIR="$OPTARG" ;; + g ) _SALT_REPO_URL=$OPTARG ;; + + G ) echowarn "The '-G' option is DEPRECATED and will be removed in the future stable release!" + echowarn "Bootstrap will always use 'https' protocol to clone from SaltStack GitHub repo." 
+ echowarn "No need to provide this option anymore, now it is a default behavior." + ;; + + w ) _DOWNSTREAM_PKG_REPO=$BS_TRUE ;; + k ) _TEMP_KEYS_DIR="$OPTARG" ;; + s ) _SLEEP=$OPTARG ;; + M ) _INSTALL_MASTER=$BS_TRUE ;; + S ) _INSTALL_SYNDIC=$BS_TRUE ;; + N ) _INSTALL_MINION=$BS_FALSE ;; + X ) _START_DAEMONS=$BS_FALSE ;; + C ) _CONFIG_ONLY=$BS_TRUE ;; + P ) _PIP_ALLOWED=$BS_TRUE ;; + F ) _FORCE_OVERWRITE=$BS_TRUE ;; + U ) _UPGRADE_SYS=$BS_TRUE ;; + K ) _KEEP_TEMP_FILES=$BS_TRUE ;; + I ) _INSECURE_DL=$BS_TRUE ;; + A ) _SALT_MASTER_ADDRESS=$OPTARG ;; + i ) _SALT_MINION_ID=$OPTARG ;; + L ) _INSTALL_CLOUD=$BS_TRUE ;; + p ) _EXTRA_PACKAGES="$_EXTRA_PACKAGES $OPTARG" ;; + d ) _DISABLE_SALT_CHECKS=$BS_TRUE ;; + H ) _HTTP_PROXY="$OPTARG" ;; + b ) _NO_DEPS=$BS_TRUE ;; + f ) _FORCE_SHALLOW_CLONE=$BS_TRUE ;; + l ) _DISABLE_SSL=$BS_TRUE ;; + V ) _VIRTUALENV_DIR="$OPTARG" ;; + a ) _PIP_ALL=$BS_TRUE ;; + r ) _DISABLE_REPOS=$BS_TRUE ;; + R ) _CUSTOM_REPO_URL=$OPTARG ;; + J ) _CUSTOM_MASTER_CONFIG=$OPTARG ;; + j ) _CUSTOM_MINION_CONFIG=$OPTARG ;; + q ) _QUIET_GIT_INSTALLATION=$BS_TRUE ;; + x ) _PY_EXE="$OPTARG" ;; + y ) _INSTALL_PY="$BS_TRUE" ;; + + \?) echo + echoerror "Option does not exist : $OPTARG" + __usage + exit 1 + ;; + + esac # --- end of case --- +done +shift $((OPTIND-1)) + + +# Define our logging file and pipe paths +LOGFILE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.log/g )" +LOGPIPE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.logpipe/g )" +# Ensure no residual pipe exists +rm "$LOGPIPE" 2>/dev/null + +# Create our logging pipe +# On FreeBSD we have to use mkfifo instead of mknod +if ! (mknod "$LOGPIPE" p >/dev/null 2>&1 || mkfifo "$LOGPIPE" >/dev/null 2>&1); then + echoerror "Failed to create the named pipe required to log" + exit 1 +fi + +# What ever is written to the logpipe gets written to the logfile +tee < "$LOGPIPE" "$LOGFILE" & + +# Close STDOUT, reopen it directing it to the logpipe +exec 1>&- +exec 1>"$LOGPIPE" +# Close STDERR, reopen it directing it to the logpipe +exec 2>&- +exec 2>"$LOGPIPE" + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __exit_cleanup +# DESCRIPTION: Cleanup any leftovers after script has ended +# +# +# http://www.unix.com/man-page/POSIX/1posix/trap/ +# +# Signal Number Signal Name +# 1 SIGHUP +# 2 SIGINT +# 3 SIGQUIT +# 6 SIGABRT +# 9 SIGKILL +# 14 SIGALRM +# 15 SIGTERM +#---------------------------------------------------------------------------------------------------------------------- +APT_ERR=$(mktemp /tmp/apt_error.XXXXXX) +__exit_cleanup() { + EXIT_CODE=$? 
+ + if [ "$ITYPE" = "git" ] && [ -d "${_SALT_GIT_CHECKOUT_DIR}" ]; then + if [ $_KEEP_TEMP_FILES -eq $BS_FALSE ]; then + # Clean up the checked out repository + echodebug "Cleaning up the Salt Temporary Git Repository" + # shellcheck disable=SC2164 + cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" + rm -rf "${_SALT_GIT_CHECKOUT_DIR}" + #rm -rf "${_SALT_GIT_CHECKOUT_DIR}/deps" + else + echowarn "Not cleaning up the Salt Temporary git repository on request" + echowarn "Note that if you intend to re-run this script using the git approach, you might encounter some issues" + fi + fi + + # Remove the logging pipe when the script exits + if [ -p "$LOGPIPE" ]; then + echodebug "Removing the logging pipe $LOGPIPE" + rm -f "$LOGPIPE" + fi + + # Remove the temporary apt error file when the script exits + if [ -f "$APT_ERR" ]; then + echodebug "Removing the temporary apt error file $APT_ERR" + rm -f "$APT_ERR" + fi + + # Kill tee when exiting, CentOS, at least requires this + # shellcheck disable=SC2009 + TEE_PID=$(ps ax | grep tee | grep "$LOGFILE" | awk '{print $1}') + + [ "$TEE_PID" = "" ] && exit $EXIT_CODE + + echodebug "Killing logging pipe tee's with pid(s): $TEE_PID" + + # We need to trap errors since killing tee will cause a 127 errno + # We also do this as late as possible so we don't "mis-catch" other errors + __trap_errors() { + echoinfo "Errors Trapped: $EXIT_CODE" + # Exit with the "original" exit code, not the trapped code + exit $EXIT_CODE + } + trap "__trap_errors" INT ABRT QUIT TERM + + # Now we're "good" to kill tee + kill -s TERM "$TEE_PID" + + # In case the 127 errno is not triggered, exit with the "original" exit code + exit $EXIT_CODE +} +trap "__exit_cleanup" EXIT INT + + +# Let's discover how we're being called +# shellcheck disable=SC2009 +CALLER=$(ps -a -o pid,args | grep $$ | grep -v grep | tr -s ' ' | cut -d ' ' -f 3) + +if [ "${CALLER}x" = "${0}x" ]; then + CALLER="shell pipe" +fi + +echoinfo "Running version: ${__ScriptVersion}" +echoinfo "Executed by: ${CALLER}" +echoinfo "Command line: '${__ScriptFullName} ${__ScriptArgs}'" +#echowarn "Running the unstable version of ${__ScriptName}" + +# Define installation type +if [ "$#" -gt 0 ];then + __check_unparsed_options "$*" + ITYPE=$1 + shift +fi + +# Check installation type +if [ "$(echo "$ITYPE" | grep -E '(stable|testing|git)')" = "" ]; then + echoerror "Installation type \"$ITYPE\" is not known..." + exit 1 +fi + +# If doing a git install, check what branch/tag/sha will be checked out +if [ "$ITYPE" = "git" ]; then + if [ "$#" -eq 0 ];then + GIT_REV="master" + else + GIT_REV="$1" + shift + fi + + # Disable shell warning about unbound variable during git install + STABLE_REV="latest" + +# If doing stable install, check if version specified +elif [ "$ITYPE" = "stable" ]; then + if [ "$#" -eq 0 ];then + STABLE_REV="latest" + else + if [ "$(echo "$1" | grep -E '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3|2019\.2|3000|3001)$')" != "" ]; then + STABLE_REV="$1" + shift + elif [ "$(echo "$1" | grep -E '^(2[0-9]*\.[0-9]*\.[0-9]*|[3-9][0-9]{3}*(\.[0-9]*)?)$')" != "" ]; then + if [ "$(uname)" = "Darwin" ]; then + STABLE_REV="$1" + else + STABLE_REV="archive/$1" + fi + shift + else + echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, 2019.2, 3000, 3001, latest, \$MAJOR.\$MINOR.\$PATCH until 2019.2, \$MAJOR or \$MAJOR.\$PATCH starting from 3000)" + exit 1 + fi + fi +fi + +# Check for any unparsed arguments. Should be an error. 
+if [ "$#" -gt 0 ]; then + __usage + echo + echoerror "Too many arguments." + exit 1 +fi + +# whoami alternative for SunOS +if [ -f /usr/xpg4/bin/id ]; then + whoami='/usr/xpg4/bin/id -un' +else + whoami='whoami' +fi + +# Root permissions are required to run this script +if [ "$($whoami)" != "root" ]; then + echoerror "Salt requires root privileges to install. Please re-run this script as root." + exit 1 +fi + +# Check that we're actually installing one of minion/master/syndic +if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echowarn "Nothing to install or configure" + exit 1 +fi + +# Check that we're installing a minion if we're being passed a master address +if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_SALT_MASTER_ADDRESS" != "null" ]; then + echoerror "Don't pass a master address (-A) if no minion is going to be bootstrapped." + exit 1 +fi + +# Check that we're installing a minion if we're being passed a minion id +if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_SALT_MINION_ID" != "null" ]; then + echoerror "Don't pass a minion id (-i) if no minion is going to be bootstrapped." + exit 1 +fi + +# Check that we're installing or configuring a master if we're being passed a master config json dict +if [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then + if [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoerror "Don't pass a master config JSON dict (-J) if no master is going to be bootstrapped or configured." + exit 1 + fi +fi + +# Check that we're installing or configuring a minion if we're being passed a minion config json dict +if [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then + if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoerror "Don't pass a minion config JSON dict (-j) if no minion is going to be bootstrapped or configured." + exit 1 + fi +fi + +# Check if we're installing via a different Python executable and set major version variables +if [ -n "$_PY_EXE" ]; then + if [ "$(uname)" = "Darwin" ]; then + _PY_PKG_VER=$(echo "$_PY_EXE" | sed "s/\\.//g") + else + _PY_PKG_VER=$(echo "$_PY_EXE" | sed -E "s/\\.//g") + fi + + _PY_MAJOR_VERSION=$(echo "$_PY_PKG_VER" | cut -c 7) + if [ "$_PY_MAJOR_VERSION" != 3 ] && [ "$_PY_MAJOR_VERSION" != 2 ]; then + echoerror "Detected -x option, but Python major version is not 2 or 3." + echoerror "The -x option must be passed as python2, python27, or python2.7 (or use the Python '3' versions of examples)." + exit 1 + fi + + echoinfo "Detected -x option. Using $_PY_EXE to install Salt." +else + _PY_PKG_VER="" + _PY_MAJOR_VERSION="" +fi + +# If the configuration directory or archive does not exist, error out +if [ "$_TEMP_CONFIG_DIR" != "null" ]; then + _TEMP_CONFIG_DIR="$(__check_config_dir "$_TEMP_CONFIG_DIR")" + [ "$_TEMP_CONFIG_DIR" = "null" ] && exit 1 +fi + +# If the pre-seed keys directory does not exist, error out +if [ "$_TEMP_KEYS_DIR" != "null" ] && [ ! -d "$_TEMP_KEYS_DIR" ]; then + echoerror "The pre-seed keys directory ${_TEMP_KEYS_DIR} does not exist." 
+ exit 1 +fi + +# -a and -V only work from git +if [ "$ITYPE" != "git" ]; then + if [ $_PIP_ALL -eq $BS_TRUE ]; then + echoerror "Pip installing all python packages with -a is only possible when installing Salt via git" + exit 1 + fi + if [ "$_VIRTUALENV_DIR" != "null" ]; then + echoerror "Virtualenv installs via -V is only possible when installing Salt via git" + exit 1 + fi +fi + +# Set the _REPO_URL value based on if -R was passed or not. Defaults to repo.saltstack.com. +if [ "$_CUSTOM_REPO_URL" != "null" ]; then + _REPO_URL="$_CUSTOM_REPO_URL" + + # Check for -r since -R is being passed. Set -r with a warning. + if [ "$_DISABLE_REPOS" -eq $BS_FALSE ]; then + echowarn "Detected -R option. No other repositories will be configured when -R is used. Setting -r option to True." + _DISABLE_REPOS=$BS_TRUE + fi +fi + +# Check the _DISABLE_SSL value and set HTTP or HTTPS. +if [ "$_DISABLE_SSL" -eq $BS_TRUE ]; then + HTTP_VAL="http" +else + HTTP_VAL="https" +fi + +# Check the _QUIET_GIT_INSTALLATION value and set SETUP_PY_INSTALL_ARGS. +if [ "$_QUIET_GIT_INSTALLATION" -eq $BS_TRUE ]; then + SETUP_PY_INSTALL_ARGS="-q" +else + SETUP_PY_INSTALL_ARGS="" +fi + +# Handle the insecure flags +if [ "$_INSECURE_DL" -eq $BS_TRUE ]; then + _CURL_ARGS="${_CURL_ARGS} --insecure" + _FETCH_ARGS="${_FETCH_ARGS} --no-verify-peer" + _GPG_ARGS="${_GPG_ARGS} --keyserver-options no-check-cert" + _WGET_ARGS="${_WGET_ARGS} --no-check-certificate" +else + _GPG_ARGS="${_GPG_ARGS} --keyserver-options ca-cert-file=/etc/ssl/certs/ca-certificates.crt" +fi + +# Export the http_proxy configuration to our current environment +if [ "${_HTTP_PROXY}" != "" ]; then + export http_proxy="${_HTTP_PROXY}" + export https_proxy="${_HTTP_PROXY}" + # Using "deprecated" option here, but that appears the only way to make it work. 
+ # See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=818802 + # and https://bugs.launchpad.net/ubuntu/+source/gnupg2/+bug/1625848 + _GPG_ARGS="${_GPG_ARGS},http-proxy=${_HTTP_PROXY}" +fi + +# Work around for 'Docker + salt-bootstrap failure' https://github.com/saltstack/salt-bootstrap/issues/394 +if [ "${_DISABLE_SALT_CHECKS}" -eq $BS_FALSE ] && [ -f /tmp/disable_salt_checks ]; then + # shellcheck disable=SC2016 + echowarn 'Found file: /tmp/disable_salt_checks, setting _DISABLE_SALT_CHECKS=$BS_TRUE' + _DISABLE_SALT_CHECKS=$BS_TRUE +fi + +# Because -a can only be installed into virtualenv +if [ "${_PIP_ALL}" -eq $BS_TRUE ] && [ "${_VIRTUALENV_DIR}" = "null" ]; then + usage + # Could possibly set up a default virtualenv location when -a flag is passed + echoerror "Using -a requires -V because pip pkgs should be siloed from python system pkgs" + exit 1 +fi + +# Make sure virtualenv directory does not already exist +if [ -d "${_VIRTUALENV_DIR}" ]; then + echoerror "The directory ${_VIRTUALENV_DIR} for virtualenv already exists" + exit 1 +fi + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __fetch_url +# DESCRIPTION: Retrieves a URL and writes it to a given path +#---------------------------------------------------------------------------------------------------------------------- +__fetch_url() { + # shellcheck disable=SC2086 + curl $_CURL_ARGS -L -s -f -o "$1" "$2" >/dev/null 2>&1 || + wget $_WGET_ARGS -q -O "$1" "$2" >/dev/null 2>&1 || + fetch $_FETCH_ARGS -q -o "$1" "$2" >/dev/null 2>&1 || # FreeBSD + fetch -q -o "$1" "$2" >/dev/null 2>&1 || # Pre FreeBSD 10 + ftp -o "$1" "$2" >/dev/null 2>&1 || # OpenBSD + (echoerror "$2 failed to download to $1"; exit 1) +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __fetch_verify +# DESCRIPTION: Retrieves a URL, verifies its content and writes it to standard output +#---------------------------------------------------------------------------------------------------------------------- +__fetch_verify() { + fetch_verify_url="$1" + fetch_verify_sum="$2" + fetch_verify_size="$3" + + fetch_verify_tmpf=$(mktemp) && \ + __fetch_url "$fetch_verify_tmpf" "$fetch_verify_url" && \ + test "$(stat --format=%s "$fetch_verify_tmpf")" -eq "$fetch_verify_size" && \ + test "$(md5sum "$fetch_verify_tmpf" | awk '{ print $1 }')" = "$fetch_verify_sum" && \ + cat "$fetch_verify_tmpf" && \ + if rm -f "$fetch_verify_tmpf"; then + return 0 + fi + echo "Failed verification of $fetch_verify_url" + return 1 +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_hardware_info +# DESCRIPTION: Discover hardware information +#---------------------------------------------------------------------------------------------------------------------- +__gather_hardware_info() { + if [ -f /proc/cpuinfo ]; then + CPU_VENDOR_ID=$(awk '/vendor_id|Processor/ {sub(/-.*$/,"",$3); print $3; exit}' /proc/cpuinfo ) + elif [ -f /usr/bin/kstat ]; then + # SmartOS. + # Solaris!? 
+ # This has only been tested for a GenuineIntel CPU + CPU_VENDOR_ID=$(/usr/bin/kstat -p cpu_info:0:cpu_info0:vendor_id | awk '{print $2}') + else + CPU_VENDOR_ID=$( sysctl -n hw.model ) + fi + # shellcheck disable=SC2034 + CPU_VENDOR_ID_L=$( echo "$CPU_VENDOR_ID" | tr '[:upper:]' '[:lower:]' ) + CPU_ARCH=$(uname -m 2>/dev/null || uname -p 2>/dev/null || echo "unknown") + CPU_ARCH_L=$( echo "$CPU_ARCH" | tr '[:upper:]' '[:lower:]' ) +} +__gather_hardware_info + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_os_info +# DESCRIPTION: Discover operating system information +#---------------------------------------------------------------------------------------------------------------------- +__gather_os_info() { + OS_NAME=$(uname -s 2>/dev/null) + OS_NAME_L=$( echo "$OS_NAME" | tr '[:upper:]' '[:lower:]' ) + OS_VERSION=$(uname -r) + # shellcheck disable=SC2034 + OS_VERSION_L=$( echo "$OS_VERSION" | tr '[:upper:]' '[:lower:]' ) +} +__gather_os_info + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __parse_version_string +# DESCRIPTION: Parse version strings ignoring the revision. +# MAJOR.MINOR.REVISION becomes MAJOR.MINOR +#---------------------------------------------------------------------------------------------------------------------- +__parse_version_string() { + VERSION_STRING="$1" + PARSED_VERSION=$( + echo "$VERSION_STRING" | + sed -e 's/^/#/' \ + -e 's/^#[^0-9]*\([0-9][0-9]*\.[0-9][0-9]*\)\(\.[0-9][0-9]*\).*$/\1/' \ + -e 's/^#[^0-9]*\([0-9][0-9]*\.[0-9][0-9]*\).*$/\1/' \ + -e 's/^#[^0-9]*\([0-9][0-9]*\).*$/\1/' \ + -e 's/^#.*$//' + ) + echo "$PARSED_VERSION" +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __derive_debian_numeric_version +# DESCRIPTION: Derive the numeric version from a Debian version string. +#---------------------------------------------------------------------------------------------------------------------- +__derive_debian_numeric_version() { + NUMERIC_VERSION="" + INPUT_VERSION="$1" + if echo "$INPUT_VERSION" | grep -q '^[0-9]'; then + NUMERIC_VERSION="$INPUT_VERSION" + elif [ -z "$INPUT_VERSION" ] && [ -f "/etc/debian_version" ]; then + INPUT_VERSION="$(cat /etc/debian_version)" + fi + if [ -z "$NUMERIC_VERSION" ]; then + if [ "$INPUT_VERSION" = "wheezy/sid" ]; then + # I've found an EC2 wheezy image which did not tell its version + NUMERIC_VERSION=$(__parse_version_string "7.0") + elif [ "$INPUT_VERSION" = "jessie/sid" ]; then + NUMERIC_VERSION=$(__parse_version_string "8.0") + elif [ "$INPUT_VERSION" = "stretch/sid" ]; then + NUMERIC_VERSION=$(__parse_version_string "9.0") + elif [ "$INPUT_VERSION" = "buster/sid" ]; then + NUMERIC_VERSION=$(__parse_version_string "10.0") + else + echowarn "Unable to parse the Debian Version (codename: '$INPUT_VERSION')" + fi + fi + echo "$NUMERIC_VERSION" +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __unquote_string +# DESCRIPTION: Strip single or double quotes from the provided string. 
+#----------------------------------------------------------------------------------------------------------------------
+__unquote_string() {
+ # shellcheck disable=SC1117
+ echo "$*" | sed -e "s/^\([\"\']\)\(.*\)\1\$/\2/g"
+}
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __camelcase_split
+# DESCRIPTION: Convert 'CamelCased' strings to 'Camel Cased'
+#----------------------------------------------------------------------------------------------------------------------
+__camelcase_split() {
+ echo "$*" | sed -e 's/\([^[:upper:][:punct:]]\)\([[:upper:]]\)/\1 \2/g'
+}
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __strip_duplicates
+# DESCRIPTION: Strip duplicate strings
+#----------------------------------------------------------------------------------------------------------------------
+__strip_duplicates() {
+ echo "$*" | tr -s '[:space:]' '\n' | awk '!x[$0]++'
+}
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __sort_release_files
+# DESCRIPTION: Custom sort function. Alphabetical or numerical sort is not
+# enough.
+#----------------------------------------------------------------------------------------------------------------------
+__sort_release_files() {
+ KNOWN_RELEASE_FILES=$(echo "(arch|alpine|centos|debian|ubuntu|fedora|redhat|suse|\
+ mandrake|mandriva|gentoo|slackware|turbolinux|unitedlinux|void|lsb|system|\
+ oracle|os)(-|_)(release|version)" | sed -E 's:[[:space:]]::g')
+ primary_release_files=""
+ secondary_release_files=""
+ # Sort known vs. unknown files first
+ for release_file in $(echo "${@}" | sed -E 's:[[:space:]]:\n:g' | sort -f | uniq); do
+ match=$(echo "$release_file" | grep -E -i "${KNOWN_RELEASE_FILES}")
+ if [ "${match}" != "" ]; then
+ primary_release_files="${primary_release_files} ${release_file}"
+ else
+ secondary_release_files="${secondary_release_files} ${release_file}"
+ fi
+ done
+
+ # Now let's sort by known files' importance; the most important goes last in the max_prio list
+ max_prio="redhat-release centos-release oracle-release fedora-release"
+ for entry in $max_prio; do
+ if [ "$(echo "${primary_release_files}" | grep "$entry")" != "" ]; then
+ primary_release_files=$(echo "${primary_release_files}" | sed -e "s:\\(.*\\)\\($entry\\)\\(.*\\):\\2 \\1 \\3:g")
+ fi
+ done
+ # Now, the least important goes last in the min_prio list
+ min_prio="lsb-release"
+ for entry in $min_prio; do
+ if [ "$(echo "${primary_release_files}" | grep "$entry")" != "" ]; then
+ primary_release_files=$(echo "${primary_release_files}" | sed -e "s:\\(.*\\)\\($entry\\)\\(.*\\):\\1 \\3 \\2:g")
+ fi
+ done
+
+ # Echo the results collapsing multiple white-space into a single white-space
+ echo "${primary_release_files} ${secondary_release_files}" | sed -E 's:[[:space:]]+:\n:g'
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __gather_linux_system_info
+# DESCRIPTION: Discover Linux system information
+#----------------------------------------------------------------------------------------------------------------------
+__gather_linux_system_info() {
+ DISTRO_NAME=""
+ DISTRO_VERSION=""
+
+ # Let's test if the lsb_release binary is available
+ rv=$(lsb_release >/dev/null 2>&1)
+
+ # shellcheck disable=SC2181
+ if [ $? -eq 0 ]; then
+ DISTRO_NAME=$(lsb_release -si)
+ if [ "${DISTRO_NAME}" = "Scientific" ]; then
+ DISTRO_NAME="Scientific Linux"
+ elif [ "$(echo "$DISTRO_NAME" | grep ^CloudLinux)" != "" ]; then
+ DISTRO_NAME="Cloud Linux"
+ elif [ "$(echo "$DISTRO_NAME" | grep ^RedHat)" != "" ]; then
+ # Let's convert 'CamelCased' to 'Camel Cased'
+ n=$(__camelcase_split "$DISTRO_NAME")
+ # Skip setting DISTRO_NAME this time, splitting CamelCase has failed.
+ # See https://github.com/saltstack/salt-bootstrap/issues/918
+ [ "$n" = "$DISTRO_NAME" ] && DISTRO_NAME="" || DISTRO_NAME="$n"
+ elif [ "$( echo "${DISTRO_NAME}" | grep openSUSE )" != "" ]; then
+ # lsb_release -si returns "openSUSE Tumbleweed" on openSUSE Tumbleweed
+ # lsb_release -si returns "openSUSE project" on openSUSE 12.3
+ # lsb_release -si returns "openSUSE" on openSUSE 15.n
+ DISTRO_NAME="opensuse"
+ elif [ "${DISTRO_NAME}" = "SUSE LINUX" ]; then
+ if [ "$(lsb_release -sd | grep -i opensuse)" != "" ]; then
+ # openSUSE 12.2 reports SUSE LINUX on lsb_release -si
+ DISTRO_NAME="opensuse"
+ else
+ # lsb_release -si returns "SUSE LINUX" on SLES 11 SP3
+ DISTRO_NAME="suse"
+ fi
+ elif [ "${DISTRO_NAME}" = "EnterpriseEnterpriseServer" ]; then
+ # This is the Oracle Linux Enterprise ID used before Oracle Linux 5 Update 3
+ DISTRO_NAME="Oracle Linux"
+ elif [ "${DISTRO_NAME}" = "OracleServer" ]; then
+ # This is the Oracle Linux Server 6.5 ID
+ DISTRO_NAME="Oracle Linux"
+ elif [ "${DISTRO_NAME}" = "AmazonAMI" ] || [ "${DISTRO_NAME}" = "Amazon" ]; then
+ DISTRO_NAME="Amazon Linux AMI"
+ elif [ "${DISTRO_NAME}" = "ManjaroLinux" ]; then
+ DISTRO_NAME="Arch Linux"
+ elif [ "${DISTRO_NAME}" = "Arch" ]; then
+ DISTRO_NAME="Arch Linux"
+ return
+ fi
+ rv=$(lsb_release -sr)
+ [ "${rv}" != "" ] && DISTRO_VERSION=$(__parse_version_string "$rv")
+ elif [ -f /etc/lsb-release ]; then
+ # We don't have the lsb_release binary, but we do have the file it parses
+ DISTRO_NAME=$(grep DISTRIB_ID /etc/lsb-release | sed -e 's/.*=//')
+ rv=$(grep DISTRIB_RELEASE /etc/lsb-release | sed -e 's/.*=//')
+ [ "${rv}" != "" ] && DISTRO_VERSION=$(__parse_version_string "$rv")
+ fi
+
+ if [ "$DISTRO_NAME" != "" ] && [ "$DISTRO_VERSION" != "" ]; then
+ # We already have the distribution name and version
+ return
+ fi
+ # shellcheck disable=SC2035,SC2086
+ for rsource in $(__sort_release_files "$(
+ cd /etc && /bin/ls *[_-]release *[_-]version 2>/dev/null | env -i sort | \
+ sed -e '/^redhat-release$/d' -e '/^lsb-release$/d'; \
+ echo redhat-release lsb-release
+ )"); do
+
+ [ ! -f "/etc/${rsource}" ] && continue # Does not exist
+
+ n=$(echo "${rsource}" | sed -e 's/[_-]release$//' -e 's/[_-]version$//')
+ shortname=$(echo "${n}" | tr '[:upper:]' '[:lower:]')
+ if [ "$shortname" = "debian" ]; then
+ rv=$(__derive_debian_numeric_version "$(cat /etc/${rsource})")
+ else
+ rv=$( (grep VERSION "/etc/${rsource}"; cat "/etc/${rsource}") | grep '[0-9]' | sed -e 'q' )
+ fi
+ [ "${rv}" = "" ] && [ "$shortname" != "arch" ] && continue # There's no version information. Continue to next rsource
+ v=$(__parse_version_string "$rv")
+ case $shortname in
+ redhat )
+ if [ "$(grep -E 'CentOS' /etc/${rsource})" != "" ]; then
+ n="CentOS"
+ elif [ "$(grep -E 'Scientific' /etc/${rsource})" != "" ]; then
+ n="Scientific Linux"
+ elif [ "$(grep -E 'Red Hat Enterprise Linux' /etc/${rsource})" != "" ]; then
+ n="<R>ed <H>at <E>nterprise <L>inux"
+ else
+ n="<R>ed <H>at <L>inux"
+ fi
+ ;;
+ arch ) n="Arch Linux" ;;
+ alpine ) n="Alpine Linux" ;;
+ centos ) n="CentOS" ;;
+ debian ) n="Debian" ;;
+ ubuntu ) n="Ubuntu" ;;
+ fedora ) n="Fedora" ;;
+ suse|opensuse ) n="SUSE" ;;
+ mandrake*|mandriva ) n="Mandriva" ;;
+ gentoo ) n="Gentoo" ;;
+ slackware ) n="Slackware" ;;
+ turbolinux ) n="TurboLinux" ;;
+ unitedlinux ) n="UnitedLinux" ;;
+ void ) n="VoidLinux" ;;
+ oracle ) n="Oracle Linux" ;;
+ system )
+ while read -r line; do
+ [ "${n}x" != "systemx" ] && break
+ case "$line" in
+ *Amazon*Linux*AMI*)
+ n="Amazon Linux AMI"
+ break
+ esac
+ done < "/etc/${rsource}"
+ ;;
+ os )
+ nn="$(__unquote_string "$(grep '^ID=' /etc/os-release | sed -e 's/^ID=\(.*\)$/\1/g')")"
+ rv="$(__unquote_string "$(grep '^VERSION_ID=' /etc/os-release | sed -e 's/^VERSION_ID=\(.*\)$/\1/g')")"
+ [ "${rv}" != "" ] && v=$(__parse_version_string "$rv") || v=""
+ case $(echo "${nn}" | tr '[:upper:]' '[:lower:]') in
+ alpine )
+ n="Alpine Linux"
+ v="${rv}"
+ ;;
+ amzn )
+ # Amazon AMI's after 2014.09 match here
+ n="Amazon Linux AMI"
+ ;;
+ arch )
+ n="Arch Linux"
+ v="" # Arch Linux does not provide a version.
+ ;;
+ cloudlinux )
+ n="Cloud Linux"
+ ;;
+ debian )
+ n="Debian"
+ v=$(__derive_debian_numeric_version "$v")
+ ;;
+ sles )
+ n="SUSE"
+ v="${rv}"
+ ;;
+ opensuse-* )
+ n="opensuse"
+ v="${rv}"
+ ;;
+ * )
+ n=${nn}
+ ;;
+ esac
+ ;;
+ * ) n="${n}" ;
+ esac
+ DISTRO_NAME=$n
+ DISTRO_VERSION=$v
+ break
+ done
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __install_python()
+# DESCRIPTION: Install a different version of python on a host. Currently this has only been tested on CentOS 6 and
+# is considered experimental.
+#----------------------------------------------------------------------------------------------------------------------
+__install_python() {
+ if [ "$_PY_EXE" = "" ]; then
+ echoerror "Must specify -x with -y to install a specific python version"
+ exit 1
+ fi
+
+ __PACKAGES="$_PY_PKG_VER"
+
+ if [ ${_DISABLE_REPOS} -eq ${BS_FALSE} ]; then
+ echoinfo "Attempting to install a repo to help provide a separate python package"
+ echoinfo "$DISTRO_NAME_L"
+ case "$DISTRO_NAME_L" in
+ "red_hat"|"centos")
+ __PYTHON_REPO_URL="https://repo.ius.io/ius-release-el${DISTRO_MAJOR_VERSION}.rpm"
+ ;;
+ *)
+ echoerror "Installing a repo to provide a python package is only supported on Redhat/CentOS.
+If a repo is already available, please try running script with -r."
+ exit 1 + ;; + esac + + echoinfo "Installing IUS repo" + __yum_install_noinput "${__PYTHON_REPO_URL}" || return 1 + fi + + echoinfo "Installing ${__PACKAGES}" + __yum_install_noinput "${__PACKAGES}" || return 1 +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_sunos_system_info +# DESCRIPTION: Discover SunOS system info +#---------------------------------------------------------------------------------------------------------------------- +__gather_sunos_system_info() { + if [ -f /sbin/uname ]; then + DISTRO_VERSION=$(/sbin/uname -X | awk '/[kK][eE][rR][nN][eE][lL][iI][dD]/ { print $3 }') + fi + + DISTRO_NAME="" + if [ -f /etc/release ]; then + while read -r line; do + [ "${DISTRO_NAME}" != "" ] && break + case "$line" in + *OpenIndiana*oi_[0-9]*) + DISTRO_NAME="OpenIndiana" + DISTRO_VERSION=$(echo "$line" | sed -nE "s/OpenIndiana(.*)oi_([[:digit:]]+)(.*)/\\2/p") + break + ;; + *OpenSolaris*snv_[0-9]*) + DISTRO_NAME="OpenSolaris" + DISTRO_VERSION=$(echo "$line" | sed -nE "s/OpenSolaris(.*)snv_([[:digit:]]+)(.*)/\\2/p") + break + ;; + *Oracle*Solaris*[0-9]*) + DISTRO_NAME="Oracle Solaris" + DISTRO_VERSION=$(echo "$line" | sed -nE "s/(Oracle Solaris) ([[:digit:]]+)(.*)/\\2/p") + break + ;; + *Solaris*) + DISTRO_NAME="Solaris" + # Let's make sure we not actually on a Joyent's SmartOS VM since some releases + # don't have SmartOS in `/etc/release`, only `Solaris` + if uname -v | grep joyent >/dev/null 2>&1; then + DISTRO_NAME="SmartOS" + fi + break + ;; + *NexentaCore*) + DISTRO_NAME="Nexenta Core" + break + ;; + *SmartOS*) + DISTRO_NAME="SmartOS" + break + ;; + *OmniOS*) + DISTRO_NAME="OmniOS" + DISTRO_VERSION=$(echo "$line" | awk '{print $3}') + _SIMPLIFY_VERSION=$BS_FALSE + break + ;; + esac + done < /etc/release + fi + + if [ "${DISTRO_NAME}" = "" ]; then + DISTRO_NAME="Solaris" + DISTRO_VERSION=$( + echo "${OS_VERSION}" | + sed -e 's;^4\.;1.;' \ + -e 's;^5\.\([0-6]\)[^0-9]*$;2.\1;' \ + -e 's;^5\.\([0-9][0-9]*\).*;\1;' + ) + fi + + if [ "${DISTRO_NAME}" = "SmartOS" ]; then + VIRTUAL_TYPE="smartmachine" + if [ "$(zonename)" = "global" ]; then + VIRTUAL_TYPE="global" + fi + fi +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_bsd_system_info +# DESCRIPTION: Discover OpenBSD, NetBSD and FreeBSD systems information +#---------------------------------------------------------------------------------------------------------------------- +__gather_bsd_system_info() { + DISTRO_NAME=${OS_NAME} + DISTRO_VERSION=$(echo "${OS_VERSION}" | sed -e 's;[()];;' -e 's/-.*$//') +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_osx_system_info +# DESCRIPTION: Discover MacOS X +#---------------------------------------------------------------------------------------------------------------------- +__gather_osx_system_info() { + DISTRO_NAME="MacOSX" + DISTRO_VERSION=$(sw_vers -productVersion) +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_system_info +# DESCRIPTION: Discover which system and distribution we are running. 
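+# (Dispatches on $OS_NAME_L below: linux, sunos, the BSDs and darwin each call their
+# own __gather_*_system_info helper; any other OS aborts with an error.)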
+#---------------------------------------------------------------------------------------------------------------------- +__gather_system_info() { + case ${OS_NAME_L} in + linux ) + __gather_linux_system_info + ;; + sunos ) + __gather_sunos_system_info + ;; + openbsd|freebsd|netbsd ) + __gather_bsd_system_info + ;; + darwin ) + __gather_osx_system_info + ;; + * ) + echoerror "${OS_NAME} not supported."; + exit 1 + ;; + esac + +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __ubuntu_derivatives_translation +# DESCRIPTION: Map Ubuntu derivatives to their Ubuntu base versions. +# If distro has a known Ubuntu base version, use those install +# functions by pretending to be Ubuntu (i.e. change global vars) +#---------------------------------------------------------------------------------------------------------------------- +# shellcheck disable=SC2034 +__ubuntu_derivatives_translation() { + UBUNTU_DERIVATIVES="(trisquel|linuxmint|linaro|elementary_os|neon)" + # Mappings + trisquel_6_ubuntu_base="12.04" + linuxmint_13_ubuntu_base="12.04" + linuxmint_17_ubuntu_base="14.04" + linuxmint_18_ubuntu_base="16.04" + linuxmint_19_ubuntu_base="18.04" + linaro_12_ubuntu_base="12.04" + elementary_os_02_ubuntu_base="12.04" + neon_16_ubuntu_base="16.04" + neon_18_ubuntu_base="18.04" + neon_20_ubuntu_base="20.04" + + # Translate Ubuntu derivatives to their base Ubuntu version + match=$(echo "$DISTRO_NAME_L" | grep -E ${UBUNTU_DERIVATIVES}) + + if [ "${match}" != "" ]; then + case $match in + "elementary_os") + _major=$(echo "$DISTRO_VERSION" | sed 's/\.//g') + ;; + "linuxmint") + export LSB_ETC_LSB_RELEASE=/etc/upstream-release/lsb-release + _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + ;; + *) + _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + ;; + esac + + _ubuntu_version=$(eval echo "\$${match}_${_major}_ubuntu_base") + + if [ "$_ubuntu_version" != "" ]; then + echodebug "Detected Ubuntu $_ubuntu_version derivative" + DISTRO_NAME_L="ubuntu" + DISTRO_VERSION="$_ubuntu_version" + fi + fi +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_dpkg_architecture +# DESCRIPTION: Determine the primary architecture for packages to install on Debian and derivatives +# and issue all necessary error messages. +#---------------------------------------------------------------------------------------------------------------------- +__check_dpkg_architecture() { + if __check_command_exists dpkg; then + DPKG_ARCHITECTURE="$(dpkg --print-architecture)" + else + echoerror "dpkg: command not found." + return 1 + fi + + __REPO_ARCH="$DPKG_ARCHITECTURE" + __REPO_ARCH_DEB='deb' + __return_code=0 + + case $DPKG_ARCHITECTURE in + "i386") + error_msg="$_REPO_URL likely doesn't have all required 32-bit packages for $DISTRO_NAME $DISTRO_MAJOR_VERSION." + # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location + __REPO_ARCH="amd64" + ;; + "amd64") + error_msg="" + ;; + "arm64") + if [ "$_CUSTOM_REPO_URL" != "null" ]; then + warn_msg="Support for arm64 is experimental, make sure the custom repository used has the expected structure and contents." 
+ else + # Saltstack official repository does not yet have arm64 metadata, + # use amd64 repositories on arm64, since all pkgs are arch-independent + __REPO_ARCH="amd64" + __REPO_ARCH_DEB="deb [arch=$__REPO_ARCH]" + warn_msg="Support for arm64 packages is experimental and might rely on architecture-independent packages from the amd64 repository." + fi + error_msg="" + ;; + "armhf") + if [ "$DISTRO_NAME_L" = "ubuntu" ] || [ "$DISTRO_MAJOR_VERSION" -lt 8 ]; then + error_msg="Support for armhf packages at $_REPO_URL is limited to Debian/Raspbian 8 platforms." + __return_code=1 + else + error_msg="" + fi + ;; + *) + error_msg="$_REPO_URL doesn't have packages for your system architecture: $DPKG_ARCHITECTURE." + __return_code=1 + ;; + esac + + if [ "${warn_msg:-}" != "" ]; then + # AArch64: Do not fail at this point, but warn the user about experimental support + # See https://github.com/saltstack/salt-bootstrap/issues/1240 + echowarn "${warn_msg}" + fi + if [ "${error_msg}" != "" ]; then + echoerror "${error_msg}" + if [ "$ITYPE" != "git" ]; then + echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2017.7.2." + echoerror "It may be necessary to use git installation mode with pip and disable the SaltStack apt repository." + echoerror "For example:" + echoerror " sh ${__ScriptName} -r -P git v2017.7.2" + fi + fi + + if [ "${__return_code}" -eq 0 ]; then + return 0 + else + return 1 + fi +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __ubuntu_codename_translation +# DESCRIPTION: Map Ubuntu major versions to their corresponding codenames +#---------------------------------------------------------------------------------------------------------------------- +# shellcheck disable=SC2034 +__ubuntu_codename_translation() { + case $DISTRO_MINOR_VERSION in + "04") + _april="yes" + ;; + "10") + _april="" + ;; + *) + _april="yes" + ;; + esac + + case $DISTRO_MAJOR_VERSION in + "12") + DISTRO_CODENAME="precise" + ;; + "14") + DISTRO_CODENAME="trusty" + ;; + "16") + DISTRO_CODENAME="xenial" + ;; + "18") + DISTRO_CODENAME="bionic" + ;; + "20") + DISTRO_CODENAME="focal" + ;; + *) + DISTRO_CODENAME="trusty" + ;; + esac +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __debian_derivatives_translation +# DESCRIPTION: Map Debian derivatives to their Debian base versions. +# If distro has a known Debian base version, use those install +# functions by pretending to be Debian (i.e. change global vars) +#---------------------------------------------------------------------------------------------------------------------- +# shellcheck disable=SC2034 +__debian_derivatives_translation() { + # If the file does not exist, return + [ ! 
-f /etc/os-release ] && return
+
+ DEBIAN_DERIVATIVES="(cumulus|devuan|kali|linuxmint|raspbian|bunsenlabs|turnkey)"
+ # Mappings
+ cumulus_2_debian_base="7.0"
+ cumulus_3_debian_base="8.0"
+ cumulus_4_debian_base="10.0"
+ devuan_1_debian_base="8.0"
+ devuan_2_debian_base="9.0"
+ kali_1_debian_base="7.0"
+ linuxmint_1_debian_base="8.0"
+ raspbian_8_debian_base="8.0"
+ raspbian_9_debian_base="9.0"
+ raspbian_10_debian_base="10.0"
+ bunsenlabs_9_debian_base="9.0"
+ turnkey_9_debian_base="9.0"
+
+ # Translate Debian derivatives to their base Debian version
+ match=$(echo "$DISTRO_NAME_L" | grep -E ${DEBIAN_DERIVATIVES})
+
+ if [ "${match}" != "" ]; then
+ case $match in
+ cumulus*)
+ _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+ _debian_derivative="cumulus"
+ ;;
+ devuan)
+ _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+ _debian_derivative="devuan"
+ ;;
+ kali)
+ _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+ _debian_derivative="kali"
+ ;;
+ linuxmint)
+ _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+ _debian_derivative="linuxmint"
+ ;;
+ raspbian)
+ _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+ _debian_derivative="raspbian"
+ ;;
+ bunsenlabs)
+ _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+ _debian_derivative="bunsenlabs"
+ ;;
+ turnkey)
+ _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+ _debian_derivative="turnkey"
+ ;;
+ esac
+
+ _debian_version=$(eval echo "\$${_debian_derivative}_${_major}_debian_base" 2>/dev/null)
+
+ if [ "$_debian_version" != "" ]; then
+ echodebug "Detected Debian $_debian_version derivative"
+ DISTRO_NAME_L="debian"
+ DISTRO_VERSION="$_debian_version"
+ DISTRO_MAJOR_VERSION="$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')"
+ fi
+ fi
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __debian_codename_translation
+# DESCRIPTION: Map Debian major versions to their corresponding code names
+#----------------------------------------------------------------------------------------------------------------------
+# shellcheck disable=SC2034
+__debian_codename_translation() {
+
+ case $DISTRO_MAJOR_VERSION in
+ "7")
+ DISTRO_CODENAME="wheezy"
+ ;;
+ "8")
+ DISTRO_CODENAME="jessie"
+ ;;
+ "9")
+ DISTRO_CODENAME="stretch"
+ ;;
+ "10")
+ DISTRO_CODENAME="buster"
+ ;;
+ *)
+ DISTRO_CODENAME="jessie"
+ ;;
+ esac
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __check_end_of_life_versions
+# DESCRIPTION: Check for end of life distribution versions
+#----------------------------------------------------------------------------------------------------------------------
+__check_end_of_life_versions() {
+ case "${DISTRO_NAME_L}" in
+ debian)
+ # Debian versions below 8 are not supported
+ if [ "$DISTRO_MAJOR_VERSION" -lt 8 ]; then
+ echoerror "End of life distributions are not supported."
+ echoerror "Please consider upgrading to the next stable. See:"
+ echoerror " https://wiki.debian.org/DebianReleases"
+ exit 1
+ fi
+ ;;
+
+ ubuntu)
+ # Ubuntu versions not supported
+ #
+ # < 14.04
+ # = 14.10
+ # = 15.04, 15.10
+ # = 16.10
+ # = 17.04, 17.10
+ if [ "$DISTRO_MAJOR_VERSION" -lt 14 ] || \
+ [ "$DISTRO_MAJOR_VERSION" -eq 15 ] || \
+ [ "$DISTRO_MAJOR_VERSION" -eq 17 ] || \
+ { [ "$DISTRO_MAJOR_VERSION" -eq 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; }; then
+ echoerror "End of life distributions are not supported."
+ echoerror "Please consider upgrading to the next stable. See:"
+ echoerror " https://wiki.ubuntu.com/Releases"
+ exit 1
+ fi
+ ;;
+
+ opensuse)
+ # openSUSE versions not supported
+ #
+ # <= 13.X
+ # <= 42.2
+ if [ "$DISTRO_MAJOR_VERSION" -lt 15 ] || \
+ { [ "$DISTRO_MAJOR_VERSION" -eq 42 ] && [ "$DISTRO_MINOR_VERSION" -le 2 ]; }; then
+ echoerror "End of life distributions are not supported."
+ echoerror "Please consider upgrading to the next stable. See:"
+ echoerror " http://en.opensuse.org/Lifetime"
+ exit 1
+ fi
+ ;;
+
+ suse)
+ # SuSE versions not supported
+ #
+ # < 11 SP4
+ # < 12 SP2
+ # < 15 SP1
+ SUSE_PATCHLEVEL=$(awk -F'=' '/VERSION_ID/ { print $2 }' /etc/os-release | grep -oP "\.\K\w+")
+ if [ "${SUSE_PATCHLEVEL}" = "" ]; then
+ SUSE_PATCHLEVEL="00"
+ fi
+ if [ "$DISTRO_MAJOR_VERSION" -lt 11 ] || \
+ { [ "$DISTRO_MAJOR_VERSION" -eq 11 ] && [ "$SUSE_PATCHLEVEL" -lt 04 ]; } || \
+ { [ "$DISTRO_MAJOR_VERSION" -eq 15 ] && [ "$SUSE_PATCHLEVEL" -lt 01 ]; } || \
+ { [ "$DISTRO_MAJOR_VERSION" -eq 12 ] && [ "$SUSE_PATCHLEVEL" -lt 02 ]; }; then
+ echoerror "Versions lower than SuSE 11 SP4, 12 SP2 or 15 SP1 are not supported."
+ echoerror "Please consider upgrading to the next stable"
+ echoerror " https://www.suse.com/lifecycle/"
+ exit 1
+ fi
+ ;;
+
+ fedora)
+ # Fedora versions lower than 30 are no longer supported
+ if [ "$DISTRO_MAJOR_VERSION" -lt 30 ]; then
+ echoerror "End of life distributions are not supported."
+ echoerror "Please consider upgrading to the next stable. See:"
+ echoerror " https://fedoraproject.org/wiki/Releases"
+ exit 1
+ fi
+ ;;
+
+ centos)
+ # CentOS versions lower than 6 are no longer supported
+ if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then
+ echoerror "End of life distributions are not supported."
+ echoerror "Please consider upgrading to the next stable. See:"
+ echoerror " http://wiki.centos.org/Download"
+ exit 1
+ fi
+ ;;
+
+ red_hat*linux)
+ # Red Hat (Enterprise) Linux versions lower than 6 are no longer supported
+ if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then
+ echoerror "End of life distributions are not supported."
+ echoerror "Please consider upgrading to the next stable. See:"
+ echoerror " https://access.redhat.com/support/policy/updates/errata/"
+ exit 1
+ fi
+ ;;
+
+ oracle*linux)
+ # Oracle Linux versions lower than 6 are no longer supported
+ if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then
+ echoerror "End of life distributions are not supported."
+ echoerror "Please consider upgrading to the next stable. See:"
+ echoerror " http://www.oracle.com/us/support/library/elsp-lifetime-069338.pdf"
+ exit 1
+ fi
+ ;;
+
+ scientific*linux)
+ # Scientific Linux versions lower than 6 are no longer supported
+ if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then
+ echoerror "End of life distributions are not supported."
+ echoerror "Please consider upgrading to the next stable. 
See:" + echoerror " https://www.scientificlinux.org/downloads/sl-versions/" + exit 1 + fi + ;; + + cloud*linux) + # Cloud Linux versions lower than 6 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://docs.cloudlinux.com/index.html?cloudlinux_life-cycle.html" + exit 1 + fi + ;; + + amazon*linux*ami) + # Amazon Linux versions lower than 2012.0X no longer supported + # Except for Amazon Linux 2, which reset the major version counter + if [ "$DISTRO_MAJOR_VERSION" -lt 2012 ] && [ "$DISTRO_MAJOR_VERSION" -gt 10 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://aws.amazon.com/amazon-linux-ami/" + exit 1 + fi + ;; + + freebsd) + # FreeBSD versions lower than 11 are EOL + if [ "$DISTRO_MAJOR_VERSION" -lt 11 ]; then + echoerror "Versions lower than FreeBSD 11 are EOL and no longer supported." + exit 1 + fi + ;; + + *) + ;; + esac +} + + +__gather_system_info + +echo +echoinfo "System Information:" +echoinfo " CPU: ${CPU_VENDOR_ID}" +echoinfo " CPU Arch: ${CPU_ARCH}" +echoinfo " OS Name: ${OS_NAME}" +echoinfo " OS Version: ${OS_VERSION}" +echoinfo " Distribution: ${DISTRO_NAME} ${DISTRO_VERSION}" +echo + +# Simplify distro name naming on functions +DISTRO_NAME_L=$(echo "$DISTRO_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-zA-Z0-9_ ]//g' | sed -Ee 's/([[:space:]])+/_/g' | sed -Ee 's/tumbleweed//' ) + +# Simplify version naming on functions +if [ "$DISTRO_VERSION" = "" ] || [ ${_SIMPLIFY_VERSION} -eq $BS_FALSE ]; then + DISTRO_MAJOR_VERSION="" + DISTRO_MINOR_VERSION="" + PREFIXED_DISTRO_MAJOR_VERSION="" + PREFIXED_DISTRO_MINOR_VERSION="" +else + DISTRO_MAJOR_VERSION=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + DISTRO_MINOR_VERSION=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).\([0-9]*\).*/\2/g') + PREFIXED_DISTRO_MAJOR_VERSION="_${DISTRO_MAJOR_VERSION}" + if [ "${PREFIXED_DISTRO_MAJOR_VERSION}" = "_" ]; then + PREFIXED_DISTRO_MAJOR_VERSION="" + fi + PREFIXED_DISTRO_MINOR_VERSION="_${DISTRO_MINOR_VERSION}" + if [ "${PREFIXED_DISTRO_MINOR_VERSION}" = "_" ]; then + PREFIXED_DISTRO_MINOR_VERSION="" + fi +fi + +# For Ubuntu derivatives, pretend to be their Ubuntu base version +__ubuntu_derivatives_translation + +# For Debian derivates, pretend to be their Debian base version +__debian_derivatives_translation + +# Fail soon for end of life versions +__check_end_of_life_versions + +echodebug "Binaries will be searched using the following \$PATH: ${PATH}" + +# Let users know that we'll use a proxy +if [ "${_HTTP_PROXY}" != "" ]; then + echoinfo "Using http proxy $_HTTP_PROXY" +fi + +# Let users know what's going to be installed/configured +if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Installing minion" + else + echoinfo "Configuring minion" + fi +fi + +if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Installing master" + else + echoinfo "Configuring master" + fi +fi + +if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Installing syndic" + else + echoinfo "Configuring syndic" + fi +fi + +if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Installing salt-cloud and required python-libcloud package" +fi + +if [ $_START_DAEMONS -eq 
$BS_FALSE ]; then + echoinfo "Daemons will not be started" +fi + +if [ "${DISTRO_NAME_L}" = "ubuntu" ]; then + # For ubuntu versions, obtain the codename from the release version + __ubuntu_codename_translation +elif [ "${DISTRO_NAME_L}" = "debian" ]; then + # For debian versions, obtain the codename from the release version + __debian_codename_translation +fi + +if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(debian|ubuntu|centos|red_hat|oracle|scientific|amazon|fedora|macosx)')" = "" ] && [ "$ITYPE" = "stable" ] && [ "$STABLE_REV" != "latest" ]; then + echoerror "${DISTRO_NAME} does not have major version pegged packages support" + exit 1 +fi + +# Only RedHat based distros have testing support +if [ "${ITYPE}" = "testing" ]; then + if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(centos|red_hat|amazon|oracle)')" = "" ]; then + echoerror "${DISTRO_NAME} does not have testing packages support" + exit 1 + fi + _EPEL_REPO="epel-testing" +fi + +# Only Ubuntu has support for installing to virtualenvs +if [ "${DISTRO_NAME_L}" != "ubuntu" ] && [ "$_VIRTUALENV_DIR" != "null" ]; then + echoerror "${DISTRO_NAME} does not have -V support" + exit 1 +fi + +# Only Ubuntu has support for pip installing all packages +if [ "${DISTRO_NAME_L}" != "ubuntu" ] && [ $_PIP_ALL -eq $BS_TRUE ]; then + echoerror "${DISTRO_NAME} does not have -a support" + exit 1 +fi + +if [ "$ITYPE" = "git" ]; then + + if [ "${GIT_REV}" = "master" ]; then + _POST_NEON_INSTALL=$BS_TRUE + __TAG_REGEX_MATCH="MATCH" + else + case ${OS_NAME_L} in + openbsd|freebsd|netbsd|darwin ) + __NEW_VS_TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed -E 's/^(v?3[0-9]{3}(\.[0-9]{1,2})?).*$/MATCH/') + if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then + _POST_NEON_INSTALL=$BS_TRUE + __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" + if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then + # We do this to properly clone tags + GIT_REV="v${GIT_REV}" + fi + echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" + else + __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed -E 's/^(v?[0-9]{1,4}\.[0-9]{1,2})(\.[0-9]{1,2})?.*$/MATCH/') + echodebug "Pre Neon Tag Regex Match On: ${GIT_REV}" + fi + ;; + * ) + __NEW_VS_TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed 's/^.*\(v\?3[[:digit:]]\{3\}\(\.[[:digit:]]\{1,2\}\)\?\).*$/MATCH/') + if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then + _POST_NEON_INSTALL=$BS_TRUE + __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" + if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then + # We do this to properly clone tags + GIT_REV="v${GIT_REV}" + fi + echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" + else + __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed 's/^.*\(v\?[[:digit:]]\{1,4\}\.[[:digit:]]\{1,2\}\)\(\.[[:digit:]]\{1,2\}\)\?.*$/MATCH/') + echodebug "Pre Neon Tag Regex Match On: ${GIT_REV}" + fi + ;; + esac + fi + + if [ "$_POST_NEON_INSTALL" -eq $BS_TRUE ]; then + echo + echowarn "Post Neon git based installations will always install salt" + echowarn "and its dependencies using pip which will be upgraded to" + echowarn "at least v${_MINIMUM_PIP_VERSION}, and, in case the setuptools version is also" + echowarn "too old, it will be upgraded to at least v${_MINIMUM_SETUPTOOLS_VERSION}" + echo + echowarn "You have 10 seconds to cancel and stop the bootstrap process..." 
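+ # Deliberate pause: gives the user a chance to Ctrl-C before pip starts
+ # upgrading pip/setuptools as warned above.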
+ echo + sleep 10 + _PIP_ALLOWED=$BS_TRUE + fi +fi + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __function_defined +# DESCRIPTION: Checks if a function is defined within this scripts scope +# PARAMETERS: function name +# RETURNS: 0 or 1 as in defined or not defined +#---------------------------------------------------------------------------------------------------------------------- +__function_defined() { + FUNC_NAME=$1 + if [ "$(command -v "$FUNC_NAME")" != "" ]; then + echoinfo "Found function $FUNC_NAME" + return 0 + fi + echodebug "$FUNC_NAME not found...." + return 1 +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __wait_for_apt +# DESCRIPTION: Check if any apt, apt-get, aptitude, or dpkg processes are running before +# calling these again. This is useful when these process calls are part of +# a boot process, such as on AWS AMIs. This func will wait until the boot +# process is finished so the script doesn't exit on a locked proc. +#---------------------------------------------------------------------------------------------------------------------- +__wait_for_apt(){ + # Timeout set at 15 minutes + WAIT_TIMEOUT=900 + + # Run our passed in apt command + "${@}" 2>"$APT_ERR" + APT_RETURN=$? + + # Make sure we're not waiting on a lock + while [ $APT_RETURN -ne 0 ] && grep -q '^E: Could not get lock' "$APT_ERR"; do + echoinfo "Aware of the lock. Patiently waiting $WAIT_TIMEOUT more seconds..." + sleep 1 + WAIT_TIMEOUT=$((WAIT_TIMEOUT - 1)) + + if [ "$WAIT_TIMEOUT" -eq 0 ]; then + echoerror "Apt, apt-get, aptitude, or dpkg process is taking too long." + echoerror "Bootstrap script cannot proceed. Aborting." + return 1 + else + "${@}" 2>"$APT_ERR" + APT_RETURN=$? + fi + done + + return $APT_RETURN +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __apt_get_install_noinput +# DESCRIPTION: (DRY) apt-get install with noinput options +# PARAMETERS: packages +#---------------------------------------------------------------------------------------------------------------------- +__apt_get_install_noinput() { + __wait_for_apt apt-get install -y -o DPkg::Options::=--force-confold "${@}"; return $? +} # ---------- end of function __apt_get_install_noinput ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __apt_get_upgrade_noinput +# DESCRIPTION: (DRY) apt-get upgrade with noinput options +#---------------------------------------------------------------------------------------------------------------------- +__apt_get_upgrade_noinput() { + __wait_for_apt apt-get upgrade -y -o DPkg::Options::=--force-confold; return $? +} # ---------- end of function __apt_get_upgrade_noinput ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __temp_gpg_pub +# DESCRIPTION: Create a temporary file for downloading a GPG public key. 
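+# EXAMPLE: (illustrative) tempfile="$(__temp_gpg_pub)" -> /tmp/salt-gpg-aB3dE9xq.pub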
+#---------------------------------------------------------------------------------------------------------------------- +__temp_gpg_pub() { + if __check_command_exists mktemp; then + tempfile="$(mktemp /tmp/salt-gpg-XXXXXXXX.pub 2>/dev/null)" + + if [ -z "$tempfile" ]; then + echoerror "Failed to create temporary file in /tmp" + return 1 + fi + else + tempfile="/tmp/salt-gpg-$$.pub" + fi + + echo $tempfile +} # ----------- end of function __temp_gpg_pub ----------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __apt_key_fetch +# DESCRIPTION: Download and import GPG public key for "apt-secure" +# PARAMETERS: url +#---------------------------------------------------------------------------------------------------------------------- +__apt_key_fetch() { + url=$1 + + tempfile="$(__temp_gpg_pub)" + + __fetch_url "$tempfile" "$url" || return 1 + apt-key add "$tempfile" || return 1 + rm -f "$tempfile" + + return 0 +} # ---------- end of function __apt_key_fetch ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __rpm_import_gpg +# DESCRIPTION: Download and import GPG public key to rpm database +# PARAMETERS: url +#---------------------------------------------------------------------------------------------------------------------- +__rpm_import_gpg() { + url=$1 + + tempfile="$(__temp_gpg_pub)" + + __fetch_url "$tempfile" "$url" || return 1 + + # At least on CentOS 8, a missing newline at the end causes: + # error: /tmp/salt-gpg-n1gKUb1u.pub: key 1 not an armored public key. + # shellcheck disable=SC1003,SC2086 + sed -i -e '$a\' $tempfile + + rpm --import "$tempfile" || return 1 + rm -f "$tempfile" + + return 0 +} # ---------- end of function __rpm_import_gpg ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __yum_install_noinput +# DESCRIPTION: (DRY) yum install with noinput options +#---------------------------------------------------------------------------------------------------------------------- +__yum_install_noinput() { + + ENABLE_EPEL_CMD="" + # Skip Amazon Linux for the first round, since EPEL is no longer required. + # See issue #724 + if [ $_DISABLE_REPOS -eq $BS_FALSE ] && [ "$DISTRO_NAME_L" != "amazon_linux_ami" ]; then + ENABLE_EPEL_CMD="--enablerepo=${_EPEL_REPO}" + fi + + if [ "$DISTRO_NAME_L" = "oracle_linux" ]; then + # We need to install one package at a time because --enablerepo=X disables ALL OTHER REPOS!!!! + for package in "${@}"; do + yum -y install "${package}" || yum -y install "${package}" ${ENABLE_EPEL_CMD} || return $? + done + else + yum -y install "${@}" ${ENABLE_EPEL_CMD} || return $? + fi +} # ---------- end of function __yum_install_noinput ---------- + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __dnf_install_noinput +# DESCRIPTION: (DRY) dnf install with noinput options +#---------------------------------------------------------------------------------------------------------------------- +__dnf_install_noinput() { + + dnf -y install "${@}" || return $? 
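+ # Illustrative usage (hypothetical package names): __dnf_install_noinput python3-pip gcc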
+} # ---------- end of function __dnf_install_noinput ---------- + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __git_clone_and_checkout +# DESCRIPTION: (DRY) Helper function to clone and checkout salt to a +# specific revision. +#---------------------------------------------------------------------------------------------------------------------- +__git_clone_and_checkout() { + + echodebug "Installed git version: $(git --version | awk '{ print $3 }')" + # Turn off SSL verification if -I flag was set for insecure downloads + if [ "$_INSECURE_DL" -eq $BS_TRUE ]; then + export GIT_SSL_NO_VERIFY=1 + fi + + __SALT_GIT_CHECKOUT_PARENT_DIR=$(dirname "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null) + __SALT_GIT_CHECKOUT_PARENT_DIR="${__SALT_GIT_CHECKOUT_PARENT_DIR:-/tmp/git}" + __SALT_CHECKOUT_REPONAME="$(basename "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null)" + __SALT_CHECKOUT_REPONAME="${__SALT_CHECKOUT_REPONAME:-salt}" + [ -d "${__SALT_GIT_CHECKOUT_PARENT_DIR}" ] || mkdir "${__SALT_GIT_CHECKOUT_PARENT_DIR}" + # shellcheck disable=SC2164 + cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" + if [ -d "${_SALT_GIT_CHECKOUT_DIR}" ]; then + echodebug "Found a checked out Salt repository" + # shellcheck disable=SC2164 + cd "${_SALT_GIT_CHECKOUT_DIR}" + echodebug "Fetching git changes" + git fetch || return 1 + # Tags are needed because of salt's versioning, also fetch that + echodebug "Fetching git tags" + git fetch --tags || return 1 + + # If we have the SaltStack remote set as upstream, we also need to fetch the tags from there + if [ "$(git remote -v | grep $_SALTSTACK_REPO_URL)" != "" ]; then + echodebug "Fetching upstream(SaltStack's Salt repository) git tags" + git fetch --tags upstream + else + echoinfo "Adding SaltStack's Salt repository as a remote" + git remote add upstream "$_SALTSTACK_REPO_URL" + echodebug "Fetching upstream(SaltStack's Salt repository) git tags" + git fetch --tags upstream + fi + + echodebug "Hard reseting the cloned repository to ${GIT_REV}" + git reset --hard "$GIT_REV" || return 1 + + # Just calling `git reset --hard $GIT_REV` on a branch name that has + # already been checked out will not update that branch to the upstream + # HEAD; instead it will simply reset to itself. Check the ref to see + # if it is a branch name, check out the branch, and pull in the + # changes. + if git branch -a | grep -q "${GIT_REV}"; then + echodebug "Rebasing the cloned repository branch" + git pull --rebase || return 1 + fi + else + if [ "$_FORCE_SHALLOW_CLONE" -eq "${BS_TRUE}" ]; then + echoinfo "Forced shallow cloning of git repository." + __SHALLOW_CLONE=$BS_TRUE + elif [ "$__TAG_REGEX_MATCH" = "MATCH" ]; then + echoinfo "Git revision matches a Salt version tag, shallow cloning enabled." + __SHALLOW_CLONE=$BS_TRUE + else + echowarn "The git revision being installed does not match a Salt version tag. Shallow cloning disabled" + __SHALLOW_CLONE=$BS_FALSE + fi + + if [ "$__SHALLOW_CLONE" -eq $BS_TRUE ]; then + # Let's try shallow cloning to speed up. 
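+ # (Effectively `git clone --depth 1 --branch "$GIT_REV" ...`, as attempted below.)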
+ # Test for "--single-branch" option introduced in git 1.7.10, the minimal version of git where the shallow
+ # cloning we need actually works
+ if [ "$(git clone 2>&1 | grep 'single-branch')" != "" ]; then
+ # The "--single-branch" option is supported, attempt shallow cloning
+ echoinfo "Attempting to shallow clone $GIT_REV from Salt's repository ${_SALT_REPO_URL}"
+ if git clone --depth 1 --branch "$GIT_REV" "$_SALT_REPO_URL" "$__SALT_CHECKOUT_REPONAME"; then
+ # shellcheck disable=SC2164
+ cd "${_SALT_GIT_CHECKOUT_DIR}"
+ __SHALLOW_CLONE=$BS_TRUE
+ else
+ # Shallow clone above failed (missing upstream tags?), so fall back to the old behaviour.
+ echowarn "Failed to shallow clone."
+ echoinfo "Resuming regular git clone and remote SaltStack repository addition procedure"
+ __SHALLOW_CLONE=$BS_FALSE
+ fi
+ else
+ echodebug "Shallow cloning not possible. Required git version not met."
+ __SHALLOW_CLONE=$BS_FALSE
+ fi
+ fi
+
+ if [ "$__SHALLOW_CLONE" -eq $BS_FALSE ]; then
+ git clone "$_SALT_REPO_URL" "$__SALT_CHECKOUT_REPONAME" || return 1
+ # shellcheck disable=SC2164
+ cd "${_SALT_GIT_CHECKOUT_DIR}"
+
+ if ! echo "$_SALT_REPO_URL" | grep -q -F -w "${_SALTSTACK_REPO_URL#*://}"; then
+ # We need to add the saltstack repository as a remote and fetch tags for proper versioning
+ echoinfo "Adding SaltStack's Salt repository as a remote"
+ git remote add upstream "$_SALTSTACK_REPO_URL" || return 1
+
+ echodebug "Fetching upstream (SaltStack's Salt repository) git tags"
+ git fetch --tags upstream || return 1
+
+ # Check if GIT_REV is a remote branch or just a commit hash
+ if git branch -r | grep -q -F -w "origin/$GIT_REV"; then
+ GIT_REV="origin/$GIT_REV"
+ fi
+ fi
+
+ echodebug "Checking out $GIT_REV"
+ git checkout "$GIT_REV" || return 1
+ fi
+
+ fi
+
+ echoinfo "Cloning Salt's git repository succeeded"
+ return 0
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __copyfile
+# DESCRIPTION: Simple function to copy files. Overwrites if asked.
+#----------------------------------------------------------------------------------------------------------------------
+__copyfile() {
+ overwrite=$_FORCE_OVERWRITE
+ if [ $# -eq 2 ]; then
+ sfile=$1
+ dfile=$2
+ elif [ $# -eq 3 ]; then
+ sfile=$1
+ dfile=$2
+ overwrite=$3
+ else
+ echoerror "Wrong number of arguments for __copyfile()"
+ echoinfo "USAGE: __copyfile <source> <dest>  OR  __copyfile <source> <dest> <overwrite>"
+ exit 1
+ fi
+
+ # Does the source file exist?
+ if [ ! -f "$sfile" ]; then
+ echowarn "$sfile does not exist!"
+ return 1
+ fi
+
+ # If the destination is a directory, let's make it a full path so the logic
+ # below works as expected
+ if [ -d "$dfile" ]; then
+ echodebug "The passed destination ($dfile) is a directory"
+ dfile="${dfile}/$(basename "$sfile")"
+ echodebug "Full destination path is now: $dfile"
+ fi
+
+ if [ ! -f "$dfile" ]; then
+ # The destination file does not exist, copy
+ echodebug "Copying $sfile to $dfile"
+ cp "$sfile" "$dfile" || return 1
+ elif [ -f "$dfile" ] && [ "$overwrite" -eq $BS_TRUE ]; then
+ # The destination exists and we're overwriting
+ echodebug "Overwriting $dfile with $sfile"
+ cp -f "$sfile" "$dfile" || return 1
+ elif [ -f "$dfile" ] && [ "$overwrite" -ne $BS_TRUE ]; then
+ echodebug "Not overwriting $dfile with $sfile"
+ fi
+ return 0
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __movefile
+# DESCRIPTION: Simple function to move files. Overwrites if asked.
+#----------------------------------------------------------------------------------------------------------------------
+__movefile() {
+ overwrite=$_FORCE_OVERWRITE
+ if [ $# -eq 2 ]; then
+ sfile=$1
+ dfile=$2
+ elif [ $# -eq 3 ]; then
+ sfile=$1
+ dfile=$2
+ overwrite=$3
+ else
+ echoerror "Wrong number of arguments for __movefile()"
+ echoinfo "USAGE: __movefile <source> <dest>  OR  __movefile <source> <dest> <overwrite>"
+ exit 1
+ fi
+
+ if [ $_KEEP_TEMP_FILES -eq $BS_TRUE ]; then
+ # We're being told not to move files, instead copy them so we can keep
+ # them around
+ echodebug "Since BS_KEEP_TEMP_FILES=1 we're copying files instead of moving them"
+ __copyfile "$sfile" "$dfile" "$overwrite"
+ return $?
+ fi
+
+ # Does the source file exist?
+ if [ ! -f "$sfile" ]; then
+ echowarn "$sfile does not exist!"
+ return 1
+ fi
+
+ # If the destination is a directory, let's make it a full path so the logic
+ # below works as expected
+ if [ -d "$dfile" ]; then
+ echodebug "The passed destination ($dfile) is a directory"
+ dfile="${dfile}/$(basename "$sfile")"
+ echodebug "Full destination path is now: $dfile"
+ fi
+
+ if [ ! -f "$dfile" ]; then
+ # The destination file does not exist, move
+ echodebug "Moving $sfile to $dfile"
+ mv "$sfile" "$dfile" || return 1
+ elif [ -f "$dfile" ] && [ "$overwrite" -eq $BS_TRUE ]; then
+ # The destination exists and we're overwriting
+ echodebug "Overwriting $dfile with $sfile"
+ mv -f "$sfile" "$dfile" || return 1
+ elif [ -f "$dfile" ] && [ "$overwrite" -ne $BS_TRUE ]; then
+ echodebug "Not overwriting $dfile with $sfile"
+ fi
+
+ return 0
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __linkfile
+# DESCRIPTION: Simple function to create symlinks. Overwrites if asked. Accepts globs.
+#----------------------------------------------------------------------------------------------------------------------
+__linkfile() {
+ overwrite=$_FORCE_OVERWRITE
+ if [ $# -eq 2 ]; then
+ target=$1
+ linkname=$2
+ elif [ $# -eq 3 ]; then
+ target=$1
+ linkname=$2
+ overwrite=$3
+ else
+ echoerror "Wrong number of arguments for __linkfile()"
+ echoinfo "USAGE: __linkfile <target> <linkname>  OR  __linkfile <target> <linkname> <overwrite>"
+ exit 1
+ fi
+
+ for sfile in $target; do
+ # Does the source file exist?
+ if [ ! -f "$sfile" ]; then
+ echowarn "$sfile does not exist!"
+ return 1
+ fi
+
+ # If the destination is a directory, let's make it a full path so the logic
+ # below works as expected
+ if [ -d "$linkname" ]; then
+ echodebug "The passed link name ($linkname) is a directory"
+ linkname="${linkname}/$(basename "$sfile")"
+ echodebug "Full destination path is now: $linkname"
+ fi
+
+ if [ ! -e "$linkname" ]; then
+ # The destination file does not exist, create link
+ echodebug "Creating $linkname symlink pointing to $sfile"
+ ln -s "$sfile" "$linkname" || return 1
+ elif [ -e "$linkname" ] && [ "$overwrite" -eq $BS_TRUE ]; then
+ # The destination exists and we're overwriting
+ echodebug "Overwriting $linkname symlink to point to $sfile"
+ ln -sf "$sfile" "$linkname" || return 1
+ elif [ -e "$linkname" ] && [ "$overwrite" -ne $BS_TRUE ]; then
+ echodebug "Not overwriting $linkname symlink to point to $sfile"
+ fi
+ done
+
+ return 0
+}
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __overwriteconfig()
+# DESCRIPTION: Simple function to overwrite master or minion config files.
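+# EXAMPLE: (illustrative; hostname is hypothetical) __overwriteconfig "/etc/salt/minion" '{"master": "salt.example.com"}'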
+#----------------------------------------------------------------------------------------------------------------------
+__overwriteconfig() {
+ if [ $# -eq 2 ]; then
+ target=$1
+ json=$2
+ else
+ echoerror "Wrong number of arguments for __overwriteconfig()"
+ echoinfo "USAGE: __overwriteconfig <configfile> <jsonstring>"
+ exit 1
+ fi
+
+ # Make a tempfile to dump any python errors into.
+ if __check_command_exists mktemp; then
+ tempfile="$(mktemp /tmp/salt-config-XXXXXXXX 2>/dev/null)"
+
+ if [ -z "$tempfile" ]; then
+ echoerror "Failed to create temporary file in /tmp"
+ return 1
+ fi
+ else
+ tempfile="/tmp/salt-config-$$"
+ fi
+
+ if [ -n "$_PY_EXE" ]; then
+ good_python="$_PY_EXE"
+ # If python does not have yaml installed we're on Arch and should use python2
+ elif python -c "import yaml" 2> /dev/null; then
+ good_python=python
+ else
+ good_python=python2
+ fi
+
+ # Convert the JSON string to YAML and write it to the config file. Any errors are dumped into the tempfile.
+ "$good_python" -c "import json; import yaml; jsn=json.loads('$json'); yml=yaml.safe_dump(jsn, line_break='\\n', default_flow_style=False); config_file=open('$target', 'w'); config_file.write(yml); config_file.close();" 2>$tempfile
+
+ # No python errors were output to the tempfile
+ if [ ! -s "$tempfile" ]; then
+ rm -f "$tempfile"
+ return 0
+ fi
+
+ # Errors are present in the tempfile - let's expose them to the user.
+ fullerror=$(cat "$tempfile")
+ echodebug "$fullerror"
+ echoerror "Python error encountered. This is likely due to passing in a malformed JSON string. Please use -D to see stacktrace."
+
+ rm -f "$tempfile"
+
+ return 1
+
+}
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __check_services_systemd
+# DESCRIPTION: Return 0 or 1 in case the service is enabled or not
+# PARAMETERS: servicename
+#----------------------------------------------------------------------------------------------------------------------
+__check_services_systemd() {
+ if [ $# -eq 0 ]; then
+ echoerror "You need to pass a service name to check!"
+ exit 1
+ elif [ $# -ne 1 ]; then
+ echoerror "You need to pass a service name to check as the single argument to the function"
+ fi
+
+ servicename=$1
+ echodebug "Checking if service ${servicename} is enabled"
+
+ if [ "$(systemctl is-enabled "${servicename}")" = "enabled" ]; then
+ echodebug "Service ${servicename} is enabled"
+ return 0
+ else
+ echodebug "Service ${servicename} is NOT enabled"
+ return 1
+ fi
+} # ---------- end of function __check_services_systemd ----------
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __check_services_upstart
+# DESCRIPTION: Return 0 or 1 in case the service is enabled or not
+# PARAMETERS: servicename
+#----------------------------------------------------------------------------------------------------------------------
+__check_services_upstart() {
+ if [ $# -eq 0 ]; then
+ echoerror "You need to pass a service name to check!"
+ exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + # Check if service is enabled to start at boot + if initctl list | grep "${servicename}" > /dev/null 2>&1; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_upstart ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_sysvinit +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_sysvinit() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" + exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + if [ "$(LC_ALL=C /sbin/chkconfig --list | grep "\\<${servicename}\\>" | grep '[2-5]:on')" != "" ]; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_sysvinit ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_debian +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_debian() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" + exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + # Check if the service is going to be started at any runlevel, fixes bootstrap in container (Docker, LXC) + if ls /etc/rc?.d/S*"${servicename}" >/dev/null 2>&1; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_debian ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_openbsd +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_openbsd() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" 
+ exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + # shellcheck disable=SC2086,SC2046,SC2144 + if rcctl get ${servicename} status; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_openbsd ---------- + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_alpine +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_alpine() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" + exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + # shellcheck disable=SC2086,SC2046,SC2144 + if rc-status $(rc-status -r) | tail -n +2 | grep -q "\\<$servicename\\>"; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_openbsd ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __create_virtualenv +# DESCRIPTION: Return 0 or 1 depending on successful creation of virtualenv +#---------------------------------------------------------------------------------------------------------------------- +__create_virtualenv() { + if [ ! -d "$_VIRTUALENV_DIR" ]; then + echoinfo "Creating virtualenv ${_VIRTUALENV_DIR}" + if [ $_PIP_ALL -eq $BS_TRUE ]; then + virtualenv --no-site-packages "${_VIRTUALENV_DIR}" || return 1 + else + virtualenv --system-site-packages "${_VIRTUALENV_DIR}" || return 1 + fi + fi + return 0 +} # ---------- end of function __create_virtualenv ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __activate_virtualenv +# DESCRIPTION: Return 0 or 1 depending on successful activation of virtualenv +#---------------------------------------------------------------------------------------------------------------------- +__activate_virtualenv() { + set +o nounset + # Is virtualenv empty + if [ -z "$_VIRTUALENV_DIR" ]; then + __create_virtualenv || return 1 + # shellcheck source=/dev/null + . "${_VIRTUALENV_DIR}/bin/activate" || return 1 + echoinfo "Activated virtualenv ${_VIRTUALENV_DIR}" + fi + set -o nounset + return 0 +} # ---------- end of function __activate_virtualenv ---------- + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __install_pip_pkgs +# DESCRIPTION: Return 0 or 1 if successfully able to install pip packages. Can provide a different python version to +# install pip packages with. If $py_ver is not specified it will use the default python version. 
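+# EXAMPLE: (illustrative, mirrors the __install_tornado_pip call below) __install_pip_pkgs "tornado<5.0" "python3"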
+# PARAMETERS: pkgs, py_ver +#---------------------------------------------------------------------------------------------------------------------- + +__install_pip_pkgs() { + _pip_pkgs="$1" + _py_exe="$2" + _py_pkg=$(echo "$_py_exe" | sed -E "s/\\.//g") + _pip_cmd="${_py_exe} -m pip" + + if [ "${_py_exe}" = "" ]; then + _py_exe='python' + fi + + __check_pip_allowed + + # Install pip and pip dependencies + if ! __check_command_exists "${_pip_cmd} --version"; then + __PACKAGES="${_py_pkg}-setuptools ${_py_pkg}-pip gcc" + # shellcheck disable=SC2086 + if [ "$DISTRO_NAME_L" = "debian" ] || [ "$DISTRO_NAME_L" = "ubuntu" ];then + __PACKAGES="${__PACKAGES} ${_py_pkg}-dev" + __apt_get_install_noinput ${__PACKAGES} || return 1 + else + __PACKAGES="${__PACKAGES} ${_py_pkg}-devel" + if [ "$DISTRO_NAME_L" = "fedora" ];then + __dnf_install_noinput ${__PACKAGES} || return 1 + else + __yum_install_noinput ${__PACKAGES} || return 1 + fi + fi + + fi + + echoinfo "Installing pip packages: ${_pip_pkgs} using ${_py_exe}" + # shellcheck disable=SC2086 + ${_pip_cmd} install ${_pip_pkgs} || return 1 +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __install_tornado_pip +# PARAMETERS: python executable +# DESCRIPTION: Return 0 or 1 if successfully able to install tornado<5.0 +#---------------------------------------------------------------------------------------------------------------------- +__install_tornado_pip() { + # OS needs tornado <5.0 from pip + __check_pip_allowed "You need to allow pip based installations (-P) for Tornado <5.0 in order to install Salt on Python 3" + ## install pip if its not installed and install tornado + __install_pip_pkgs "tornado<5.0" "${1}" || return 1 +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __install_pip_deps +# DESCRIPTION: Return 0 or 1 if successfully able to install pip packages via requirements file +# PARAMETERS: requirements_file +#---------------------------------------------------------------------------------------------------------------------- +__install_pip_deps() { + # Install virtualenv to system pip before activating virtualenv if thats going to be used + # We assume pip pkg is installed since that is distro specific + if [ "$_VIRTUALENV_DIR" != "null" ]; then + if ! __check_command_exists pip; then + echoerror "Pip not installed: required for -a installs" + exit 1 + fi + pip install -U virtualenv + __activate_virtualenv || return 1 + else + echoerror "Must have virtualenv dir specified for -a installs" + fi + + requirements_file=$1 + if [ ! -f "${requirements_file}" ]; then + echoerror "Requirements file: ${requirements_file} cannot be found, needed for -a (pip pkg) installs" + exit 1 + fi + + __PIP_PACKAGES='' + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # shellcheck disable=SC2089 + __PIP_PACKAGES="${__PIP_PACKAGES} 'apache-libcloud>=$_LIBCLOUD_MIN_VERSION'" + fi + + # shellcheck disable=SC2086,SC2090 + pip install -U -r ${requirements_file} ${__PIP_PACKAGES} +} # ---------- end of function __install_pip_deps ---------- + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __install_salt_from_repo_post_neon +# DESCRIPTION: Return 0 or 1 if successfully able to install. Can provide a different python version to +# install pip packages with. If $py_exe is not specified it will use the default python version. 
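+# (Editor's note + sketch.) __install_pip_pkgs above derives _py_pkg and
+# _pip_cmd from its second argument *before* defaulting it to 'python', so
+# callers should always pass the interpreter explicitly, as
+# __install_tornado_pip does:
+#   __install_pip_pkgs "tornado<5.0" "python3"
+#   # -> python3 -m pip install 'tornado<5.0'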
+# PARAMETERS: py_exe +#---------------------------------------------------------------------------------------------------------------------- +__install_salt_from_repo_post_neon() { + _py_exe="$1" + + if [ "${_py_exe}" = "" ]; then + _py_exe='python' + fi + + echodebug "__install_salt_from_repo_post_neon py_exe=$_py_exe" + + _py_version=$(${_py_exe} -c "import sys; print('{0}.{1}'.format(*sys.version_info))") + _pip_cmd="pip${_py_version}" + if ! __check_command_exists "${_pip_cmd}"; then + echodebug "The pip binary '${_pip_cmd}' was not found in PATH" + _pip_cmd="pip$(echo "${_py_version}" | cut -c -1)" + if ! __check_command_exists "${_pip_cmd}"; then + echodebug "The pip binary '${_pip_cmd}' was not found in PATH" + _pip_cmd="pip" + if ! __check_command_exists "${_pip_cmd}"; then + echoerror "Unable to find a pip binary" + return 1 + fi + fi + fi + + __check_pip_allowed + + echodebug "Installed pip version: $(${_pip_cmd} --version)" + + CHECK_PIP_VERSION_SCRIPT=$(cat << EOM +import sys +try: + import pip + installed_pip_version=tuple([int(part.strip()) for part in pip.__version__.split('.') if part.isdigit()]) + desired_pip_version=($(echo ${_MINIMUM_PIP_VERSION} | sed 's/\./, /g' )) + if installed_pip_version < desired_pip_version: + print('Desired pip version {!r} > Installed pip version {!r}'.format('.'.join(map(str, desired_pip_version)), '.'.join(map(str, installed_pip_version)))) + sys.exit(1) + print('Desired pip version {!r} < Installed pip version {!r}'.format('.'.join(map(str, desired_pip_version)), '.'.join(map(str, installed_pip_version)))) + sys.exit(0) +except ImportError: + print('Failed to import pip') + sys.exit(1) +EOM +) + if ! ${_py_exe} -c "$CHECK_PIP_VERSION_SCRIPT"; then + # Upgrade pip to at least 1.2 which is when we can start using "python -m pip" + echodebug "Running '${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} pip>=${_MINIMUM_PIP_VERSION}'" + ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} -v "pip>=${_MINIMUM_PIP_VERSION}" + sleep 1 + echodebug "PATH: ${PATH}" + _pip_cmd="pip${_py_version}" + if ! __check_command_exists "${_pip_cmd}"; then + echodebug "The pip binary '${_pip_cmd}' was not found in PATH" + _pip_cmd="pip$(echo "${_py_version}" | cut -c -1)" + if ! __check_command_exists "${_pip_cmd}"; then + echodebug "The pip binary '${_pip_cmd}' was not found in PATH" + _pip_cmd="pip" + if ! __check_command_exists "${_pip_cmd}"; then + echoerror "Unable to find a pip binary" + return 1 + fi + fi + fi + echodebug "Installed pip version: $(${_pip_cmd} --version)" + fi + + # We also lock setuptools to <45 which is the latest release to support both py2 and py3 + echodebug "Running '${_pip_cmd} install wheel setuptools>=${_MINIMUM_SETUPTOOLS_VERSION},<45'" + ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} wheel "setuptools>=${_MINIMUM_SETUPTOOLS_VERSION},<45" + + echoinfo "Installing salt using ${_py_exe}" + cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 + + mkdir /tmp/git/deps + echoinfo "Downloading Salt Dependencies from PyPi" + echodebug "Running '${_pip_cmd} download -d /tmp/git/deps .'" + ${_pip_cmd} download -d /tmp/git/deps . 
|| (echo "Failed to download salt dependencies" && return 1) + + echoinfo "Installing Downloaded Salt Dependencies" + echodebug "Running '${_pip_cmd} install --ignore-installed ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/*'" + ${_pip_cmd} install --ignore-installed ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/* || return 1 + rm -f /tmp/git/deps/* + + echoinfo "Building Salt Python Wheel" + + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + SETUP_PY_INSTALL_ARGS="-v" + fi + + echodebug "Running '${_py_exe} setup.py --salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} bdist_wheel'" + ${_py_exe} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} bdist_wheel || return 1 + mv dist/salt*.whl /tmp/git/deps/ || return 1 + + cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" || return 1 + + echoinfo "Installing Built Salt Wheel" + ${_pip_cmd} uninstall --yes salt 2>/dev/null || true + echodebug "Running '${_pip_cmd} install --no-deps --force-reinstall ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/salt*.whl'" + ${_pip_cmd} install --no-deps --force-reinstall \ + ${_POST_NEON_PIP_INSTALL_ARGS} \ + --global-option="--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \ + /tmp/git/deps/salt*.whl || return 1 + + echoinfo "Checking if Salt can be imported using ${_py_exe}" + CHECK_SALT_SCRIPT=$(cat << EOM +import os +import sys +try: + import salt + import salt.version + print('\nInstalled Salt Version: {}'.format(salt.version.__version__)) + print('Installed Salt Package Path: {}\n'.format(os.path.dirname(salt.__file__))) + sys.exit(0) +except ImportError: + print('\nFailed to import salt\n') + sys.exit(1) +EOM +) + if ! ${_py_exe} -c "$CHECK_SALT_SCRIPT"; then + return 1 + fi + return 0 +} # ---------- end of function __install_salt_from_repo_post_neon ---------- + + +if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + if [ "x${_PY_MAJOR_VERSION}" = "x" ]; then + # Default to python 2 for pre Neon installs + _PY_MAJOR_VERSION=2 + fi +else + if [ "x${_PY_MAJOR_VERSION}" = "x" ]; then + # Default to python 3 for post Neon install + _PY_MAJOR_VERSION=3 + fi +fi + +####################################################################################################################### +# +# Distribution install functions +# +# In order to install salt for a distribution you need to define: +# +# To Install Dependencies, which is required, one of: +# 1. install____deps +# 2. install_____deps +# 3. install___deps +# 4 install____deps +# 5. install___deps +# 6. install__deps +# +# Optionally, define a salt configuration function, which will be called if +# the -c (config-dir) option is passed. One of: +# 1. config____salt +# 2. config_____salt +# 3. config___salt +# 4 config____salt +# 5. config___salt +# 6. config__salt +# 7. config_salt [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] +# +# Optionally, define a salt master pre-seed function, which will be called if +# the -k (pre-seed master keys) option is passed. One of: +# 1. preseed____master +# 2. preseed_____master +# 3. preseed___master +# 4 preseed____master +# 5. preseed___master +# 6. preseed__master +# 7. preseed_master [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] +# +# To install salt, which, of course, is required, one of: +# 1. install___ +# 2. install____ +# 3. install__ +# +# Optionally, define a post install function, one of: +# 1. install____post +# 2. install_____post +# 3. install___post +# 4 install____post +# 5. 
install___post +# 6. install__post +# +# Optionally, define a start daemons function, one of: +# 1. install____restart_daemons +# 2. install_____restart_daemons +# 3. install___restart_daemons +# 4 install____restart_daemons +# 5. install___restart_daemons +# 6. install__restart_daemons +# +# NOTE: The start daemons function should be able to restart any daemons +# which are running, or start if they're not running. +# +# Optionally, define a daemons running function, one of: +# 1. daemons_running___ +# 2. daemons_running____ +# 3. daemons_running__ +# 4 daemons_running___ +# 5. daemons_running__ +# 6. daemons_running_ +# 7. daemons_running [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] +# +# Optionally, check enabled Services: +# 1. install____check_services +# 2. install_____check_services +# 3. install___check_services +# 4 install____check_services +# 5. install___check_services +# 6. install__check_services +# +####################################################################################################################### + + +####################################################################################################################### +# +# Ubuntu Install Functions +# +__enable_universe_repository() { + if [ "$(grep -R universe /etc/apt/sources.list /etc/apt/sources.list.d/ | grep -v '#')" != "" ]; then + # The universe repository is already enabled + return 0 + fi + + echodebug "Enabling the universe repository" + + add-apt-repository -y "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) universe" || return 1 + + return 0 +} + +__install_saltstack_ubuntu_repository() { + # Workaround for latest non-LTS ubuntu + if [ "$DISTRO_MAJOR_VERSION" -eq 19 ] || \ + { [ "$DISTRO_MAJOR_VERSION" -eq 18 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; }; then + echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages for previous LTS release. You may experience problems." + UBUNTU_VERSION=18.04 + UBUNTU_CODENAME="bionic" + else + UBUNTU_VERSION=${DISTRO_VERSION} + UBUNTU_CODENAME=${DISTRO_CODENAME} + fi + + # Install downloader backend for GPG keys fetching + __PACKAGES='wget' + + # Required as it is not installed by default on Ubuntu 18+ + if [ "$DISTRO_MAJOR_VERSION" -ge 18 ]; then + __PACKAGES="${__PACKAGES} gnupg" + fi + + # Make sure https transport is available + if [ "$HTTP_VAL" = "https" ] ; then + __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" + fi + + # shellcheck disable=SC2086,SC2090 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + __PY_VERSION_REPO="apt" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # SaltStack's stable Ubuntu repository: + SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/${STABLE_REV}" + echo "$__REPO_ARCH_DEB $SALTSTACK_UBUNTU_URL $UBUNTU_CODENAME main" > /etc/apt/sources.list.d/saltstack.list + + __apt_key_fetch "$SALTSTACK_UBUNTU_URL/SALTSTACK-GPG-KEY.pub" || return 1 + + __wait_for_apt apt-get update || return 1 +} + +install_ubuntu_deps() { + if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then + # Install add-apt-repository + if ! 
__check_command_exists add-apt-repository; then + __apt_get_install_noinput software-properties-common || return 1 + fi + + __enable_universe_repository || return 1 + + __wait_for_apt apt-get update || return 1 + fi + + __PACKAGES='' + + if [ "$DISTRO_MAJOR_VERSION" -lt 16 ]; then + # Minimal systems might not have upstart installed, install it + __PACKAGES="upstart" + fi + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 16 ] && [ -z "$_PY_EXE" ]; then + __PACKAGES="${__PACKAGES} python2.7" + fi + + if [ "$_VIRTUALENV_DIR" != "null" ]; then + __PACKAGES="${__PACKAGES} python-virtualenv" + fi + # Need python-apt for managing packages via Salt + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-apt" + + # requests is still used by many salt modules + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-requests" + + # YAML module is used for generating custom master/minion configs + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-yaml" + + # Additionally install procps and pciutils which allows for Docker bootstraps. See 366#issuecomment-39666813 + __PACKAGES="${__PACKAGES} procps pciutils" + + # shellcheck disable=SC2086,SC2090 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_ubuntu_stable_deps() { + if [ "${_SLEEP}" -eq "${__DEFAULT_SLEEP}" ] && [ "$DISTRO_MAJOR_VERSION" -lt 16 ]; then + # The user did not pass a custom sleep value as an argument, let's increase the default value + echodebug "On Ubuntu systems we increase the default sleep value to 10." + echodebug "See https://github.com/saltstack/salt/issues/12248 for more info." + _SLEEP=10 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 20 ]; then + # Default Ubuntu 20.04 to Py3 + if [ "x${_PY_EXE}" = "x" ]; then + _PY_EXE=python3 + _PY_MAJOR_VERSION=3 + PY_PKG_VER=3 + fi + fi + + if [ $_START_DAEMONS -eq $BS_FALSE ]; then + echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." + fi + + # No user interaction, libc6 restart services for example + export DEBIAN_FRONTEND=noninteractive + + __wait_for_apt apt-get update || return 1 + + if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then + if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && + apt-key update && apt-get update || return 1 + fi + + __apt_get_upgrade_noinput || return 1 + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __check_dpkg_architecture || return 1 + __install_saltstack_ubuntu_repository || return 1 + fi + + install_ubuntu_deps || return 1 +} + +install_ubuntu_git_deps() { + __wait_for_apt apt-get update || return 1 + + if ! 
__check_command_exists git; then + __apt_get_install_noinput git-core || return 1 + fi + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __apt_get_install_noinput ca-certificates + fi + + __git_clone_and_checkout || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + __PACKAGES="" + + # See how we are installing packages + if [ "${_PIP_ALL}" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-dev swig libssl-dev libzmq3 libzmq3-dev" + + if ! __check_command_exists pip; then + __PACKAGES="${__PACKAGES} python-setuptools python-pip" + fi + + # Get just the apt packages that are required to build all the pythons + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + # Install the pythons from requirements (only zmq for now) + __install_pip_deps "${_SALT_GIT_CHECKOUT_DIR}/requirements/zeromq.txt" || return 1 + else + install_ubuntu_stable_deps || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PACKAGES="${__PACKAGES} python3-setuptools" + else + # There is no m2crypto package for Py3 at this time - only install for Py2 + __PACKAGES="${__PACKAGES} python-m2crypto" + fi + + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-yaml" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-zmq" + __PACKAGES="${__PACKAGES} python-concurrent.futures" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # Install python-libcloud if asked to + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + fi + else + __PACKAGES="python${PY_PKG_VER}-dev python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_ubuntu_stable() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_ubuntu_git() { + # Activate virtualenv before install + if [ "${_VIRTUALENV_DIR}" != "null" ]; then + __activate_virtualenv || return 1 + fi + + if [ -n "$_PY_EXE" ]; then + _PYEXE=${_PY_EXE} + else + _PYEXE=python2.7 + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + # We can use --prefix on debian based ditributions + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python3/dist-packages --install-option=--install-scripts=/usr/bin" + else + _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python2.7/dist-packages --install-option=--install-scripts=/usr/bin" + fi + _POST_NEON_PIP_INSTALL_ARGS="" + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 
1 + cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 + sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + # shellcheck disable=SC2086 + "${_PYEXE}" setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 + else + # shellcheck disable=SC2086 + "${_PYEXE}" setup.py ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 + fi + + return 0 +} + +install_ubuntu_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + # Using systemd + /bin/systemctl is-enabled salt-$fname.service > /dev/null 2>&1 || ( + /bin/systemctl preset salt-$fname.service > /dev/null 2>&1 && + /bin/systemctl enable salt-$fname.service > /dev/null 2>&1 + ) + sleep 1 + /bin/systemctl daemon-reload + elif [ -f /etc/init.d/salt-$fname ]; then + update-rc.d salt-$fname defaults + fi + done + + return 0 +} + +install_ubuntu_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + elif [ -f /sbin/initctl ]; then + _upstart_conf="/etc/init/salt-$fname.conf" + # We have upstart support + echodebug "There's upstart support" + if [ ! -f $_upstart_conf ]; then + # upstart does not know about our service, let's copy the proper file + echowarn "Upstart does not appear to know about salt-$fname" + echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-$fname.upstart to $_upstart_conf" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.upstart" "$_upstart_conf" + # Set service to know about virtualenv + if [ "${_VIRTUALENV_DIR}" != "null" ]; then + echo "SALT_USE_VIRTUALENV=${_VIRTUALENV_DIR}" > /etc/default/salt-${fname} + fi + /sbin/initctl reload-configuration || return 1 + fi + # No upstart support in Ubuntu!? + elif [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init" ]; then + echodebug "There's NO upstart support!?" 
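+            # (Editor's illustration; hypothetical minion-only install.) The
+            # systemd branches of these *_post functions all use the
+            # "is-enabled || (preset && enable)" idiom, i.e. roughly:
+            #   systemctl is-enabled salt-minion.service >/dev/null 2>&1 \
+            #       || { systemctl preset salt-minion.service && systemctl enable salt-minion.service; }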
+ echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init to /etc/init.d/salt-$fname" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init" "/etc/init.d/salt-$fname" + chmod +x /etc/init.d/salt-$fname + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + update-rc.d salt-$fname defaults + else + echoerror "Neither upstart nor init.d was setup for salt-$fname" + fi + done + + return 0 +} + +install_ubuntu_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + # Ensure upstart configs / systemd units are loaded + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then + systemctl daemon-reload + elif [ -f /sbin/initctl ]; then + /sbin/initctl reload-configuration + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then + echodebug "There's systemd support while checking salt-$fname" + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + # We failed to start the service, let's test the SysV code below + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + fi + + if [ -f /sbin/initctl ]; then + echodebug "There's upstart support while checking salt-$fname" + + if status salt-$fname 2>/dev/null | grep -q running; then + stop salt-$fname || (echodebug "Failed to stop salt-$fname" && return 1) + fi + + start salt-$fname && continue + # We failed to start the service, let's test the SysV code below + echodebug "Failed to start salt-$fname using Upstart" + fi + + if [ ! 
-f /etc/init.d/salt-$fname ]; then + echoerror "No init.d support for salt-$fname was found" + return 1 + fi + + /etc/init.d/salt-$fname stop > /dev/null 2>&1 + /etc/init.d/salt-$fname start + done + + return 0 +} + +install_ubuntu_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then + __check_services_systemd salt-$fname || return 1 + elif [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then + __check_services_upstart salt-$fname || return 1 + elif [ -f /etc/init.d/salt-$fname ]; then + __check_services_debian salt-$fname || return 1 + fi + done + + return 0 +} +# +# End of Ubuntu Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Debian Install Functions +# +__install_saltstack_debian_repository() { + DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" + DEBIAN_CODENAME="$DISTRO_CODENAME" + + __PY_VERSION_REPO="apt" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # Install downloader backend for GPG keys fetching + __PACKAGES='wget' + + # Required as it is not installed by default on Debian 9+ + if [ "$DISTRO_MAJOR_VERSION" -ge 9 ]; then + __PACKAGES="${__PACKAGES} gnupg2" + fi + + # Make sure https transport is available + if [ "$HTTP_VAL" = "https" ] ; then + __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" + fi + + # shellcheck disable=SC2086,SC2090 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location + SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/debian/${DEBIAN_RELEASE}/${__REPO_ARCH}/${STABLE_REV}" + echo "$__REPO_ARCH_DEB $SALTSTACK_DEBIAN_URL $DEBIAN_CODENAME main" > "/etc/apt/sources.list.d/saltstack.list" + + __apt_key_fetch "$SALTSTACK_DEBIAN_URL/SALTSTACK-GPG-KEY.pub" || return 1 + + __wait_for_apt apt-get update || return 1 +} + +install_debian_deps() { + if [ $_START_DAEMONS -eq $BS_FALSE ]; then + echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." 
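+        # (Editor's illustration; hypothetical values, assuming the default
+        # repo host.) On Debian 10 amd64 with Py3 packages and STABLE_REV=3000,
+        # __install_saltstack_debian_repository above writes roughly:
+        #   deb https://repo.saltstack.com/py3/debian/10/amd64/3000 buster main
+        # into /etc/apt/sources.list.d/saltstack.list before fetching the GPG key.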
+ fi + + # No user interaction, libc6 restart services for example + export DEBIAN_FRONTEND=noninteractive + + __wait_for_apt apt-get update || return 1 + + if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then + # Try to update GPG keys first if allowed + if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && + apt-key update && apt-get update || return 1 + fi + + __apt_get_upgrade_noinput || return 1 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 10 ]; then + # Default Debian 10 to Py3 + if [ "x${_PY_EXE}" = "x" ]; then + _PY_EXE=python3 + _PY_MAJOR_VERSION=3 + PY_PKG_VER=3 + fi + fi + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + fi + + # Additionally install procps and pciutils which allows for Docker bootstraps. See 366#issuecomment-39666813 + __PACKAGES='procps pciutils' + + # YAML module is used for generating custom master/minion configs + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-yaml" + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __check_dpkg_architecture || return 1 + __install_saltstack_debian_repository || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_debian_git_pre() { + if ! __check_command_exists git; then + __apt_get_install_noinput git || return 1 + fi + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __apt_get_install_noinput ca-certificates + fi + + __git_clone_and_checkout || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi +} + +install_debian_git_deps() { + install_debian_deps || return 1 + install_debian_git_pre || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + __PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-backports.ssl-match-hostname" + __PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-msgpack python-m2crypto" + __PACKAGES="${__PACKAGES} python-requests python-tornado python-yaml python-zmq" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # Install python-libcloud if asked to + __PACKAGES="${__PACKAGES} python-libcloud" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + else + __PACKAGES="python${PY_PKG_VER}-dev python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + echodebug "install_debian_git_deps() Installing ${__PACKAGES}" + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + fi + + return 0 +} + +install_debian_7_git_deps() { + install_debian_deps || return 1 + install_debian_git_deps || return 1 + + return 0 +} + +install_debian_8_git_deps() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + echodebug "CALLING install_debian_git_deps" + install_debian_git_deps || return 1 + return 0 + fi + + install_debian_deps || return 1 + + if ! 
__check_command_exists git; then + __apt_get_install_noinput git || return 1 + fi + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __apt_get_install_noinput ca-certificates + fi + + __git_clone_and_checkout || return 1 + + __PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-crypto python-jinja2" + __PACKAGES="${__PACKAGES} python-m2crypto python-msgpack python-requests python-systemd" + __PACKAGES="${__PACKAGES} python-yaml python-zmq python-concurrent.futures" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # Install python-libcloud if asked to + __PACKAGES="${__PACKAGES} python-libcloud" + fi + + __PIP_PACKAGES='' + if (__check_pip_allowed >/dev/null 2>&1); then + __PIP_PACKAGES='tornado<5.0' + # Install development environment for building tornado Python module + __PACKAGES="${__PACKAGES} build-essential python-dev" + + if ! __check_command_exists pip; then + __PACKAGES="${__PACKAGES} python-pip" + fi + # Attempt to configure backports repo on non-x86_64 system + elif [ $_DISABLE_REPOS -eq $BS_FALSE ] && [ "$DPKG_ARCHITECTURE" != "amd64" ]; then + # Check if Debian Backports repo already configured + if ! apt-cache policy | grep -q 'Debian Backports'; then + echo 'deb http://httpredir.debian.org/debian jessie-backports main' > \ + /etc/apt/sources.list.d/backports.list + fi + + __wait_for_apt apt-get update || return 1 + + # python-tornado package should be installed from backports repo + __PACKAGES="${__PACKAGES} python-backports.ssl-match-hostname python-tornado/jessie-backports" + else + __PACKAGES="${__PACKAGES} python-backports.ssl-match-hostname python-tornado" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + if [ "${__PIP_PACKAGES}" != "" ]; then + # shellcheck disable=SC2086,SC2090 + pip install -U ${__PIP_PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_debian_9_git_deps() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + install_debian_git_deps || return 1 + return 0 + fi + + install_debian_deps || return 1 + install_debian_git_pre || return 1 + + __PACKAGES="libzmq5 lsb-release" + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + + # These packages are PY2-ONLY + __PACKAGES="${__PACKAGES} python-backports-abc python-m2crypto python-concurrent.futures" + fi + + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-apt python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-systemd" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-yaml python${PY_PKG_VER}-zmq" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # Install python-libcloud if asked to + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_debian_10_git_deps() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + install_debian_git_deps || return 1 + return 0 + fi + + install_debian_deps || return 1 + install_debian_git_pre || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + _py=${_PY_EXE} + PY_PKG_VER=3 + __PACKAGES="python${PY_PKG_VER}-distutils" + else + _py="python" + PY_PKG_VER="" + __PACKAGES="" + fi + + 
__install_tornado_pip ${_py}|| return 1 + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-yaml python${PY_PKG_VER}-zmq" + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_debian_stable() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_debian_7_stable() { + install_debian_stable || return 1 + return 0 +} + +install_debian_8_stable() { + install_debian_stable || return 1 + return 0 +} + +install_debian_9_stable() { + install_debian_stable || return 1 + return 0 +} + +install_debian_git() { + if [ -n "$_PY_EXE" ]; then + _PYEXE=${_PY_EXE} + else + _PYEXE=python + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + # We can use --prefix on debian based ditributions + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python3/dist-packages --install-option=--install-scripts=/usr/bin" + else + _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python2.7/dist-packages --install-option=--install-scripts=/usr/bin" + fi + _POST_NEON_PIP_INSTALL_ARGS="" + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 + sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + # shellcheck disable=SC2086 + "${_PYEXE}" setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 + else + # shellcheck disable=SC2086 + "${_PYEXE}" setup.py ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 + fi +} + +install_debian_7_git() { + install_debian_git || return 1 + return 0 +} + +install_debian_8_git() { + install_debian_git || return 1 + return 0 +} + +install_debian_9_git() { + install_debian_git || return 1 + return 0 +} + +install_debian_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ "$fname" = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ "$fname" = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ "$fname" = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ "$fname" = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # Configure SystemD for Debian 8 "Jessie" and later + if [ -f /bin/systemctl ]; then + if [ ! 
-f /lib/systemd/system/salt-${fname}.service ] || \ + { [ -f /lib/systemd/system/salt-${fname}.service ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]; }; then + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.environment" "/etc/default/salt-${fname}" + else + # workaround before adding Debian-specific unit files to the Salt main repo + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system + sed -i -e '/^Type/ s/notify/simple/' /lib/systemd/system/salt-${fname}.service + fi + fi + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ "$fname" = "api" ] && continue + + /bin/systemctl enable "salt-${fname}.service" + SYSTEMD_RELOAD=$BS_TRUE + + # Install initscripts for Debian 7 "Wheezy" + elif [ ! -f "/etc/init.d/salt-$fname" ] || \ + { [ -f "/etc/init.d/salt-$fname" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.init" "/etc/init.d/salt-${fname}" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.environment" "/etc/default/salt-${fname}" + + if [ ! -f "/etc/init.d/salt-${fname}" ]; then + echowarn "The init script for salt-${fname} was not found, skipping it..." + continue + fi + + chmod +x "/etc/init.d/salt-${fname}" + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ "$fname" = "api" ] && continue + + update-rc.d "salt-${fname}" defaults + fi + done +} + +install_debian_restart_daemons() { + [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + # Debian 8 uses systemd + /bin/systemctl stop salt-$fname > /dev/null 2>&1 + /bin/systemctl start salt-$fname.service && continue + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + elif [ -f /etc/init.d/salt-$fname ]; then + # Still in SysV init + /etc/init.d/salt-$fname stop > /dev/null 2>&1 + /etc/init.d/salt-$fname start + fi + done +} + +install_debian_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + __check_services_systemd salt-$fname || return 1 + elif [ -f /etc/init.d/salt-$fname ]; then + __check_services_debian salt-$fname || return 1 + fi + done + return 0 +} +# +# Ended Debian Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Fedora 
Install Functions +# + +install_fedora_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + dnf -y update || return 1 + fi + + __PACKAGES="${__PACKAGES:=}" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -lt 3 ]; then + echoerror "There are no Python 2 stable packages for Fedora, only Py3 packages" + return 1 + fi + + # Salt on Fedora is Py3 + PY_PKG_VER=3 + + __PACKAGES="${__PACKAGES} dnf-utils libyaml procps-ng python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-zmq" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pip python${PY_PKG_VER}-m2crypto python${PY_PKG_VER}-pyyaml" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-systemd" + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + fi + + # shellcheck disable=SC2086 + __dnf_install_noinput ${__PACKAGES} ${_EXTRA_PACKAGES} || return 1 + + return 0 +} + +install_fedora_stable() { + if [ "$STABLE_REV" = "latest" ]; then + __SALT_VERSION="" + else + __SALT_VERSION="$(dnf list --showduplicates salt | grep "$STABLE_REV" | head -n 1 | awk '{print $2}')" + if [ "x${__SALT_VERSION}" = "x" ]; then + echoerror "Could not find a stable install for Salt ${STABLE_REV}" + exit 1 + fi + echoinfo "Installing Stable Package Version ${__SALT_VERSION}" + __SALT_VERSION="-${__SALT_VERSION}" + fi + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud${__SALT_VERSION}" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master${__SALT_VERSION}" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion${__SALT_VERSION}" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic${__SALT_VERSION}" + fi + + # shellcheck disable=SC2086 + __dnf_install_noinput ${__PACKAGES} || return 1 + + __python="python3" + if ! __check_command_exists python3; then + echoerror "Could not find a python3 binary?!" 
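+    # (Editor's sketch; hypothetical revision.) The pinning above resolves
+    # STABLE_REV to a concrete package version, e.g. for STABLE_REV=3001:
+    #   dnf list --showduplicates salt | grep 3001 | head -n 1 | awk '{print $2}'
+    # might print "3001.1-1.fc31", yielding installs like salt-minion-3001.1-1.fc31.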
+ return 1 + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + __check_pip_allowed "You need to allow pip based installations (-P) for Tornado <5.0 in order to install Salt" + __installed_tornado_rpm=$(rpm -qa | grep python${PY_PKG_VER}-tornado) + if [ -n "${__installed_tornado_rpm}" ]; then + echodebug "Removing system package ${__installed_tornado_rpm}" + rpm -e --nodeps "${__installed_tornado_rpm}" || return 1 + fi + __get_site_packages_dir_code=$(cat << EOM +import site +print([d for d in site.getsitepackages() if d.startswith('/usr/lib/python')][0]) +EOM +) + __target_path=$(${__python} -c "${__get_site_packages_dir_code}") + echodebug "Running '${__python}' -m pip install --target ${__target_path} 'tornado<5.0'" + "${__python}" -m pip install --target "${__target_path}" "tornado<5" || return 1 + fi + + return 0 +} + +install_fedora_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + done +} + +install_fedora_git_deps() { + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + # Packages are named python3- + PY_PKG_VER=3 + else + PY_PKG_VER=2 + fi + + __PACKAGES="" + if ! __check_command_exists ps; then + __PACKAGES="${__PACKAGES} procps-ng" + fi + if ! __check_command_exists git; then + __PACKAGES="${__PACKAGES} git" + fi + + if [ -n "${__PACKAGES}" ]; then + # shellcheck disable=SC2086 + __dnf_install_noinput ${__PACKAGES} || return 1 + __PACKAGES="" + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __PACKAGES="${__PACKAGES} ca-certificates" + fi + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud python${PY_PKG_VER}-netaddr" + fi + + install_fedora_deps || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + if __check_command_exists python3; then + __python="python3" + fi + elif [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + if __check_command_exists python2; then + __python="python2" + fi + else + if ! __check_command_exists python; then + echoerror "Unable to find a python binary?!" 
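+        # (Editor's illustration; the exact path varies by Fedora release.)
+        # The embedded site-packages lookup above is equivalent to:
+        #   python3 -c "import site; print([d for d in site.getsitepackages() if d.startswith('/usr/lib/python')][0])"
+        #   # -> e.g. /usr/lib/python3.7/site-packages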
+ return 1 + fi + # Let's hope it's the right one + __python="python" + fi + + grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" | while IFS=' + ' read -r dep; do + echodebug "Running '${__python}' -m pip install '${dep}'" + "${__python}" -m pip install "${dep}" || return 1 + done + else + __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + # shellcheck disable=SC2086 + __dnf_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_fedora_git() { + if [ "${_PY_EXE}" != "" ]; then + _PYEXE=${_PY_EXE} + echoinfo "Using the following python version: ${_PY_EXE} to install salt" + else + _PYEXE='python2' + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + ${_PYEXE} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + else + ${_PYEXE} setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + fi + return 0 +} + +install_fedora_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + done +} + +install_fedora_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + done +} + +install_fedora_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || return 1 + done + + return 0 +} +# +# Ended Fedora Install 
Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# CentOS Install Functions +# +__install_epel_repository() { + if [ ${_EPEL_REPOS_INSTALLED} -eq $BS_TRUE ]; then + return 0 + fi + + # Check if epel repo is already enabled and flag it accordingly + if yum repolist | grep -q "^[!]\\?${_EPEL_REPO}/"; then + _EPEL_REPOS_INSTALLED=$BS_TRUE + return 0 + fi + + # Download latest 'epel-release' package for the distro version directly + epel_repo_url="${HTTP_VAL}://dl.fedoraproject.org/pub/epel/epel-release-latest-${DISTRO_MAJOR_VERSION}.noarch.rpm" + rpm -Uvh --force "$epel_repo_url" || return 1 + + _EPEL_REPOS_INSTALLED=$BS_TRUE + + return 0 +} + +__install_saltstack_rhel_repository() { + if [ "$ITYPE" = "stable" ]; then + repo_rev="$STABLE_REV" + else + repo_rev="latest" + fi + + __PY_VERSION_REPO="yum" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # Avoid using '$releasever' variable for yum. + # Instead, this should work correctly on all RHEL variants. + base_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/${repo_rev}/" + gpg_key="SALTSTACK-GPG-KEY.pub" + repo_file="/etc/yum.repos.d/saltstack.repo" + + if [ ! -s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then + cat <<_eof > "$repo_file" +[saltstack] +name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever +baseurl=${base_url} +skip_if_unavailable=True +gpgcheck=1 +gpgkey=${base_url}${gpg_key} +enabled=1 +enabled_metadata=1 +_eof + + fetch_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${repo_rev}/" + __rpm_import_gpg "${fetch_url}${gpg_key}" || return 1 + yum clean metadata || return 1 + elif [ "$repo_rev" != "latest" ]; then + echowarn "saltstack.repo already exists, ignoring salt version argument." + echowarn "Use -F (forced overwrite) to install $repo_rev." + fi + + return 0 +} + +install_centos_stable_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + # CentOS/RHEL 8 Default to Py3 + if [ "x${_PY_EXE}" = "x" ]; then + _PY_EXE=python3 + _PY_MAJOR_VERSION=3 + fi + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + echowarn "Detected -r or -R option while installing Salt packages for Python 3." + echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." + echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then + __install_epel_repository || return 1 + __install_saltstack_rhel_repository || return 1 + fi + + # If -R was passed, we need to configure custom repo url with rsync-ed packages + # Which is still handled in __install_saltstack_rhel_repository. This call has + # its own check in case -r was passed without -R. 
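+    # (Editor's illustration; hypothetical values, assuming the default repo
+    # host.) On CentOS 7 with Py2 packages and repo_rev=3000, the heredoc
+    # above renders /etc/yum.repos.d/saltstack.repo along the lines of:
+    #   [saltstack]
+    #   name=SaltStack 3000 Release Channel for RHEL/CentOS $releasever
+    #   baseurl=https://repo.saltstack.com/yum/redhat/7/$basearch/3000/
+    #   gpgkey=https://repo.saltstack.com/yum/redhat/7/$basearch/3000/SALTSTACK-GPG-KEY.pub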
+ if [ "$_CUSTOM_REPO_URL" != "null" ]; then + __install_saltstack_rhel_repository || return 1 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __PACKAGES="dnf-utils chkconfig" + else + __PACKAGES="yum-utils chkconfig" + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + # YAML module is used for generating custom master/minion configs + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PACKAGES="${__PACKAGES} python3-pyyaml" + else + __PACKAGES="${__PACKAGES} python2-pyyaml" + fi + elif [ "$DISTRO_MAJOR_VERSION" -eq 7 ]; then + # YAML module is used for generating custom master/minion configs + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PACKAGES="${__PACKAGES} python36-PyYAML" + else + __PACKAGES="${__PACKAGES} PyYAML" + fi + else + # YAML module is used for generating custom master/minion configs + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PACKAGES="${__PACKAGES} python34-PyYAML" + else + __PACKAGES="${__PACKAGES} PyYAML" + fi + fi + fi + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + + return 0 +} + +install_centos_stable() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_centos_stable_post() { + SYSTEMD_RELOAD=$BS_FALSE + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + /bin/systemctl is-enabled salt-${fname}.service > /dev/null 2>&1 || ( + /bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 && + /bin/systemctl enable salt-${fname}.service > /dev/null 2>&1 + ) + + SYSTEMD_RELOAD=$BS_TRUE + elif [ -f "/etc/init.d/salt-${fname}" ]; then + /sbin/chkconfig salt-${fname} on + fi + done + + if [ "$SYSTEMD_RELOAD" -eq $BS_TRUE ]; then + /bin/systemctl daemon-reload + fi + + return 0 +} + +install_centos_git_deps() { + install_centos_stable_deps || return 1 + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __yum_install_noinput ca-certificates || return 1 + fi + + if ! 
__check_command_exists git; then + __yum_install_noinput git || return 1 + fi + + __git_clone_and_checkout || return 1 + + __PACKAGES="" + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + # Packages are named python3- + PY_PKG_VER=3 + __PACKAGES="${__PACKAGES} python3" + else + # Packages are named python36- + PY_PKG_VER=36 + __PACKAGES="${__PACKAGES} python36" + fi + else + PY_PKG_VER="" + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __PACKAGES="${__PACKAGES} python2" + elif [ "$DISTRO_MAJOR_VERSION" -eq 6 ]; then + PY_PKG_VER=27 + __PACKAGES="${__PACKAGES} python27" + else + __PACKAGES="${__PACKAGES} python" + fi + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + _install_m2crypto_req=false + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + _py=${_PY_EXE} + if [ "$DISTRO_MAJOR_VERSION" -gt 6 ]; then + _install_m2crypto_req=true + fi + else + if [ "$DISTRO_MAJOR_VERSION" -eq 6 ]; then + _install_m2crypto_req=true + fi + _py="python" + + # Only Py2 needs python-futures + __PACKAGES="${__PACKAGES} python-futures" + + # There is no systemd-python3 package as of this writing + if [ "$DISTRO_MAJOR_VERSION" -ge 7 ]; then + __PACKAGES="${__PACKAGES} systemd-python" + fi + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __install_tornado_pip ${_py} || return 1 + __PACKAGES="${__PACKAGES} python3-m2crypto" + else + __PACKAGES="${__PACKAGES} m2crypto python${PY_PKG_VER}-crypto" + fi + + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-zmq" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud" + fi + + if [ "${_INSTALL_PY}" -eq "${BS_TRUE}" ]; then + # Install Python if "-y" was passed in. + __install_python || return 1 + fi + + if [ "${_PY_EXE}" != "" ] && [ "$_PIP_ALLOWED" -eq "$BS_TRUE" ]; then + # If "-x" is defined, install dependencies with pip based on the Python version given. + _PIP_PACKAGES="m2crypto!=0.33.0 jinja2 msgpack-python pycrypto PyYAML tornado<5.0 zmq futures>=2.0" + + # install swig and openssl on cent6 + if $_install_m2crypto_req; then + __yum_install_noinput openssl-devel swig || return 1 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # Filter out any commented lines from the requirements file + _REQ_LINES="$(grep '^[^#]' "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + for SINGLE_PACKAGE in ${_PIP_PACKAGES}; do + __REQUIRED_VERSION="$(grep "${SINGLE_PACKAGE}" "${_REQ_LINES}")" + if [ "${__REQUIRED_VERSION}" != "" ]; then + _PIP_PACKAGES=$(echo "$_PIP_PACKAGES" | sed "s/${SINGLE_PACKAGE}/${__REQUIRED_VERSION}/") + fi + done + fi + + if [ "$_INSTALL_CLOUD" -eq "${BS_TRUE}" ]; then + _PIP_PACKAGES="${_PIP_PACKAGES} apache-libcloud" + fi + + __install_pip_pkgs "${_PIP_PACKAGES}" "${_PY_EXE}" || return 1 + else + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + else + if [ "${_INSTALL_PY}" -eq "${BS_TRUE}" ] && [ "$DISTRO_MAJOR_VERSION" -lt 8 ]; then + # Install Python if "-y" was passed in. 
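+        # (Editor's note.) In the pip branch above, _REQ_LINES holds the
+        # *contents* of requirements/base.txt, yet it is handed to grep as if
+        # it were a file path:
+        #   __REQUIRED_VERSION="$(grep "${SINGLE_PACKAGE}" "${_REQ_LINES}")"
+        # The presumably intended form pipes the text instead:
+        #   __REQUIRED_VERSION="$(echo "${_REQ_LINES}" | grep "${SINGLE_PACKAGE}")"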
+ __install_python || return 1 + fi + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_centos_git() { + if [ "${_PY_EXE}" != "" ]; then + _PYEXE=${_PY_EXE} + echoinfo "Using the following python version: ${_PY_EXE} to install salt" + else + _PYEXE='python2' + fi + + echodebug "_PY_EXE: $_PY_EXE" + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + $_PYEXE setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + else + $_PYEXE setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + fi + + return 0 +} + +install_centos_git_post() { + SYSTEMD_RELOAD=$BS_FALSE + + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + if [ ! -f "/usr/lib/systemd/system/salt-${fname}.service" ] || \ + { [ -f "/usr/lib/systemd/system/salt-${fname}.service" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" /usr/lib/systemd/system + fi + + SYSTEMD_RELOAD=$BS_TRUE + elif [ ! -f "/etc/init.d/salt-$fname" ] || \ + { [ -f "/etc/init.d/salt-$fname" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}" /etc/init.d + chmod +x /etc/init.d/salt-${fname} + fi + done + + if [ "$SYSTEMD_RELOAD" -eq $BS_TRUE ]; then + /bin/systemctl daemon-reload + fi + + install_centos_stable_post || return 1 + + return 0 +} + +install_centos_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then + # We have upstart support and upstart knows about our service + if ! /sbin/initctl status salt-$fname > /dev/null 2>&1; then + # Everything is in place and upstart gave us an error code? Fail! + return 1 + fi + + # upstart knows about this service. + # Let's try to stop it, and then start it + /sbin/initctl stop salt-$fname > /dev/null 2>&1 + # Restart service + if ! /sbin/initctl start salt-$fname > /dev/null 2>&1; then + # Failed the restart?! 
+ return 1 + fi + elif [ -f /etc/init.d/salt-$fname ]; then + # Disable stdin to fix shell session hang on killing tee pipe + service salt-$fname stop < /dev/null > /dev/null 2>&1 + service salt-$fname start < /dev/null + elif [ -f /usr/bin/systemctl ]; then + # CentOS 7 uses systemd + /usr/bin/systemctl stop salt-$fname > /dev/null 2>&1 + /usr/bin/systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + fi + done +} + +install_centos_testing_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_centos_testing() { + install_centos_stable || return 1 + return 0 +} + +install_centos_testing_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_centos_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then + __check_services_upstart salt-$fname || return 1 + elif [ -f /etc/init.d/salt-$fname ]; then + __check_services_sysvinit salt-$fname || return 1 + elif [ -f /usr/bin/systemctl ]; then + __check_services_systemd salt-$fname || return 1 + fi + done + + return 0 +} +# +# Ended CentOS Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# RedHat Install Functions +# +install_red_hat_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_red_hat_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_red_hat_enterprise_linux_stable_deps() { + install_red_hat_linux_stable_deps || return 1 + return 0 +} + +install_red_hat_enterprise_linux_git_deps() { + install_red_hat_linux_git_deps || return 1 + return 0 +} + +install_red_hat_enterprise_server_stable_deps() { + install_red_hat_linux_stable_deps || return 1 + return 0 +} + +install_red_hat_enterprise_server_git_deps() { + install_red_hat_linux_git_deps || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_stable_deps() { + install_red_hat_linux_stable_deps || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_git_deps() { + install_red_hat_linux_git_deps || return 1 + return 0 +} + +install_red_hat_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_red_hat_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_red_hat_enterprise_linux_stable() { + install_red_hat_linux_stable || return 1 + return 0 +} + +install_red_hat_enterprise_linux_git() { + install_red_hat_linux_git || return 1 + return 0 +} + +install_red_hat_enterprise_server_stable() { + install_red_hat_linux_stable || return 1 + return 0 +} + +install_red_hat_enterprise_server_git() { + install_red_hat_linux_git || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_stable() { + install_red_hat_linux_stable || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_git() 
{ + install_red_hat_linux_git || return 1 + return 0 +} + +install_red_hat_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_red_hat_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_red_hat_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_red_hat_enterprise_linux_stable_post() { + install_red_hat_linux_stable_post || return 1 + return 0 +} + +install_red_hat_enterprise_linux_restart_daemons() { + install_red_hat_linux_restart_daemons || return 1 + return 0 +} + +install_red_hat_enterprise_linux_git_post() { + install_red_hat_linux_git_post || return 1 + return 0 +} + +install_red_hat_enterprise_server_stable_post() { + install_red_hat_linux_stable_post || return 1 + return 0 +} + +install_red_hat_enterprise_server_restart_daemons() { + install_red_hat_linux_restart_daemons || return 1 + return 0 +} + +install_red_hat_enterprise_server_git_post() { + install_red_hat_linux_git_post || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_stable_post() { + install_red_hat_linux_stable_post || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_restart_daemons() { + install_red_hat_linux_restart_daemons || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_git_post() { + install_red_hat_linux_git_post || return 1 + return 0 +} + +install_red_hat_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_red_hat_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_red_hat_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_red_hat_enterprise_server_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_red_hat_enterprise_server_testing() { + install_centos_testing || return 1 + return 0 +} + +install_red_hat_enterprise_server_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_testing() { + install_centos_testing || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_testing_post() { + install_centos_testing_post || return 1 + return 0 +} +# +# Ended RedHat Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Oracle Linux Install Functions +# +install_oracle_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_oracle_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_oracle_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_oracle_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_oracle_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_oracle_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_oracle_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_oracle_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_oracle_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_oracle_linux_restart_daemons() { + install_centos_restart_daemons || 
return 1 + return 0 +} + +install_oracle_linux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# Ended Oracle Linux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Scientific Linux Install Functions +# +install_scientific_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_scientific_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_scientific_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_scientific_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_scientific_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_scientific_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_scientific_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_scientific_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_scientific_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_scientific_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_scientific_linux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# Ended Scientific Linux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# CloudLinux Install Functions +# +install_cloud_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_cloud_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_cloud_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_cloud_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_cloud_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_cloud_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_cloud_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_cloud_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_cloud_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_cloud_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_cloud_linux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# End of CloudLinux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Alpine Linux Install Functions +# +install_alpine_linux_stable_deps() { + if ! 
grep -q '^[^#].\+alpine/.\+/community' /etc/apk/repositories; then + # Add community repository entry based on the "main" repo URL + __REPO=$(grep '^[^#].\+alpine/.\+/main\>' /etc/apk/repositories) + echo "${__REPO}" | sed -e 's/main/community/' >> /etc/apk/repositories + fi + + apk update + + # Get latest root CA certs + apk -U add ca-certificates + + if ! __check_command_exists openssl; then + # Install OpenSSL to be able to pull from https:// URLs + apk -U add openssl + fi +} + +install_alpine_linux_git_deps() { + install_alpine_linux_stable_deps || return 1 + + if ! __check_command_exists git; then + apk -U add git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + apk -U add python2 py-virtualenv py2-crypto py2-m2crypto py2-setuptools \ + py2-jinja2 py2-yaml py2-markupsafe py2-msgpack py2-psutil \ + py2-zmq zeromq py2-requests || return 1 + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + apk -U add py2-tornado || return 1 + fi + fi + else + apk -U add python2 py2-pip py2-setuptools || return 1 + _PY_EXE=python2 + return 0 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi +} + +install_alpine_linux_stable() { + __PACKAGES="salt" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + apk -U add ${__PACKAGES} || return 1 + return 0 +} + +install_alpine_linux_git() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + python2 setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install || return 1 + else + python2 setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 + fi +} + +install_alpine_linux_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /sbin/rc-update ]; then + script_url="${_SALTSTACK_REPO_URL%.git}/raw/master/pkg/alpine/salt-$fname" + [ -f "/etc/init.d/salt-$fname" ] || __fetch_url "/etc/init.d/salt-$fname" "$script_url" + + # shellcheck disable=SC2181 + if [ $? -eq 0 ]; then + chmod +x "/etc/init.d/salt-$fname" + else + echoerror "Failed to get OpenRC init script for $OS_NAME from $script_url." 
+ return 1 + fi + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + /sbin/rc-update add "salt-$fname" > /dev/null 2>&1 || return 1 + fi + done +} + +install_alpine_linux_restart_daemons() { + [ "${_START_DAEMONS}" -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # Disable stdin to fix shell session hang on killing tee pipe + /sbin/rc-service salt-$fname stop < /dev/null > /dev/null 2>&1 + /sbin/rc-service salt-$fname start < /dev/null || return 1 + done +} + +install_alpine_linux_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_alpine salt-$fname || return 1 + done + + return 0 +} + +daemons_running_alpine_linux() { + [ "${_START_DAEMONS}" -eq $BS_FALSE ] && return + + FAILED_DAEMONS=0 + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # shellcheck disable=SC2009 + if [ "$(ps wwwaux | grep -v grep | grep salt-$fname)" = "" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + done + + return $FAILED_DAEMONS +} + +# +# Ended Alpine Linux Install Functions +# +####################################################################################################################### + + +####################################################################################################################### +# +# Amazon Linux AMI Install Functions +# + +install_amazon_linux_ami_deps() { + # Shim to figure out if we're using old (rhel) or new (aws) rpms. + _USEAWS=$BS_FALSE + pkg_append="python" + + if [ "$ITYPE" = "stable" ]; then + repo_rev="$STABLE_REV" + else + repo_rev="latest" + fi + + if echo $repo_rev | grep -E -q '^archive'; then + year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4) + else + year=$(echo "$repo_rev" | cut -c1-4) + fi + + if echo "$repo_rev" | grep -E -q '^(latest|2016\.11)$' || \ + [ "$year" -gt 2016 ]; then + _USEAWS=$BS_TRUE + pkg_append="python27" + fi + + # We need to install yum-utils before doing anything else when installing on + # Amazon Linux ECS-optimized images. See issue #974. 
+ __yum_install_noinput yum-utils + + # Do upgrade early + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __REPO_FILENAME="saltstack-repo.repo" + + # Set a few vars to make life easier. + if [ $_USEAWS -eq $BS_TRUE ]; then + base_url="$HTTP_VAL://${_REPO_URL}/yum/amazon/latest/\$basearch/$repo_rev/" + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" + repo_name="SaltStack repo for Amazon Linux" + else + base_url="$HTTP_VAL://${_REPO_URL}/yum/redhat/6/\$basearch/$repo_rev/" + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" + repo_name="SaltStack repo for RHEL/CentOS 6" + fi + + # This should prob be refactored to use __install_saltstack_rhel_repository() + # With args passed in to do the right thing. Reformatted to be more like the + # amazon linux yum file. + if [ ! -s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then + cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}" +[saltstack-repo] +name=$repo_name +failovermethod=priority +priority=10 +gpgcheck=1 +gpgkey=$gpg_key +baseurl=$base_url +_eof + fi + + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + # Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 + # which is already installed + __PACKAGES="m2crypto ${pkg_append}-crypto ${pkg_append}-jinja2 ${pkg_append}-PyYAML" + __PACKAGES="${__PACKAGES} ${pkg_append}-msgpack ${pkg_append}-requests ${pkg_append}-zmq" + __PACKAGES="${__PACKAGES} ${pkg_append}-futures" + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi +} + +install_amazon_linux_ami_git_deps() { + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + yum -y install ca-certificates || return 1 + fi + + PIP_EXE='pip' + if __check_command_exists python2.7; then + if ! __check_command_exists pip2.7; then + if ! __check_command_exists easy_install-2.7; then + __yum_install_noinput python27-setuptools + fi + /usr/bin/easy_install-2.7 pip || return 1 + fi + PIP_EXE='/usr/local/bin/pip2.7' + _PY_EXE='python2.7' + fi + + install_amazon_linux_ami_deps || return 1 + + if ! 
__check_command_exists git; then + __yum_install_noinput git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + __PACKAGES="" + __PIP_PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __check_pip_allowed "You need to allow pip based installations (-P) in order to install apache-libcloud" + __PACKAGES="${__PACKAGES} python27-pip" + __PIP_PACKAGES="${__PIP_PACKAGES} apache-libcloud>=$_LIBCLOUD_MIN_VERSION" + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} ${pkg_append}-tornado" + fi + fi + + if [ "${__PACKAGES}" != "" ]; then + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${__PIP_PACKAGES}" != "" ]; then + # shellcheck disable=SC2086 + ${PIP_EXE} install ${__PIP_PACKAGES} || return 1 + fi + else + __PACKAGES="python27-pip python27-setuptools python27-devel gcc" + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_amazon_linux_ami_2_git_deps() { + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + yum -y install ca-certificates || return 1 + fi + + install_amazon_linux_ami_2_deps || return 1 + + if [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + PIP_EXE='/bin/pip' + else + PY_PKG_VER=3 + PIP_EXE='/bin/pip3' + fi + __PACKAGES="python${PY_PKG_VER}-pip" + + if ! __check_command_exists "${PIP_EXE}"; then + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if ! 
__check_command_exists git; then + __yum_install_noinput git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + __PACKAGES="" + __PIP_PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq "$BS_TRUE" ]; then + __check_pip_allowed "You need to allow pip based installations (-P) in order to install apache-libcloud" + if [ "$PARSED_VERSION" -eq "2" ]; then + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq "3" ]; then + __PACKAGES="${__PACKAGES} python3-pip" + __PIP_PACKAGES="${__PIP_PACKAGES} tornado<$_TORNADO_MAX_PY3_VERSION" + else + __PACKAGES="${__PACKAGES} python2-pip" + fi + else + __PACKAGES="${__PACKAGES} python27-pip" + fi + __PIP_PACKAGES="${__PIP_PACKAGES} apache-libcloud>=$_LIBCLOUD_MIN_VERSION" + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq "3" ]; then + __PIP_PACKAGES="${__PIP_PACKAGES} tornado<$_TORNADO_MAX_PY3_VERSION" + else + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-tornado" + fi + fi + fi + + if [ "${__PIP_PACKAGES}" != "" ]; then + __check_pip_allowed "You need to allow pip based installations (-P) in order to install ${__PIP_PACKAGES}" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pip" + fi + + if [ "${__PACKAGES}" != "" ]; then + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${__PIP_PACKAGES}" != "" ]; then + # shellcheck disable=SC2086 + ${PIP_EXE} install ${__PIP_PACKAGES} || return 1 + fi + else + __PACKAGES="python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools python${PY_PKG_VER}-devel gcc" + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_amazon_linux_ami_2_deps() { + # Shim to figure out if we're using old (rhel) or new (aws) rpms. + _USEAWS=$BS_FALSE + pkg_append="python" + + if [ "$ITYPE" = "stable" ]; then + repo_rev="$STABLE_REV" + else + repo_rev="latest" + fi + + if echo $repo_rev | grep -E -q '^archive'; then + year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4) + else + year=$(echo "$repo_rev" | cut -c1-4) + fi + + if echo "$repo_rev" | grep -E -q '^(latest|2016\.11)$' || \ + [ "$year" -gt 2016 ]; then + _USEAWS=$BS_TRUE + pkg_append="python" + fi + + # We need to install yum-utils before doing anything else when installing on + # Amazon Linux ECS-optimized images. See issue #974. 
+ __yum_install_noinput yum-utils + + # Do upgrade early + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __REPO_FILENAME="saltstack-repo.repo" + __PY_VERSION_REPO="yum" + PY_PKG_VER="" + repo_label="saltstack-repo" + repo_name="SaltStack repo for Amazon Linux 2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __REPO_FILENAME="saltstack-py3-repo.repo" + __PY_VERSION_REPO="py3" + PY_PKG_VER=3 + repo_label="saltstack-py3-repo" + repo_name="SaltStack Python 3 repo for Amazon Linux 2" + fi + + base_url="$HTTP_VAL://${_REPO_URL}/${__PY_VERSION_REPO}/amazon/2/\$basearch/$repo_rev/" + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub,${base_url}base/RPM-GPG-KEY-CentOS-7" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" + fi + + # This should prob be refactored to use __install_saltstack_rhel_repository() + # With args passed in to do the right thing. Reformatted to be more like the + # amazon linux yum file. + if [ ! -s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then + cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}" +[$repo_label] +name=$repo_name +failovermethod=priority +priority=10 +gpgcheck=1 +gpgkey=$gpg_key +baseurl=$base_url +_eof + fi + + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + # Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 + # which is already installed + if [ -n "${PY_PKG_VER}" ] && [ "${PY_PKG_VER}" -eq 3 ]; then + __PACKAGES="${pkg_append}${PY_PKG_VER}-m2crypto ${pkg_append}${PY_PKG_VER}-pyyaml" + else + __PACKAGES="m2crypto PyYAML ${pkg_append}-futures" + fi + + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-crypto ${pkg_append}${PY_PKG_VER}-jinja2 procps-ng" + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-msgpack ${pkg_append}${PY_PKG_VER}-requests ${pkg_append}${PY_PKG_VER}-zmq" + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi +} + +install_amazon_linux_ami_stable() { + install_centos_stable || return 1 + return 0 +} + +install_amazon_linux_ami_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_amazon_linux_ami_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_amazon_linux_ami_git() { + install_centos_git || return 1 + return 0 +} + +install_amazon_linux_ami_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_amazon_linux_ami_testing() { + install_centos_testing || return 1 + return 0 +} + +install_amazon_linux_ami_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_amazon_linux_ami_2_stable() { + install_centos_stable || return 1 + return 0 +} + +install_amazon_linux_ami_2_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_amazon_linux_ami_2_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_amazon_linux_ami_2_git() { + install_centos_git || return 1 + return 0 +} + +install_amazon_linux_ami_2_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_amazon_linux_ami_2_testing() { + install_centos_testing || return 1 + return 0 +} + 
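+# NOTE (editorial): these one-line wrappers exist because the bootstrap
+# dispatcher derives function names from the detected distro and install
+# type, then calls whichever functions are defined. A minimal sketch of that
+# lookup (a hypothetical simplification, not the full resolution order;
+# DISTRO_NAME_L and ITYPE are the variables this script uses elsewhere):
+#
+#   DEPS_FUNC="install_${DISTRO_NAME_L}_${ITYPE}_deps"
+#   INSTALL_FUNC="install_${DISTRO_NAME_L}_${ITYPE}"
+#   if __function_defined "${DEPS_FUNC}"; then "${DEPS_FUNC}" || exit 1; fi
+#   if __function_defined "${INSTALL_FUNC}"; then "${INSTALL_FUNC}" || exit 1; fi
+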
+install_amazon_linux_ami_2_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_amazon_linux_ami_2_check_services() { + install_centos_check_services || return 1 + return 0 +} + +# +# Ended Amazon Linux AMI Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Arch Install Functions +# +install_arch_linux_stable_deps() { + if [ ! -f /etc/pacman.d/gnupg ]; then + pacman-key --init && pacman-key --populate archlinux || return 1 + fi + + # Pacman does not resolve dependencies on outdated versions + # They always need to be updated + pacman -Syy --noconfirm + + pacman -S --noconfirm --needed archlinux-keyring || return 1 + + pacman -Su --noconfirm --needed pacman || return 1 + + if __check_command_exists pacman-db-upgrade; then + pacman-db-upgrade || return 1 + fi + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + else + PY_PKG_VER="" + fi + + # YAML module is used for generating custom master/minion configs + # shellcheck disable=SC2086 + pacman -Su --noconfirm --needed python${PY_PKG_VER}-yaml + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # shellcheck disable=SC2086 + pacman -Su --noconfirm --needed python${PY_PKG_VER}-apache-libcloud || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + pacman -Su --noconfirm --needed ${_EXTRA_PACKAGES} || return 1 + fi +} + +install_arch_linux_git_deps() { + install_arch_linux_stable_deps + + # Don't fail if un-installing python2-distribute threw an error + if ! __check_command_exists git; then + pacman -Sy --noconfirm --needed git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + pacman -R --noconfirm python2-distribute + pacman -Su --noconfirm --needed python2-crypto python2-setuptools python2-jinja \ + python2-m2crypto python2-futures python2-markupsafe python2-msgpack python2-psutil \ + python2-pyzmq zeromq python2-requests python2-systemd || return 1 + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + pacman -Su --noconfirm --needed python2-tornado + fi + fi + else + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + else + PY_PKG_VER="" + fi + __PACKAGES="python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + # shellcheck disable=SC2086 + pacman -Su --noconfirm --needed ${__PACKAGES} + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_arch_linux_stable() { + # Pacman does not resolve dependencies on outdated versions + # They always need to be updated + pacman -Syy --noconfirm + + pacman -Su --noconfirm --needed pacman || return 1 + # See https://mailman.archlinux.org/pipermail/arch-dev-public/2013-June/025043.html + # to know why we're ignoring below. 
+ pacman -Syu --noconfirm --ignore filesystem,bash || return 1 + pacman -S --noconfirm --needed bash || return 1 + pacman -Su --noconfirm || return 1 + # We can now resume regular salt update + pacman -Syu --noconfirm salt python2-futures || return 1 + return 0 +} + +install_arch_linux_git() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + python2 setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install || return 1 + else + python2 setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 + fi + return 0 +} + +install_arch_linux_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # Since Arch's pacman renames configuration files + if [ "$_TEMP_CONFIG_DIR" != "null" ] && [ -f "$_SALT_ETC_DIR/$fname.pacorig" ]; then + # Since a configuration directory was provided, it also means that any + # configuration file copied was renamed by Arch, see: + # https://wiki.archlinux.org/index.php/Pacnew_and_Pacsave_Files#.pacorig + __copyfile "$_SALT_ETC_DIR/$fname.pacorig" "$_SALT_ETC_DIR/$fname" $BS_TRUE + fi + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + if [ -f /usr/bin/systemctl ]; then + # Using systemd + /usr/bin/systemctl is-enabled salt-$fname.service > /dev/null 2>&1 || ( + /usr/bin/systemctl preset salt-$fname.service > /dev/null 2>&1 && + /usr/bin/systemctl enable salt-$fname.service > /dev/null 2>&1 + ) + sleep 1 + /usr/bin/systemctl daemon-reload + continue + fi + + # XXX: How do we enable old Arch init.d scripts? + done +} + +install_arch_linux_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /usr/bin/systemctl ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + /usr/bin/systemctl is-enabled salt-${fname}.service > /dev/null 2>&1 || ( + /usr/bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 && + /usr/bin/systemctl enable salt-${fname}.service > /dev/null 2>&1 + ) + sleep 1 + /usr/bin/systemctl daemon-reload + continue + fi + + # SysV init!? 
+ __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-$fname" "/etc/rc.d/init.d/salt-$fname" + chmod +x /etc/rc.d/init.d/salt-$fname + done +} + +install_arch_linux_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /usr/bin/systemctl ]; then + /usr/bin/systemctl stop salt-$fname.service > /dev/null 2>&1 + /usr/bin/systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + fi + + /etc/rc.d/salt-$fname stop > /dev/null 2>&1 + /etc/rc.d/salt-$fname start + done +} + +install_arch_check_services() { + if [ ! -f /usr/bin/systemctl ]; then + # Not running systemd!? Don't check! + return 0 + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || return 1 + done + + return 0 +} +# +# Ended Arch Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# FreeBSD Install Functions +# + +# Using a separate conf step to head for idempotent install... +__configure_freebsd_pkg_details() { + _SALT_ETC_DIR="/usr/local/etc/salt" +} + +install_freebsd_deps() { + __configure_freebsd_pkg_details + pkg install -y pkg +} + +install_freebsd_git_deps() { + install_freebsd_deps || return 1 + + if ! __check_command_exists git; then + /usr/local/sbin/pkg install -y git || return 1 + fi + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + SALT_DEPENDENCIES=$(/usr/local/sbin/pkg rquery %dn py37-salt) + # shellcheck disable=SC2086 + /usr/local/sbin/pkg install -y ${SALT_DEPENDENCIES} python || return 1 + + /usr/local/sbin/pkg install -y py37-requests || return 1 + else + /usr/local/sbin/pkg install -y python python-pip python-setuptools || return 1 + fi + + echodebug "Adapting paths to FreeBSD" + # The list of files was taken from Salt's BSD port Makefile + for file in doc/man/salt-key.1 doc/man/salt-cp.1 doc/man/salt-minion.1 \ + doc/man/salt-syndic.1 doc/man/salt-master.1 doc/man/salt-run.1 \ + doc/man/salt.7 doc/man/salt.1 doc/man/salt-call.1; do + [ ! -f $file ] && continue + echodebug "Patching ${file}" + sed -in -e "s|/etc/salt|${_SALT_ETC_DIR}|" \ + -e "s|/srv/salt|${_SALT_ETC_DIR}/states|" \ + -e "s|/srv/pillar|${_SALT_ETC_DIR}/pillar|" ${file} + done + if [ ! 
-f salt/syspaths.py ]; then + # We still can't provide the system paths, salt 0.16.x + # Let's patch salt's source and adapt paths to what's expected on FreeBSD + echodebug "Replacing occurrences of '/etc/salt' with ${_SALT_ETC_DIR}" + # The list of files was taken from Salt's BSD port Makefile + for file in conf/minion conf/master salt/config.py salt/client.py \ + salt/modules/mysql.py salt/utils/parsers.py salt/modules/tls.py \ + salt/modules/postgres.py salt/utils/migrations.py; do + [ ! -f $file ] && continue + echodebug "Patching ${file}" + sed -in -e "s|/etc/salt|${_SALT_ETC_DIR}|" \ + -e "s|/srv/salt|${_SALT_ETC_DIR}/states|" \ + -e "s|/srv/pillar|${_SALT_ETC_DIR}/pillar|" ${file} + done + fi + echodebug "Finished patching" + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + + fi + + return 0 +} + +install_freebsd_stable() { +# +# installing latest version of salt from FreeBSD CURRENT ports repo +# + # shellcheck disable=SC2086 + /usr/local/sbin/pkg install -y py37-salt || return 1 + + return 0 +} + +install_freebsd_git() { + + # /usr/local/bin/python3 in FreeBSD is a symlink to /usr/local/bin/python3.7 + __PYTHON_PATH=$(readlink -f "$(command -v python3)") + __ESCAPED_PYTHON_PATH=$(echo "${__PYTHON_PATH}" | sed 's/\//\\\//g') + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${__PYTHON_PATH}" || return 1 + return 0 + fi + + # Install from git + if [ ! -f salt/syspaths.py ]; then + # We still can't provide the system paths, salt 0.16.x + ${__PYTHON_PATH} setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 + else + ${__PYTHON_PATH} setup.py \ + --salt-root-dir=/ \ + --salt-config-dir="${_SALT_ETC_DIR}" \ + --salt-cache-dir="${_SALT_CACHE_DIR}" \ + --salt-sock-dir=/var/run/salt \ + --salt-srv-root-dir="${_SALT_ETC_DIR}" \ + --salt-base-file-roots-dir="${_SALT_ETC_DIR}/states" \ + --salt-base-pillar-roots-dir="${_SALT_ETC_DIR}/pillar" \ + --salt-base-master-roots-dir="${_SALT_ETC_DIR}/salt-master" \ + --salt-logs-dir=/var/log/salt \ + --salt-pidfile-dir=/var/run \ + ${SETUP_PY_INSTALL_ARGS} install \ + || return 1 + fi + + for script in salt_api salt_master salt_minion salt_proxy salt_syndic; do + __fetch_url "/usr/local/etc/rc.d/${script}" "https://raw.githubusercontent.com/freebsd/freebsd-ports/master/sysutils/py-salt/files/${script}.in" || return 1 + sed -i '' 's/%%PREFIX%%/\/usr\/local/g' /usr/local/etc/rc.d/${script} + sed -i '' "s/%%PYTHON_CMD%%/${__ESCAPED_PYTHON_PATH}/g" /usr/local/etc/rc.d/${script} + chmod +x /usr/local/etc/rc.d/${script} || return 1 + done + + # And we're good to go + return 0 +} + +install_freebsd_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + enable_string="salt_${fname}_enable=YES" + grep "$enable_string" /etc/rc.conf >/dev/null 2>&1 + [ $? 
-eq 1 ] && sysrc $enable_string + + done +} + +install_freebsd_git_post() { + install_freebsd_stable_post || return 1 + return 0 +} + +install_freebsd_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + service salt_$fname stop > /dev/null 2>&1 + service salt_$fname start + done +} +# +# Ended FreeBSD Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# OpenBSD Install Functions +# + +install_openbsd_deps() { + if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then + OPENBSD_REPO='https://cdn.openbsd.org/pub/OpenBSD' + echoinfo "setting package repository to $OPENBSD_REPO" + echo "${OPENBSD_REPO}" >/etc/installurl || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + pkg_add -I -v ${_EXTRA_PACKAGES} || return 1 + fi + return 0 +} + +install_openbsd_git_deps() { + install_openbsd_deps || return 1 + + if ! __check_command_exists git; then + pkg_add -I -v git || return 1 + fi + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + pkg_add -I -v py-pip py-setuptools + fi + + # + # Let's trigger config_salt() + # + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_openbsd_git() { + # + # Install from git + # + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ ! 
-f salt/syspaths.py ]; then + # We still can't provide the system paths, salt 0.16.x + /usr/local/bin/python2.7 setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 + fi + return 0 +} + +install_openbsd_stable() { + pkg_add -r -I -v salt || return 1 + return 0 +} + +install_openbsd_post() { + for fname in api master minion syndic; do + [ $fname = "api" ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + rcctl enable salt_$fname + done + + return 0 +} + +install_openbsd_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && continue + + if [ -f /etc/rc.d/salt_${fname} ]; then + __check_services_openbsd salt_${fname} || return 1 + fi + done + + return 0 +} + +install_openbsd_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + rcctl restart salt_${fname} + done + + return 0 +} + +# +# Ended OpenBSD Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# SmartOS Install Functions +# +install_smartos_deps() { + smartos_deps="$(pkgin show-deps salt | grep '^\s' | grep -v '\snot' | xargs) py27-m2crypto" + pkgin -y install "${smartos_deps}" || return 1 + + # Set _SALT_ETC_DIR to SmartOS default if they didn't specify + _SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/opt/local/etc/salt} + # We also need to redefine the PKI directory + _PKI_DIR=${_SALT_ETC_DIR}/pki + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + # Let's set the configuration directory to /tmp + _TEMP_CONFIG_DIR="/tmp" + CONFIG_SALT_FUNC="config_salt" + + # Let's download, since they were not provided, the default configuration files + if [ ! -f "$_SALT_ETC_DIR/minion" ] && [ ! -f "$_TEMP_CONFIG_DIR/minion" ]; then + # shellcheck disable=SC2086 + curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/minion" -L \ + https://raw.githubusercontent.com/saltstack/salt/master/conf/minion || return 1 + fi + if [ ! -f "$_SALT_ETC_DIR/master" ] && [ ! 
-f $_TEMP_CONFIG_DIR/master ]; then + # shellcheck disable=SC2086 + curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/master" -L \ + https://raw.githubusercontent.com/saltstack/salt/master/conf/master || return 1 + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + pkgin -y install py27-apache-libcloud || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + pkgin -y install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_smartos_git_deps() { + install_smartos_deps || return 1 + + if ! __check_command_exists git; then + pkgin -y install git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # Install whichever tornado is in the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + __check_pip_allowed "You need to allow pip based installations (-P) in order to install the python package '${__REQUIRED_TORNADO}'" + + # Install whichever futures is in the requirements file + __REQUIRED_FUTURES="$(grep futures "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + __check_pip_allowed "You need to allow pip based installations (-P) in order to install the python package '${__REQUIRED_FUTURES}'" + + if [ "${__REQUIRED_TORNADO}" != "" ]; then + if ! __check_command_exists pip; then + pkgin -y install py27-pip + fi + pip install -U "${__REQUIRED_TORNADO}" + fi + + if [ "${__REQUIRED_FUTURES}" != "" ]; then + if ! __check_command_exists pip; then + pkgin -y install py27-pip + fi + pip install -U "${__REQUIRED_FUTURES}" + fi + fi + else + if ! __check_command_exists pip; then + pkgin -y install py27-pip + fi + pkgin -y install py27-setuptools + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_smartos_stable() { + pkgin -y install salt || return 1 + return 0 +} + +install_smartos_git() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + # Use setuptools in order to also install dependencies + # lets force our config path on the setup for now, since salt/syspaths.py only got fixed in 2015.5.0 + USE_SETUPTOOLS=1 /opt/local/bin/python setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install || return 1 + return 0 +} + +install_smartos_post() { + smf_dir="/opt/custom/smf" + + # Install manifest files if needed. + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + svcs network/salt-$fname > /dev/null 2>&1 + if [ $? -eq 1 ]; then + if [ ! 
-f "$_TEMP_CONFIG_DIR/salt-$fname.xml" ]; then + # shellcheck disable=SC2086 + curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/salt-$fname.xml" -L \ + "https://raw.githubusercontent.com/saltstack/salt/master/pkg/smartos/salt-$fname.xml" + fi + svccfg import "$_TEMP_CONFIG_DIR/salt-$fname.xml" + if [ "${VIRTUAL_TYPE}" = "global" ]; then + if [ ! -d "$smf_dir" ]; then + mkdir -p "$smf_dir" || return 1 + fi + if [ ! -f "$smf_dir/salt-$fname.xml" ]; then + __copyfile "$_TEMP_CONFIG_DIR/salt-$fname.xml" "$smf_dir/" || return 1 + fi + fi + fi + done + + return 0 +} + +install_smartos_git_post() { + smf_dir="/opt/custom/smf" + + # Install manifest files if needed. + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + svcs "network/salt-$fname" > /dev/null 2>&1 + if [ $? -eq 1 ]; then + svccfg import "${_SALT_GIT_CHECKOUT_DIR}/pkg/smartos/salt-$fname.xml" + if [ "${VIRTUAL_TYPE}" = "global" ]; then + if [ ! -d $smf_dir ]; then + mkdir -p "$smf_dir" + fi + if [ ! -f "$smf_dir/salt-$fname.xml" ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/smartos/salt-$fname.xml" "$smf_dir/" + fi + fi + fi + done + + return 0 +} + +install_smartos_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # Stop if running && Start service + svcadm disable salt-$fname > /dev/null 2>&1 + svcadm enable salt-$fname + done + + return 0 +} +# +# Ended SmartOS Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# openSUSE Install Functions. +# +__ZYPPER_REQUIRES_REPLACE_FILES=-1 + +__set_suse_pkg_repo() { + + # Set distro repo variable + if [ "${DISTRO_MAJOR_VERSION}" -gt 2015 ]; then + DISTRO_REPO="openSUSE_Tumbleweed" + elif [ "${DISTRO_MAJOR_VERSION}" -ge 42 ] || [ "${DISTRO_MAJOR_VERSION}" -eq 15 ]; then + DISTRO_REPO="openSUSE_Leap_${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}" + else + DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}_SP${SUSE_PATCHLEVEL}" + fi + + if [ "$_DOWNSTREAM_PKG_REPO" -eq $BS_TRUE ]; then + suse_pkg_url_base="https://download.opensuse.org/repositories/systemsmanagement:/saltstack" + suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo" + else + suse_pkg_url_base="${HTTP_VAL}://repo.saltstack.com/opensuse" + suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack:products.repo" + fi + SUSE_PKG_URL="$suse_pkg_url_base/$suse_pkg_url_path" +} + +__check_and_refresh_suse_pkg_repo() { + # Check to see if systemsmanagement_saltstack exists + __zypper repos | grep -q systemsmanagement_saltstack + + if [ $? 
-eq 1 ]; then
+        # zypper does not yet know anything about systemsmanagement_saltstack
+        __zypper addrepo --refresh "${SUSE_PKG_URL}" || return 1
+    fi
+}
+
+__version_lte() {
+    if ! __check_command_exists python; then
+        zypper --non-interactive install --replacefiles --auto-agree-with-licenses python || \
+            zypper --non-interactive install --auto-agree-with-licenses python || return 1
+    fi
+
+    if [ "$(python -c 'import sys; V1=tuple([int(i) for i in sys.argv[1].split(".")]); V2=tuple([int(i) for i in sys.argv[2].split(".")]); print V1<=V2' "$1" "$2")" = "True" ]; then
+        __ZYPPER_REQUIRES_REPLACE_FILES=${BS_TRUE}
+    else
+        __ZYPPER_REQUIRES_REPLACE_FILES=${BS_FALSE}
+    fi
+}
+
+__zypper() {
+    # Check if any zypper process is running before calling zypper again.
+    # This is useful when a zypper call is part of a boot process and will
+    # wait until the zypper process is finished, such as on AWS AMIs.
+    while pgrep -l zypper; do
+        sleep 1
+    done
+
+    zypper --non-interactive "${@}"
+    ZYPPER_EXIT_CODE=$?
+    # Return codes between 100 and 104 are informational only, not errors;
+    # capture the exit code once, since "$?" is reset by each test below.
+    # https://en.opensuse.org/SDB:Zypper_manual#EXIT_CODES
+    if [ "$ZYPPER_EXIT_CODE" -gt "99" ] && [ "$ZYPPER_EXIT_CODE" -le "104" ]; then
+        return 0
+    fi
+    return $ZYPPER_EXIT_CODE
+}
+
+__zypper_install() {
+    if [ "${__ZYPPER_REQUIRES_REPLACE_FILES}" = "-1" ]; then
+        __version_lte "1.10.4" "$(zypper --version | awk '{ print $2 }')"
+    fi
+    if [ "${__ZYPPER_REQUIRES_REPLACE_FILES}" = "${BS_TRUE}" ]; then
+        # In case of file conflicts replace old files.
+        # Option present in zypper 1.10.4 and newer:
+        # https://github.com/openSUSE/zypper/blob/95655728d26d6d5aef7796b675f4cc69bc0c05c0/package/zypper.changes#L253
+        __zypper install --auto-agree-with-licenses --replacefiles "${@}"; return $?
+    else
+        __zypper install --auto-agree-with-licenses "${@}"; return $?
+    fi
+}
+
+__opensuse_prep_install() {
+    # DRY function for common installation preparatory steps for SUSE
+    if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
+        # Is the repository already known
+        __set_suse_pkg_repo
+        # Check zypper repos and refresh if necessary
+        __check_and_refresh_suse_pkg_repo
+    fi
+
+    __zypper --gpg-auto-import-keys refresh
+    ZYPPER_EXIT_CODE=$?
+    if [ $ZYPPER_EXIT_CODE -ne 0 ] && [ $ZYPPER_EXIT_CODE -ne 4 ]; then
+        # If the exit code is not 0, and it's not 4 (failed to update a
+        # repository) return a failure. Otherwise continue.
+        return 1
+    fi
+
+    if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
+        __zypper --gpg-auto-import-keys update || return 1
+    fi
+}
+
+install_opensuse_stable_deps() {
+    __opensuse_prep_install || return 1
+
+    if [ "$DISTRO_MAJOR_VERSION" -eq 12 ] && [ "$DISTRO_MINOR_VERSION" -eq 3 ]; then
+        # Because patterns-openSUSE-minimal_base-conflicts conflicts with python, let's remove the first one
+        __zypper remove patterns-openSUSE-minimal_base-conflicts
+    fi
+
+    # YAML module is used for generating custom master/minion configs
+    # requests is still used by many salt modules
+    # Salt needs python-zypp installed in order to use the zypper module
+    __PACKAGES="python-PyYAML python-requests python-zypp"
+
+    # shellcheck disable=SC2086
+    __zypper_install ${__PACKAGES} || return 1
+
+    if [ "${_EXTRA_PACKAGES}" != "" ]; then
+        echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
+        # shellcheck disable=SC2086
+        __zypper_install ${_EXTRA_PACKAGES} || return 1
+    fi
+
+    return 0
+}
+
+install_opensuse_git_deps() {
+    if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ] && !
__check_command_exists update-ca-certificates; then + __zypper_install ca-certificates || return 1 + fi + + install_opensuse_stable_deps || return 1 + + if ! __check_command_exists git; then + __zypper_install git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + __zypper_install patch || return 1 + + __PACKAGES="libzmq5 python-Jinja2 python-m2crypto python-msgpack-python python-pycrypto python-pyzmq python-xml python-futures" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} python-tornado" + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-apache-libcloud" + fi + else + __PACKAGES="python-pip python-setuptools gcc" + fi + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_opensuse_stable() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __zypper_install $__PACKAGES || return 1 + + return 0 +} + +install_opensuse_git() { + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + python setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + return 0 +} + +install_opensuse_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ] || [ -f /usr/bin/systemctl ]; then + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + continue + fi + + /sbin/chkconfig --add salt-$fname + /sbin/chkconfig salt-$fname on + done + + return 0 +} + +install_opensuse_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! 
__check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + use_usr_lib=$BS_FALSE + + if [ "${DISTRO_MAJOR_VERSION}" -ge 15 ]; then + use_usr_lib=$BS_TRUE + fi + + if [ "${DISTRO_MAJOR_VERSION}" -eq 12 ] && [ -d "/usr/lib/systemd/" ]; then + use_usr_lib=$BS_TRUE + fi + + if [ "${use_usr_lib}" -eq $BS_TRUE ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/usr/lib/systemd/system/salt-${fname}.service" + else + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + fi + + continue + fi + + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-$fname" "/etc/init.d/salt-$fname" + chmod +x /etc/init.d/salt-$fname + done + + install_opensuse_stable_post || return 1 + + return 0 +} + +install_opensuse_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + fi + + service salt-$fname stop > /dev/null 2>&1 + service salt-$fname start + done +} + +install_opensuse_check_services() { + if [ ! -f /bin/systemctl ]; then + # Not running systemd!? Don't check! + return 0 + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname > /dev/null 2>&1 || __check_services_systemd salt-$fname.service > /dev/null 2>&1 || return 1 + done + + return 0 +} +# +# End of openSUSE Install Functions. 
+# +####################################################################################################################### + +####################################################################################################################### +# +# openSUSE Leap 15 +# + +install_opensuse_15_stable_deps() { + __opensuse_prep_install || return 1 + + # SUSE only packages Salt for Python 3 on Leap 15 + # Py3 is the default bootstrap install for Leap 15 + # However, git installs might specify "-x python2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + else + PY_PKG_VER=3 + fi + + # YAML module is used for generating custom master/minion configs + # requests is still used by many salt modules + __PACKAGES="python${PY_PKG_VER}-PyYAML python${PY_PKG_VER}-requests" + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __zypper_install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_opensuse_15_git_deps() { + install_opensuse_15_stable_deps || return 1 + + if ! __check_command_exists git; then + __zypper_install git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + else + PY_PKG_VER=3 + fi + + __PACKAGES="python${PY_PKG_VER}-xml" + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + # Py3 is the default bootstrap install for Leap 15 + # However, git installs might specify "-x python2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + # This is required by some of the python2 packages below + __PACKAGES="${__PACKAGES} libpython2_7-1_0 python2-futures python-ipaddress" + fi + + __PACKAGES="${__PACKAGES} libzmq5 python${PY_PKG_VER}-Jinja2 python${PY_PKG_VER}-msgpack" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pycrypto python${PY_PKG_VER}-pyzmq" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado" + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-apache-libcloud" + fi + else + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + fi + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_opensuse_15_git() { + + # Py3 is the default bootstrap install for Leap 15 + if [ -n "$_PY_EXE" ]; then + _PYEXE=${_PY_EXE} + else + _PYEXE=python3 + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + ${_PYEXE} setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + return 0 +} + +# +# End of openSUSE Leap 15 +# +####################################################################################################################### + +####################################################################################################################### +# +# 
SUSE Enterprise 15 +# + +install_suse_15_stable_deps() { + __opensuse_prep_install || return 1 + install_opensuse_15_stable_deps || return 1 + + return 0 +} + +install_suse_15_git_deps() { + install_suse_15_stable_deps || return 1 + + if ! __check_command_exists git; then + __zypper_install git-core || return 1 + fi + + install_opensuse_15_git_deps || return 1 + + return 0 +} + +install_suse_15_stable() { + install_opensuse_stable || return 1 + return 0 +} + +install_suse_15_git() { + install_opensuse_15_git || return 1 + return 0 +} + +install_suse_15_stable_post() { + install_opensuse_stable_post || return 1 + return 0 +} + +install_suse_15_git_post() { + install_opensuse_git_post || return 1 + return 0 +} + +install_suse_15_restart_daemons() { + install_opensuse_restart_daemons || return 1 + return 0 +} + +# +# End of SUSE Enterprise 15 +# +####################################################################################################################### + +####################################################################################################################### +# +# SUSE Enterprise 12 +# + +install_suse_12_stable_deps() { + __opensuse_prep_install || return 1 + + # YAML module is used for generating custom master/minion configs + # requests is still used by many salt modules + # Salt needs python-zypp installed in order to use the zypper module + __PACKAGES="python-PyYAML python-requests python-zypp" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-apache-libcloud" + fi + + # shellcheck disable=SC2086,SC2090 + __zypper_install ${__PACKAGES} || return 1 + + # SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which + # we want to install, even with --non-interactive. + # Let's try to install the higher version first and then the lower one in case of failure + __zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __zypper_install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_suse_12_git_deps() { + install_suse_12_stable_deps || return 1 + + if ! 
__check_command_exists git; then + __zypper_install git-core || return 1 + fi + + __git_clone_and_checkout || return 1 + + __PACKAGES="" + # shellcheck disable=SC2089 + __PACKAGES="${__PACKAGES} libzmq4 python-Jinja2 python-msgpack-python python-pycrypto" + __PACKAGES="${__PACKAGES} python-pyzmq python-xml" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} python-tornado" + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-apache-libcloud" + fi + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_suse_12_stable() { + install_opensuse_stable || return 1 + return 0 +} + +install_suse_12_git() { + install_opensuse_git || return 1 + return 0 +} + +install_suse_12_stable_post() { + install_opensuse_stable_post || return 1 + return 0 +} + +install_suse_12_git_post() { + install_opensuse_git_post || return 1 + return 0 +} + +install_suse_12_restart_daemons() { + install_opensuse_restart_daemons || return 1 + return 0 +} + +# +# End of SUSE Enterprise 12 +# +####################################################################################################################### + +####################################################################################################################### +# +# SUSE Enterprise 11 +# + +install_suse_11_stable_deps() { + __opensuse_prep_install || return 1 + + # YAML module is used for generating custom master/minion configs + __PACKAGES="python-PyYAML" + + # shellcheck disable=SC2086,SC2090 + __zypper_install ${__PACKAGES} || return 1 + + # SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which + # we want to install, even with --non-interactive. + # Let's try to install the higher version first and then the lower one in case of failure + __zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __zypper_install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_suse_11_git_deps() { + install_suse_11_stable_deps || return 1 + + if ! 
__check_command_exists git; then + __zypper_install git || return 1 + fi + + __git_clone_and_checkout || return 1 + + __PACKAGES="" + # shellcheck disable=SC2089 + __PACKAGES="${__PACKAGES} libzmq4 python-Jinja2 python-msgpack-python python-pycrypto" + __PACKAGES="${__PACKAGES} python-pyzmq python-xml python-zypp" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} python-tornado" + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-apache-libcloud" + fi + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_suse_11_stable() { + install_opensuse_stable || return 1 + return 0 +} + +install_suse_11_git() { + install_opensuse_git || return 1 + return 0 +} + +install_suse_11_stable_post() { + install_opensuse_stable_post || return 1 + return 0 +} + +install_suse_11_git_post() { + install_opensuse_git_post || return 1 + return 0 +} + +install_suse_11_restart_daemons() { + install_opensuse_restart_daemons || return 1 + return 0 +} + + +# +# End of SUSE Enterprise 11 +# +####################################################################################################################### + +####################################################################################################################### +# +# SUSE Enterprise General Functions +# + +# Used for both SLE 11 and 12 +install_suse_check_services() { + if [ ! -f /bin/systemctl ]; then + # Not running systemd!? Don't check! + return 0 + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || return 1 + done + + return 0 +} + +# +# End of SUSE Enterprise General Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Gentoo Install Functions. +# +__autounmask() { + emerge --autounmask-write --autounmask-only "${@}"; return $? +} + +__emerge() { + if [ "$_GENTOO_USE_BINHOST" -eq $BS_TRUE ]; then + emerge --getbinpkg "${@}"; return $? + fi + emerge "${@}"; return $? 
+} + +__gentoo_config_protection() { + # usually it's a good thing to have config files protected by portage, but + # in this case this would require to interrupt the bootstrapping script at + # this point, manually merge the changes using etc-update/dispatch-conf/ + # cfg-update and then restart the bootstrapping script, so instead we allow + # at this point to modify certain config files directly + export CONFIG_PROTECT_MASK="${CONFIG_PROTECT_MASK:-} /etc/portage/package.accept_keywords /etc/portage/package.keywords /etc/portage/package.license /etc/portage/package.unmask /etc/portage/package.use" + + # emerge currently won't write to files that aren't there, so we need to ensure their presence + touch /etc/portage/package.accept_keywords /etc/portage/package.keywords /etc/portage/package.license /etc/portage/package.unmask /etc/portage/package.use +} + +__gentoo_pre_dep() { + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + if __check_command_exists eix; then + eix-sync + else + emerge --sync + fi + else + if __check_command_exists eix; then + eix-sync -q + else + emerge --sync --quiet + fi + fi + if [ ! -d /etc/portage ]; then + mkdir /etc/portage + fi +} + +__gentoo_post_dep() { + # ensures dev-lib/crypto++ compiles happily + __emerge --oneshot 'sys-devel/libtool' + # the -o option asks it to emerge the deps but not the package. + __gentoo_config_protection + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __autounmask 'dev-python/libcloud' + __emerge -v 'dev-python/libcloud' + fi + + __autounmask 'dev-python/requests' + __autounmask 'app-admin/salt' + + __emerge -vo 'dev-python/requests' + __emerge -vo 'app-admin/salt' + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __autounmask ${_EXTRA_PACKAGES} || return 1 + # shellcheck disable=SC2086 + __emerge -v ${_EXTRA_PACKAGES} || return 1 + fi +} + +install_gentoo_deps() { + __gentoo_pre_dep || return 1 + __gentoo_post_dep || return 1 +} + +install_gentoo_git_deps() { + __gentoo_pre_dep || return 1 + __gentoo_post_dep || return 1 +} + +install_gentoo_stable() { + __gentoo_config_protection + __emerge -v 'app-admin/salt' || return 1 +} + +install_gentoo_git() { + __gentoo_config_protection + __emerge -v '=app-admin/salt-9999' || return 1 +} + +install_gentoo_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -d "/run/systemd/system" ]; then + systemctl enable salt-$fname.service + systemctl start salt-$fname.service + else + rc-update add salt-$fname default + /etc/init.d/salt-$fname start + fi + done +} + +install_gentoo_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -d 
"/run/systemd/system" ]; then + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + else + /etc/init.d/salt-$fname stop > /dev/null 2>&1 + /etc/init.d/salt-$fname start + fi + done +} + +install_gentoo_check_services() { + if [ ! -d "/run/systemd/system" ]; then + # Not running systemd!? Don't check! + return 0 + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || return 1 + done + + return 0 +} +# +# End of Gentoo Install Functions. +# +####################################################################################################################### + +####################################################################################################################### +# +# VoidLinux Install Functions +# +install_voidlinux_stable_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + xbps-install -Suy || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + xbps-install -Suy "${_EXTRA_PACKAGES}" || return 1 + fi + + return 0 +} + +install_voidlinux_stable() { + xbps-install -Suy salt || return 1 + return 0 +} + +install_voidlinux_stable_post() { + for fname in master minion syndic; do + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + ln -s /etc/sv/salt-$fname /var/service/. 
+ done +} + +install_voidlinux_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in master minion syndic; do + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + sv restart salt-$fname + done +} + +install_voidlinux_check_services() { + for fname in master minion syndic; do + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + [ -e /var/service/salt-$fname ] || return 1 + done + + return 0 +} + +daemons_running_voidlinux() { + [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 + + FAILED_DAEMONS=0 + for fname in master minion syndic; do + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ "$(sv status salt-$fname | grep run)" = "" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + done + + return $FAILED_DAEMONS +} +# +# Ended VoidLinux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# OS X / Darwin Install Functions +# + +__macosx_get_packagesite() { + DARWIN_ARCH="x86_64" + + __PY_VERSION_REPO="py2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + PKG="salt-${STABLE_REV}-${__PY_VERSION_REPO}-${DARWIN_ARCH}.pkg" + SALTPKGCONFURL="https://repo.saltstack.com/osx/${PKG}" +} + +# Using a separate conf step to head for idempotent install... +__configure_macosx_pkg_details() { + __macosx_get_packagesite || return 1 + return 0 +} + +install_macosx_stable_deps() { + __configure_macosx_pkg_details || return 1 + return 0 +} + +install_macosx_git_deps() { + install_macosx_stable_deps || return 1 + + if ! echo "$PATH" | grep -q /usr/local/bin; then + echowarn "/usr/local/bin was not found in \$PATH. Adding it for the duration of the script execution." 
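+ # This only changes the environment of this script and its children; the
+ # user's shell profile is left untouched.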
+ export PATH=/usr/local/bin:$PATH
+ fi
+
+ __fetch_url "/tmp/get-pip.py" "https://bootstrap.pypa.io/get-pip.py" || return 1
+
+ if [ -n "$_PY_EXE" ]; then
+ _PYEXE=${_PY_EXE}
+ else
+ _PYEXE=python2.7
+ fi
+
+ # Install PIP
+ $_PYEXE /tmp/get-pip.py || return 1
+
+ __git_clone_and_checkout || return 1
+
+ if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then
+ return 0
+ fi
+
+ __PIP_REQUIREMENTS="dev_python27.txt"
+ if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
+ __PIP_REQUIREMENTS="dev_python34.txt"
+ fi
+
+ requirements_file="${_SALT_GIT_CHECKOUT_DIR}/requirements/${__PIP_REQUIREMENTS}"
+ pip install -U -r "${requirements_file}" --install-option="--prefix=/opt/salt" || return 1
+
+ return 0
+}
+
+install_macosx_stable() {
+ install_macosx_stable_deps || return 1
+
+ /usr/bin/curl "${SALTPKGCONFURL}" > "/tmp/${PKG}" || return 1
+
+ /usr/sbin/installer -pkg "/tmp/${PKG}" -target / || return 1
+
+ return 0
+}
+
+install_macosx_git() {
+
+ if [ -n "$_PY_EXE" ]; then
+ _PYEXE=${_PY_EXE}
+ else
+ _PYEXE=python2.7
+ fi
+
+ if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then
+ __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1
+ return 0
+ fi
+
+ if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then
+ $_PYEXE setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/opt/salt || return 1
+ else
+ $_PYEXE setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/opt/salt || return 1
+ fi
+
+ return 0
+}
+
+install_macosx_stable_post() {
+ if [ ! -f /etc/paths.d/salt ]; then
+ printf "%s\n" "/opt/salt/bin" "/usr/local/sbin" > /etc/paths.d/salt
+ fi
+
+ # Don't fail because of an unknown variable in the next step
+ set +o nounset
+ # shellcheck disable=SC1091
+ . /etc/profile
+ # Revert nounset to its previous state
+ set -o nounset
+
+ return 0
+}
+
+install_macosx_git_post() {
+ install_macosx_stable_post || return 1
+ return 0
+}
+
+install_macosx_restart_daemons() {
+ [ $_START_DAEMONS -eq $BS_FALSE ] && return
+
+ /bin/launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1
+ /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1
+
+ return 0
+}
+#
+# Ended OS X / Darwin Install Functions
+#
+#######################################################################################################################
+
+#######################################################################################################################
+#
+# Default minion configuration function. Matches ANY distribution as long as
+# the -c option is passed.
+#
+config_salt() {
+ # If the configuration directory is not passed, return
+ [ "$_TEMP_CONFIG_DIR" = "null" ] && return
+
+ if [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then
+ echowarn "Passing -C (config only) option implies -F (forced overwrite)."
+
+ if [ "$_FORCE_OVERWRITE" -ne $BS_TRUE ]; then
+ echowarn "Overwriting configs in 11 seconds!"
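+ # Deliberate pause so the operator has a chance to abort (Ctrl-C) before
+ # any existing configuration files are overwritten.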
+ sleep 11 + _FORCE_OVERWRITE=$BS_TRUE + fi + fi + + # Let's create the necessary directories + [ -d "$_SALT_ETC_DIR" ] || mkdir "$_SALT_ETC_DIR" || return 1 + [ -d "$_PKI_DIR" ] || (mkdir -p "$_PKI_DIR" && chmod 700 "$_PKI_DIR") || return 1 + + # If -C or -F was passed, we don't need a .bak file for the config we're updating + # This is used in the custom master/minion config file checks below + CREATE_BAK=$BS_TRUE + if [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then + CREATE_BAK=$BS_FALSE + fi + + CONFIGURED_ANYTHING=$BS_FALSE + + # Copy the grains file if found + if [ -f "$_TEMP_CONFIG_DIR/grains" ]; then + echodebug "Moving provided grains file from $_TEMP_CONFIG_DIR/grains to $_SALT_ETC_DIR/grains" + __movefile "$_TEMP_CONFIG_DIR/grains" "$_SALT_ETC_DIR/grains" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + if [ "$_INSTALL_MINION" -eq $BS_TRUE ] || \ + [ "$_CONFIG_ONLY" -eq $BS_TRUE ] || [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then + # Create the PKI directory + [ -d "$_PKI_DIR/minion" ] || (mkdir -p "$_PKI_DIR/minion" && chmod 700 "$_PKI_DIR/minion") || return 1 + + # Check to see if a custom minion config json dict was provided + if [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then + + # Check if a minion config file already exists and move to .bak if needed + if [ -f "$_SALT_ETC_DIR/minion" ] && [ "$CREATE_BAK" -eq "$BS_TRUE" ]; then + __movefile "$_SALT_ETC_DIR/minion" "$_SALT_ETC_DIR/minion.bak" $BS_TRUE || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Overwrite/create the config file with the yaml string + __overwriteconfig "$_SALT_ETC_DIR/minion" "$_CUSTOM_MINION_CONFIG" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + + # Copy the minions configuration if found + # Explicitly check for custom master config to avoid moving the minion config + elif [ -f "$_TEMP_CONFIG_DIR/minion" ] && [ "$_CUSTOM_MASTER_CONFIG" = "null" ]; then + __movefile "$_TEMP_CONFIG_DIR/minion" "$_SALT_ETC_DIR" "$_FORCE_OVERWRITE" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Copy the minion's keys if found + if [ -f "$_TEMP_CONFIG_DIR/minion.pem" ]; then + __movefile "$_TEMP_CONFIG_DIR/minion.pem" "$_PKI_DIR/minion/" "$_FORCE_OVERWRITE" || return 1 + chmod 400 "$_PKI_DIR/minion/minion.pem" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + if [ -f "$_TEMP_CONFIG_DIR/minion.pub" ]; then + __movefile "$_TEMP_CONFIG_DIR/minion.pub" "$_PKI_DIR/minion/" "$_FORCE_OVERWRITE" || return 1 + chmod 664 "$_PKI_DIR/minion/minion.pub" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + # For multi-master-pki, copy the master_sign public key if found + if [ -f "$_TEMP_CONFIG_DIR/master_sign.pub" ]; then + __movefile "$_TEMP_CONFIG_DIR/master_sign.pub" "$_PKI_DIR/minion/" || return 1 + chmod 664 "$_PKI_DIR/minion/master_sign.pub" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + fi + + # only (re)place master or syndic configs if -M (install master) or -S + # (install syndic) specified + OVERWRITE_MASTER_CONFIGS=$BS_FALSE + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ] && [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then + OVERWRITE_MASTER_CONFIGS=$BS_TRUE + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ] && [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then + OVERWRITE_MASTER_CONFIGS=$BS_TRUE + fi + + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ] || [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ] || [ "$OVERWRITE_MASTER_CONFIGS" -eq $BS_TRUE ] || [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then + # Create the PKI directory + [ -d "$_PKI_DIR/master" ] || (mkdir -p "$_PKI_DIR/master" && chmod 700 "$_PKI_DIR/master") || return 1 + + # Check to see if a custom master config json 
dict was provided + if [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then + + # Check if a master config file already exists and move to .bak if needed + if [ -f "$_SALT_ETC_DIR/master" ] && [ "$CREATE_BAK" -eq "$BS_TRUE" ]; then + __movefile "$_SALT_ETC_DIR/master" "$_SALT_ETC_DIR/master.bak" $BS_TRUE || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Overwrite/create the config file with the yaml string + __overwriteconfig "$_SALT_ETC_DIR/master" "$_CUSTOM_MASTER_CONFIG" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + + # Copy the masters configuration if found + elif [ -f "$_TEMP_CONFIG_DIR/master" ]; then + __movefile "$_TEMP_CONFIG_DIR/master" "$_SALT_ETC_DIR" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Copy the master's keys if found + if [ -f "$_TEMP_CONFIG_DIR/master.pem" ]; then + __movefile "$_TEMP_CONFIG_DIR/master.pem" "$_PKI_DIR/master/" || return 1 + chmod 400 "$_PKI_DIR/master/master.pem" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + if [ -f "$_TEMP_CONFIG_DIR/master.pub" ]; then + __movefile "$_TEMP_CONFIG_DIR/master.pub" "$_PKI_DIR/master/" || return 1 + chmod 664 "$_PKI_DIR/master/master.pub" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # Recursively copy salt-cloud configs with overwriting if necessary + for file in "$_TEMP_CONFIG_DIR"/cloud*; do + if [ -f "$file" ]; then + __copyfile "$file" "$_SALT_ETC_DIR" || return 1 + elif [ -d "$file" ]; then + subdir="$(basename "$file")" + mkdir -p "$_SALT_ETC_DIR/$subdir" + for file_d in "$_TEMP_CONFIG_DIR/$subdir"/*; do + if [ -f "$file_d" ]; then + __copyfile "$file_d" "$_SALT_ETC_DIR/$subdir" || return 1 + fi + done + fi + done + fi + + if [ "$_CONFIG_ONLY" -eq $BS_TRUE ] && [ $CONFIGURED_ANYTHING -eq $BS_FALSE ]; then + echowarn "No configuration or keys were copied over. No configuration was done!" + exit 0 + fi + + return 0 +} +# +# Ended Default Configuration function +# +####################################################################################################################### + +####################################################################################################################### +# +# Default salt master minion keys pre-seed function. Matches ANY distribution +# as long as the -k option is passed. +# +preseed_master() { + # Create the PKI directory + + if [ "$(find "$_TEMP_KEYS_DIR" -maxdepth 1 -type f | wc -l)" -lt 1 ]; then + echoerror "No minion keys were uploaded. Unable to pre-seed master" + return 1 + fi + + SEED_DEST="$_PKI_DIR/master/minions" + [ -d "$SEED_DEST" ] || (mkdir -p "$SEED_DEST" && chmod 700 "$SEED_DEST") || return 1 + + for keyfile in "$_TEMP_KEYS_DIR"/*; do + keyfile=$(basename "${keyfile}") + src_keyfile="${_TEMP_KEYS_DIR}/${keyfile}" + dst_keyfile="${SEED_DEST}/${keyfile}" + + # If it's not a file, skip to the next + [ ! -f "$src_keyfile" ] && continue + + __movefile "$src_keyfile" "$dst_keyfile" || return 1 + chmod 664 "$dst_keyfile" || return 1 + done + + return 0 +} +# +# Ended Default Salt Master Pre-Seed minion keys function +# +####################################################################################################################### + +####################################################################################################################### +# +# This function checks if all of the installed daemons are running or not. 
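+# It returns the number of expected daemons that were not found running, so a
+# zero exit status means every installed service is up.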
+# +daemons_running() { + [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 + + FAILED_DAEMONS=0 + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # shellcheck disable=SC2009 + if [ "${DISTRO_NAME}" = "SmartOS" ]; then + if [ "$(svcs -Ho STA salt-$fname)" != "ON" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + elif [ "$(ps wwwaux | grep -v grep | grep salt-$fname)" = "" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + done + + return $FAILED_DAEMONS +} +# +# Ended daemons running check function +# +####################################################################################################################### + +#====================================================================================================================== +# LET'S PROCEED WITH OUR INSTALLATION +#====================================================================================================================== + +# Let's get the dependencies install function +DEP_FUNC_NAMES="" +if [ ${_NO_DEPS} -eq $BS_FALSE ]; then + DEP_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}_deps" +fi + +DEPS_INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$DEP_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + DEPS_INSTALL_FUNC="$FUNC_NAME" + break + fi +done +echodebug "DEPS_INSTALL_FUNC=${DEPS_INSTALL_FUNC}" + +# Let's get the Salt config function +CONFIG_FUNC_NAMES="config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}_${ITYPE}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_salt" + +CONFIG_SALT_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$CONFIG_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + CONFIG_SALT_FUNC="$FUNC_NAME" + break + fi +done +echodebug "CONFIG_SALT_FUNC=${CONFIG_SALT_FUNC}" + +# Let's get the pre-seed master function +PRESEED_FUNC_NAMES="preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES 
preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}_${ITYPE}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_master" + +PRESEED_MASTER_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$PRESEED_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + PRESEED_MASTER_FUNC="$FUNC_NAME" + break + fi +done +echodebug "PRESEED_MASTER_FUNC=${PRESEED_MASTER_FUNC}" + +# Let's get the install function +INSTALL_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}" +INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}" +INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}" + +INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$INSTALL_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + INSTALL_FUNC="$FUNC_NAME" + break + fi +done +echodebug "INSTALL_FUNC=${INSTALL_FUNC}" + +# Let's get the post install function +POST_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}_post" + +POST_INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$POST_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + POST_INSTALL_FUNC="$FUNC_NAME" + break + fi +done +echodebug "POST_INSTALL_FUNC=${POST_INSTALL_FUNC}" + +# Let's get the start daemons install function +STARTDAEMONS_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}_restart_daemons" + +STARTDAEMONS_INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$STARTDAEMONS_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + STARTDAEMONS_INSTALL_FUNC="$FUNC_NAME" + break + fi +done +echodebug "STARTDAEMONS_INSTALL_FUNC=${STARTDAEMONS_INSTALL_FUNC}" + +# Let's get the daemons running check function. 
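+# Candidates run from most distro-specific to the generic daemons_running()
+# defined above; the first function that is actually defined wins.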
+DAEMONS_RUNNING_FUNC_NAMES="daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}"
+DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}"
+DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}"
+DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}"
+DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}_${ITYPE}"
+DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}"
+DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running"
+
+DAEMONS_RUNNING_FUNC="null"
+for FUNC_NAME in $(__strip_duplicates "$DAEMONS_RUNNING_FUNC_NAMES"); do
+ if __function_defined "$FUNC_NAME"; then
+ DAEMONS_RUNNING_FUNC="$FUNC_NAME"
+ break
+ fi
+done
+echodebug "DAEMONS_RUNNING_FUNC=${DAEMONS_RUNNING_FUNC}"
+
+# Let's get the check services function
+if [ ${_DISABLE_SALT_CHECKS} -eq $BS_FALSE ]; then
+ CHECK_SERVICES_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_check_services"
+ CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_check_services"
+ CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_check_services"
+ CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_check_services"
+ CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_check_services"
+ CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_check_services"
+else
+ CHECK_SERVICES_FUNC_NAMES=""
+fi
+
+CHECK_SERVICES_FUNC="null"
+for FUNC_NAME in $(__strip_duplicates "$CHECK_SERVICES_FUNC_NAMES"); do
+ if __function_defined "$FUNC_NAME"; then
+ CHECK_SERVICES_FUNC="$FUNC_NAME"
+ break
+ fi
+done
+echodebug "CHECK_SERVICES_FUNC=${CHECK_SERVICES_FUNC}"
+
+if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ "$DEPS_INSTALL_FUNC" = "null" ]; then
+ echoerror "No dependencies installation function found. Exiting..."
+ exit 1
+fi
+
+if [ "$INSTALL_FUNC" = "null" ]; then
+ echoerror "No installation function found. Exiting..."
+ exit 1
+fi
+
+
+# Install dependencies
+if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ $_CONFIG_ONLY -eq $BS_FALSE ]; then
+ # Only execute the function if not in config-only mode
+ echoinfo "Running ${DEPS_INSTALL_FUNC}()"
+ if ! ${DEPS_INSTALL_FUNC}; then
+ echoerror "Failed to run ${DEPS_INSTALL_FUNC}()!!!"
+ exit 1
+ fi
+fi
+
+
+if [ "${ITYPE}" = "git" ] && [ ${_NO_DEPS} -eq ${BS_TRUE} ]; then
+ if ! __git_clone_and_checkout; then
+ echo "Failed to clone and checkout git repository."
+ exit 1
+ fi
+fi
+
+
+# Triggering config_salt() if overwriting master or minion configs
+if [ "$_CUSTOM_MASTER_CONFIG" != "null" ] || [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then
+ if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
+ _TEMP_CONFIG_DIR="$_SALT_ETC_DIR"
+ fi
+
+ if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ $_CONFIG_ONLY -eq $BS_TRUE ]; then
+ # Execute function to satisfy dependencies for configuration step
+ echoinfo "Running ${DEPS_INSTALL_FUNC}()"
+ if ! ${DEPS_INSTALL_FUNC}; then
+ echoerror "Failed to run ${DEPS_INSTALL_FUNC}()!!!"
+ exit 1
+ fi
+ fi
+fi
+
+# Configure Salt
+if [ "$CONFIG_SALT_FUNC" != "null" ] && [ "$_TEMP_CONFIG_DIR" != "null" ]; then
+ echoinfo "Running ${CONFIG_SALT_FUNC}()"
+ if ! ${CONFIG_SALT_FUNC}; then
+ echoerror "Failed to run ${CONFIG_SALT_FUNC}()!!!"
+ exit 1
+ fi
+fi
+
+# Drop the master address if passed
+if [ "$_SALT_MASTER_ADDRESS" != "null" ]; then
+ [ ! -d "$_SALT_ETC_DIR/minion.d" ] && mkdir -p "$_SALT_ETC_DIR/minion.d"
+ cat <<_eof > "$_SALT_ETC_DIR/minion.d/99-master-address.conf"
+master: $_SALT_MASTER_ADDRESS
+_eof
+fi
+
+# Drop the minion id if passed
+if [ "$_SALT_MINION_ID" != "null" ]; then
+ [ ! -d "$_SALT_ETC_DIR" ] && mkdir -p "$_SALT_ETC_DIR"
+ echo "$_SALT_MINION_ID" > "$_SALT_ETC_DIR/minion_id"
+fi
+
+# Pre-seed master keys
+if [ "$PRESEED_MASTER_FUNC" != "null" ] && [ "$_TEMP_KEYS_DIR" != "null" ]; then
+ echoinfo "Running ${PRESEED_MASTER_FUNC}()"
+ if ! ${PRESEED_MASTER_FUNC}; then
+ echoerror "Failed to run ${PRESEED_MASTER_FUNC}()!!!"
+ exit 1
+ fi
+fi
+
+# Install Salt
+if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then
+ # Only execute the function if not in config-only mode
+ echoinfo "Running ${INSTALL_FUNC}()"
+ if ! ${INSTALL_FUNC}; then
+ echoerror "Failed to run ${INSTALL_FUNC}()!!!"
+ exit 1
+ fi
+fi
+
+# Run any post-install function. Only execute the function if not in config-only mode
+if [ "$POST_INSTALL_FUNC" != "null" ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then
+ echoinfo "Running ${POST_INSTALL_FUNC}()"
+ if ! ${POST_INSTALL_FUNC}; then
+ echoerror "Failed to run ${POST_INSTALL_FUNC}()!!!"
+ exit 1
+ fi
+fi
+
+# Run any check-services function. Only execute the function if not in config-only mode
+if [ "$CHECK_SERVICES_FUNC" != "null" ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then
+ echoinfo "Running ${CHECK_SERVICES_FUNC}()"
+ if ! ${CHECK_SERVICES_FUNC}; then
+ echoerror "Failed to run ${CHECK_SERVICES_FUNC}()!!!"
+ exit 1
+ fi
+fi
+
+# Run any start daemons function
+if [ "$STARTDAEMONS_INSTALL_FUNC" != "null" ] && [ ${_START_DAEMONS} -eq $BS_TRUE ]; then
+ echoinfo "Running ${STARTDAEMONS_INSTALL_FUNC}()"
+ echodebug "Waiting ${_SLEEP} seconds for processes to settle before checking for them"
+ sleep ${_SLEEP}
+ if ! ${STARTDAEMONS_INSTALL_FUNC}; then
+ echoerror "Failed to run ${STARTDAEMONS_INSTALL_FUNC}()!!!"
+ exit 1
+ fi
+fi
+
+# Check if the installed daemons are running or not
+if [ "$DAEMONS_RUNNING_FUNC" != "null" ] && [ ${_START_DAEMONS} -eq $BS_TRUE ]; then
+ echoinfo "Running ${DAEMONS_RUNNING_FUNC}()"
+ echodebug "Waiting ${_SLEEP} seconds for processes to settle before checking for them"
+ sleep ${_SLEEP} # Sleep a little bit to let daemons start
+ if ! ${DAEMONS_RUNNING_FUNC}; then
+ echoerror "Failed to run ${DAEMONS_RUNNING_FUNC}()!!!"
+
+ for fname in api master minion syndic; do
+ # Skip salt-api since the service should be opt-in and not necessarily started on boot
+ [ $fname = "api" ] && continue
+
+ # Skip if not meant to be installed
+ [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
+ [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
+ [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
+
+ if [ "$_ECHO_DEBUG" -eq $BS_FALSE ]; then
+ echoerror "salt-$fname was not found running. Pass '-D' to ${__ScriptName} when bootstrapping for additional debugging information..."
+ continue
+ fi
+
+ [ !
-f "$_SALT_ETC_DIR/$fname" ] && [ $fname != "syndic" ] && echodebug "$_SALT_ETC_DIR/$fname does not exist" + + echodebug "Running salt-$fname by hand outputs: $(nohup salt-$fname -l debug)" + + [ ! -f /var/log/salt/$fname ] && echodebug "/var/log/salt/$fname does not exist. Can't cat its contents!" && continue + + echodebug "DAEMON LOGS for $fname:" + echodebug "$(cat /var/log/salt/$fname)" + echo + done + + echodebug "Running Processes:" + echodebug "$(ps auxwww)" + + exit 1 + fi +fi + +# Done! +if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Salt installed!" +else + echoinfo "Salt configured!" +fi + +exit 0 + +# vim: set sts=4 ts=4 et From 3ac9f1800bb1860a33c7b909f332fb6b6201efdb Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Fri, 24 Jul 2020 22:04:30 +0000 Subject: [PATCH 013/376] Make sure we are searching all clusters when running rules --- salt/elastalert/files/rules/so/suricata_thehive.yaml | 2 +- salt/elastalert/files/rules/so/wazuh_thehive.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/elastalert/files/rules/so/suricata_thehive.yaml b/salt/elastalert/files/rules/so/suricata_thehive.yaml index cd887c9f9..fb6c6448d 100644 --- a/salt/elastalert/files/rules/so/suricata_thehive.yaml +++ b/salt/elastalert/files/rules/so/suricata_thehive.yaml @@ -9,7 +9,7 @@ es_host: {{es}} es_port: 9200 name: Suricata-Alert type: frequency -index: "so-ids-*" +index: "*:so-ids-*" num_events: 1 timeframe: minutes: 10 diff --git a/salt/elastalert/files/rules/so/wazuh_thehive.yaml b/salt/elastalert/files/rules/so/wazuh_thehive.yaml index ccb79e1e5..c01bb5894 100644 --- a/salt/elastalert/files/rules/so/wazuh_thehive.yaml +++ b/salt/elastalert/files/rules/so/wazuh_thehive.yaml @@ -9,7 +9,7 @@ es_host: {{es}} es_port: 9200 name: Wazuh-Alert type: frequency -index: "so-ossec-*" +index: "*:so-ossec-*" num_events: 1 timeframe: minutes: 10 From 958ee25f6db4c11124b2b2d3629b9ad4e3a9bded Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Mon, 27 Jul 2020 11:58:12 +0000 Subject: [PATCH 014/376] Move Wazuh from /opt/so/ to /nsm/wazuh --- salt/common/tools/sbin/so-allow | 2 +- salt/filebeat/init.sls | 4 ++-- salt/logstash/init.sls | 6 +++--- salt/wazuh/files/wazuh-manager-whitelist | 2 +- salt/wazuh/init.sls | 10 +++++----- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/salt/common/tools/sbin/so-allow b/salt/common/tools/sbin/so-allow index c6d3d6bf0..f902d659c 100755 --- a/salt/common/tools/sbin/so-allow +++ b/salt/common/tools/sbin/so-allow @@ -127,7 +127,7 @@ salt-call state.apply firewall queue=True if grep -q -R "wazuh: 1" $local_salt_dir/pillar/*; then # If analyst, add to Wazuh AR whitelist if [ "$FULLROLE" == "analyst" ]; then - WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf" + WAZUH_MGR_CFG="/nsm/wazuh/etc/ossec.conf" if ! 
grep -q "$IP" $WAZUH_MGR_CFG ; then DATE=$(date) sed -i 's/<\/ossec_config>//' $WAZUH_MGR_CFG diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls index 6889b892f..0d1f521e3 100644 --- a/salt/filebeat/init.sls +++ b/salt/filebeat/init.sls @@ -60,8 +60,8 @@ so-filebeat: - /nsm:/nsm:ro - /opt/so/log/filebeat:/usr/share/filebeat/logs:rw - /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro - - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro - - /opt/so/wazuh/logs/archives:/wazuh/archives:ro + - /nsm/wazuh/logs/alerts:/wazuh/alerts:ro + - /nsm/wazuh/logs/archives:/wazuh/archives:ro - /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro - /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro - /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 61d6aecc1..8a3b539a2 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -169,8 +169,8 @@ so-logstash: {%- if grains['role'] == 'so-eval' %} - /nsm/zeek:/nsm/zeek:ro - /nsm/suricata:/suricata:ro - - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro - - /opt/so/wazuh/logs/archives:/wazuh/archives:ro + - /nsm/wazuh/logs/alerts:/wazuh/alerts:ro + - /nsm/wazuh/logs/archives:/wazuh/archives:ro - /opt/so/log/fleet/:/osquery/logs:ro - /opt/so/log/strelka:/strelka:ro {%- endif %} @@ -184,4 +184,4 @@ so-logstash: {% endfor %} {% for TEMPLATE in TEMPLATES %} - file: es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }} -{% endfor %} \ No newline at end of file +{% endfor %} diff --git a/salt/wazuh/files/wazuh-manager-whitelist b/salt/wazuh/files/wazuh-manager-whitelist index d39d68e36..8a8bc9832 100755 --- a/salt/wazuh/files/wazuh-manager-whitelist +++ b/salt/wazuh/files/wazuh-manager-whitelist @@ -20,7 +20,7 @@ local_salt_dir=/opt/so/saltstack/local # Check if Wazuh enabled if [ {{ WAZUH_ENABLED }} ]; then - WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf" + WAZUH_MGR_CFG="/nsm/wazuh/etc/ossec.conf" if ! 
grep -q "{{ MANAGERIP }}" $WAZUH_MGR_CFG ; then DATE=`date` sed -i 's/<\/ossec_config>//' $WAZUH_MGR_CFG diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls index 2ae4ea715..22ba0940e 100644 --- a/salt/wazuh/init.sls +++ b/salt/wazuh/init.sls @@ -13,7 +13,7 @@ ossecm: user.present: - uid: 943 - gid: 945 - - home: /opt/so/conf/wazuh + - home: /nsm/wazuh - createhome: False - allow_uid_change: True - allow_gid_change: True @@ -23,7 +23,7 @@ ossecr: user.present: - uid: 944 - gid: 945 - - home: /opt/so/conf/wazuh + - home: /nsm/wazuh - createhome: False - allow_uid_change: True - allow_gid_change: True @@ -33,7 +33,7 @@ ossec: user.present: - uid: 945 - gid: 945 - - home: /opt/so/conf/wazuh + - home: /nsm/wazuh - createhome: False - allow_uid_change: True - allow_gid_change: True @@ -48,7 +48,7 @@ wazuhpkgs: wazuhdir: file.directory: - - name: /opt/so/wazuh + - name: /nsm/wazuh - user: 945 - group: 945 - makedirs: True @@ -94,7 +94,7 @@ so-wazuh: - 0.0.0.0:1515:1515/tcp - 0.0.0.0:55000:55000 - binds: - - /opt/so/wazuh:/var/ossec/data:rw + - /nsm/wazuh:/var/ossec/data:rw # Register the agent registertheagent: From 51e27cadc8075493d8b007f0c8bcc195959ce6da Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Mon, 27 Jul 2020 12:14:43 +0000 Subject: [PATCH 015/376] Add Wazuh Wazuh symlinks for cpnfig/rules --- salt/wazuh/init.sls | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls index 22ba0940e..dfd47c0f6 100644 --- a/salt/wazuh/init.sls +++ b/salt/wazuh/init.sls @@ -113,3 +113,22 @@ wazuhagentservice: service.running: - name: wazuh-agent - enable: True + +/opt/so/conf/wazuh: + file.symlink: + - target: /nsm/wazuh/etc + +hidsruledir: + file.directory: + - name: /opt/so/rules/hids + - user: 939 + - group: 939 + - makedirs: True + +/opt/so/rules/hids/local_rules.xml: + file.symlink: + - target: /nsm/wazuh/etc/rules/local_rules.xml + +/opt/so/rules/hids/ruleset: + file.symlink: + - target: /nsm/wazuh/ruleset From ac5aeb480131805eaf86c79b59c538a90c1d5076 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Mon, 27 Jul 2020 13:45:34 +0000 Subject: [PATCH 016/376] Bump Wazuh version --- salt/wazuh/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls index dfd47c0f6..314a5f47f 100644 --- a/salt/wazuh/init.sls +++ b/salt/wazuh/init.sls @@ -42,7 +42,7 @@ wazuhpkgs: pkg.installed: - skip_suggestions: False - pkgs: - - wazuh-agent: 3.10.2-1 + - wazuh-agent: 3.13.1-1 - hold: True - update_holds: True From e81fd7464ba717c930ad86a88970e626ac788263 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Mon, 27 Jul 2020 13:49:17 +0000 Subject: [PATCH 017/376] Create default orguser if empty --- salt/thehive/scripts/cortex_init | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/thehive/scripts/cortex_init b/salt/thehive/scripts/cortex_init index 9fc1caf25..7eb50df5e 100644 --- a/salt/thehive/scripts/cortex_init +++ b/salt/thehive/scripts/cortex_init @@ -4,7 +4,7 @@ # {%- set CORTEXPASSWORD = salt['pillar.get']('static:cortexpassword', 'cortexchangeme') %} # {%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') %} # {%- set CORTEXORGNAME = salt['pillar.get']('static:cortexorgname', '') %} -# {%- set CORTEXORGUSER = salt['pillar.get']('static:cortexorguser', '') %} +# {%- set CORTEXORGUSER = salt['pillar.get']('static:cortexorguser', 'soadmin') %} # {%- set CORTEXORGUSERKEY = salt['pillar.get']('static:cortexorguserkey', '') %} default_salt_dir=/opt/so/saltstack/default 
From 7606cc0ad09b24994f562ed51130b857e6c25c21 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 27 Jul 2020 15:51:31 -0400 Subject: [PATCH 018/376] changes to ssl state for salt 3001 --- salt/ca/init.sls | 28 ++++----- salt/ssl/init.sls | 155 ++++++++++++++++++++++++++++++++++++---------- 2 files changed, 136 insertions(+), 47 deletions(-) diff --git a/salt/ca/init.sls b/salt/ca/init.sls index da442cc2a..0f7a9cbee 100644 --- a/salt/ca/init.sls +++ b/salt/ca/init.sls @@ -10,12 +10,16 @@ file.directory: [] pki_private_key: - x509.private_key_managed: - - name: /etc/pki/ca.key - - bits: 4096 - - passphrase: - - cipher: aes_256_cbc - - backup: True + x509.private_key_managed: + - name: /etc/pki/ca.key + - bits: 4096 + - passphrase: + - cipher: aes_256_cbc + - backup: True + {% if salt['file.file_exists']('/etc/pki/ca.key') -%} + - prereq: + - x509: /etc/pki/ca.crt + {%- endif %} /etc/pki/ca.crt: x509.certificate_managed: @@ -32,22 +36,18 @@ pki_private_key: - days_valid: 3650 - days_remaining: 0 - backup: True - - managed_private_key: - name: /etc/pki/ca.key - bits: 4096 - backup: True - require: - file: /etc/pki -send_x509_pem_entries_to_mine: +x509_pem_entries: module.run: - mine.send: - - func: x509.get_pem_entries - - glob_path: /etc/pki/ca.crt + name: x509.get_pem_entries + glob_path: /etc/pki/ca.crt cakeyperms: file.managed: - replace: False - name: /etc/pki/ca.key - mode: 640 - - group: 939 + - group: 939 \ No newline at end of file diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index efa3032dc..d76ebcb57 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -37,6 +37,19 @@ m2cryptopkgs: - python-m2crypto {% endif %} +/etc/pki/influxdb.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/influxdb.key') -%} + - prereq: + - x509: /etc/pki/influxdb.crt + {%- endif %} + # Create a cert for the talking to influxdb /etc/pki/influxdb.crt: x509.certificate_managed: @@ -47,10 +60,10 @@ m2cryptopkgs: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/influxdb.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/influxdb.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' influxkeyperms: file.managed: @@ -61,6 +74,19 @@ influxkeyperms: {% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone'] %} +/etc/pki/filebeat.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/filebeat.key') -%} + - prereq: + - x509: /etc/pki/filebeat.crt + {%- endif %} + # Request a cert and drop it where it needs to go to be distributed /etc/pki/filebeat.crt: x509.certificate_managed: @@ -75,13 +101,14 @@ influxkeyperms: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/filebeat.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/filebeat.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt 
$expire_date ]' cmd.run: - name: "/usr/bin/openssl pkcs8 -in /etc/pki/filebeat.key -topk8 -out /etc/pki/filebeat.p8 -nocrypt" + fbperms: file.managed: - replace: False @@ -113,6 +140,19 @@ fbcrtlink: - name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.crt - target: /etc/pki/filebeat.crt +/etc/pki/registry.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/registry.key') -%} + - prereq: + - x509: /etc/pki/registry.crt + {%- endif %} + # Create a cert for the docker registry /etc/pki/registry.crt: x509.certificate_managed: @@ -123,10 +163,10 @@ fbcrtlink: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/registry.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/registry.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' regkeyperms: file.managed: @@ -135,6 +175,19 @@ regkeyperms: - mode: 640 - group: 939 +/etc/pki/managerssl.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/managerssl.key') -%} + - prereq: + - x509: /etc/pki/managerssl.crt + {%- endif %} + # Create a cert for the reverse proxy /etc/pki/managerssl.crt: x509.certificate_managed: @@ -146,10 +199,10 @@ regkeyperms: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/managerssl.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/managerssl.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' msslkeyperms: file.managed: @@ -166,6 +219,11 @@ msslkeyperms: - days_remaining: 0 - days_valid: 820 - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/fleet.key') -%} + - prereq: + - x509: /etc/pki/fleet.crt + {%- endif %} /etc/pki/fleet.crt: x509.certificate_managed: @@ -175,10 +233,10 @@ msslkeyperms: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/fleet.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/managerssl.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' fleetkeyperms: file.managed: @@ -195,6 +253,19 @@ fbcertdir: - name: /opt/so/conf/filebeat/etc/pki - makedirs: True +/etc/pki/filebeat.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/filebeat.key') -%} + - prereq: + - x509: /etc/pki/filebeat.crt + {%- endif %} + # Request a cert and drop it where it needs to go to be distributed /opt/so/conf/filebeat/etc/pki/filebeat.crt: x509.certificate_managed: @@ -209,10 +280,10 @@ fbcertdir: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: 
/opt/so/conf/filebeat/etc/pki/filebeat.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/filebeat.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' # Convert the key to pkcs#8 so logstash will work correctly. filebeatpkcs: @@ -238,6 +309,19 @@ chownfilebeatp8: {% if grains['role'] == 'so-fleet' %} +/etc/pki/managerssl.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/managerssl.key') -%} + - prereq: + - x509: /etc/pki/managerssl.crt + {%- endif %} + # Create a cert for the reverse proxy /etc/pki/managerssl.crt: x509.certificate_managed: @@ -249,10 +333,10 @@ chownfilebeatp8: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/managerssl.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/managerssl.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' msslkeyperms: file.managed: @@ -264,11 +348,16 @@ msslkeyperms: # Create a private key and cert for Fleet /etc/pki/fleet.key: x509.private_key_managed: - - CN: {{ HOSTNAME }} + - CN: {{ manager }} - bits: 4096 - days_remaining: 0 - days_valid: 820 - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/fleet.key') -%} + - prereq: + - x509: /etc/pki/fleet.crt + {%- endif %} /etc/pki/fleet.crt: x509.certificate_managed: @@ -278,10 +367,10 @@ msslkeyperms: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/fleet.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/managerssl.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' fleetkeyperms: file.managed: From e811718ebcb584753d692944dd005d00f71b6526 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 27 Jul 2020 17:53:02 -0400 Subject: [PATCH 019/376] change to salt 3001.1, fix dupe state name, add git branch option to soup --- pillar/salt/master.sls | 2 +- pillar/salt/minion.sls | 2 +- salt/common/tools/sbin/soup | 16 +++++++++++----- salt/ssl/init.sls | 6 +++--- 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/pillar/salt/master.sls b/pillar/salt/master.sls index a34a96b9e..531f0ddb4 100644 --- a/pillar/salt/master.sls +++ b/pillar/salt/master.sls @@ -1,4 +1,4 @@ #version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: master: - version: 3001 \ No newline at end of file + version: 3001.1 \ No newline at end of file diff --git a/pillar/salt/minion.sls b/pillar/salt/minion.sls index 4978a4a73..6abec03f5 100644 --- a/pillar/salt/minion.sls +++ b/pillar/salt/minion.sls @@ -1,4 +1,4 @@ #version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: minion: - version: 3001 \ No newline at end of file + version: 3001.1 \ No newline at end of file diff --git 
a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index ca4bc518b..280a9abb1 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -20,6 +20,7 @@ UPDATE_DIR=/tmp/sogh/securityonion INSTALLEDVERSION=$(cat /etc/soversion) INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'}) default_salt_dir=/opt/so/saltstack/default +SOUP_BRANCH=$1 manager_check() { # Check to see if this is a manager @@ -44,11 +45,11 @@ clone_to_tmp() { # Make a temp location for the files mkdir -p /tmp/sogh cd /tmp/sogh - SOUP_BRANCH="" - if [ -n "$BRANCH" ]; then - SOUP_BRANCH="-b $BRANCH" + if [ -n "$SOUP_BRANCH" ]; then + git clone -b $SOUP_BRANCH https://github.com/Security-Onion-Solutions/securityonion.git + else + git clone https://github.com/Security-Onion-Solutions/securityonion.git fi - git clone $SOUP_BRANCH https://github.com/Security-Onion-Solutions/securityonion.git cd /tmp if [ ! -f $UPDATE_DIR/VERSION ]; then echo "Update was unable to pull from github. Please check your internet." @@ -151,7 +152,12 @@ update_version() { upgrade_check() { # Let's make sure we actually need to update. - NEWVERSION=$(cat $UPDATE_DIR/VERSION) + if [ -n "$SOUP_BRANCH" ]; then + NEWVERSION="$SOUP_BRANCH" + else + NEWVERSION=$(cat $UPDATE_DIR/VERSION) + fi + if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then echo "You are already running the latest version of Security Onion." exit 0 diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index d76ebcb57..a382a4ed2 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -253,7 +253,7 @@ fbcertdir: - name: /opt/so/conf/filebeat/etc/pki - makedirs: True -/etc/pki/filebeat.key: +/opt/so/conf/filebeat/etc/pki/filebeat.key: x509.private_key_managed: - CN: {{ manager }} - bits: 4096 @@ -261,9 +261,9 @@ fbcertdir: - days_valid: 820 - backup: True - new: True - {% if salt['file.file_exists']('/etc/pki/filebeat.key') -%} + {% if salt['file.file_exists']('/opt/so/conf/filebeat/etc/pki/filebeat.key') -%} - prereq: - - x509: /etc/pki/filebeat.crt + - x509: /opt/so/conf/filebeat/etc/pki/filebeat.crt {%- endif %} # Request a cert and drop it where it needs to go to be distributed From f42a39ca690cce6d13e675104c3851f379edc1f8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 27 Jul 2020 18:08:27 -0400 Subject: [PATCH 020/376] allow soup to continue update if branch is specified --- salt/common/tools/sbin/soup | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 280a9abb1..d4ec9c0ab 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -152,13 +152,8 @@ update_version() { upgrade_check() { # Let's make sure we actually need to update. - if [ -n "$SOUP_BRANCH" ]; then - NEWVERSION="$SOUP_BRANCH" - else - NEWVERSION=$(cat $UPDATE_DIR/VERSION) - fi - - if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then + NEWVERSION=$(cat $UPDATE_DIR/VERSION) + if [ "$INSTALLEDVERSION" == "$NEWVERSION" ] && [ -z "$SOUP_BRANCH" ]; then echo "You are already running the latest version of Security Onion." 
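# A brief note on the revised guard above: with a branch passed to soup,
# SOUP_BRANCH is non-empty, so a matching version number alone no longer
# short-circuits the update -- the early exit fires only when the versions
# match AND no branch was specified, letting the same VERSION be reinstalled
# from a different branch.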
exit 0 fi @@ -209,6 +204,7 @@ echo "" echo "Verifying we have the latest script" verify_latest_update_script echo "" + echo "Let's see if we need to update" upgrade_check From 254dcdb2f0e6af9a120a22e2c0030f9921008d48 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 27 Jul 2020 18:19:26 -0400 Subject: [PATCH 021/376] prevent dockers from redownloading if we are updating soup to a branch --- pillar/salt/master.sls | 2 +- pillar/salt/minion.sls | 2 +- salt/common/tools/sbin/soup | 14 +++++++++----- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/pillar/salt/master.sls b/pillar/salt/master.sls index 531f0ddb4..a34a96b9e 100644 --- a/pillar/salt/master.sls +++ b/pillar/salt/master.sls @@ -1,4 +1,4 @@ #version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: master: - version: 3001.1 \ No newline at end of file + version: 3001 \ No newline at end of file diff --git a/pillar/salt/minion.sls b/pillar/salt/minion.sls index 6abec03f5..4978a4a73 100644 --- a/pillar/salt/minion.sls +++ b/pillar/salt/minion.sls @@ -1,4 +1,4 @@ #version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: minion: - version: 3001.1 \ No newline at end of file + version: 3001 \ No newline at end of file diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index d4ec9c0ab..1a7d947dd 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -225,11 +225,15 @@ upgrade_check_salt echo "Making pillar changes" pillar_changes echo "" -echo "Cleaning up old dockers" -clean_dockers -echo "" -echo "Updating docker to $NEWVERSION" -update_dockers + +if [ "$INSTALLEDVERSION" != "$NEWVERSION" ]; then + echo "Cleaning up old dockers" + clean_dockers + echo "" + echo "Updating docker to $NEWVERSION" + update_dockers +fi + echo "" echo "Copying new code" copy_new_files From fb453a0d9c8899da2c910c8c762878a6e8e822b6 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 28 Jul 2020 08:13:03 -0400 Subject: [PATCH 022/376] change sed delimiters in soup --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 1a7d947dd..a2fb0e5eb 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -147,7 +147,7 @@ update_version() { # Update the version to the latest echo "Updating the version file." 
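# A hedged aside on the delimiter swap just below: sed ends its expression at
# the first unescaped delimiter, so a value containing '/' (for example a
# branch-like string; the values here are hypothetical) breaks the
# '/'-delimited form, while '|' delimiters leave embedded slashes literal:
#   NEWVERSION="feature/xyz"
#   sed -i "s/$INSTALLEDVERSION/$NEWVERSION/g" file   # fails: unknown option to `s'
#   sed -i "s|$INSTALLEDVERSION|$NEWVERSION|g" file   # substitutes cleanly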
echo $NEWVERSION > /etc/soversion - sed -i "s/$INSTALLEDVERSION/$NEWVERSION/g" /opt/so/saltstack/local/pillar/static.sls + sed -i "s|$INSTALLEDVERSION|$NEWVERSION|g" /opt/so/saltstack/local/pillar/static.sls } upgrade_check() { From 55e60cb74919c6eb886a949fcaa54fb3aee775d8 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Tue, 28 Jul 2020 11:03:33 -0400 Subject: [PATCH 023/376] initial refactor - beats/sysmon parsing --- salt/elasticsearch/files/ingest/beats.common | 49 +---------------- salt/elasticsearch/files/ingest/sysmon | 52 +++++++++++++++++++ salt/elasticsearch/files/ingest/win.eventlogs | 13 +++++ 3 files changed, 67 insertions(+), 47 deletions(-) create mode 100644 salt/elasticsearch/files/ingest/sysmon create mode 100644 salt/elasticsearch/files/ingest/win.eventlogs diff --git a/salt/elasticsearch/files/ingest/beats.common b/salt/elasticsearch/files/ingest/beats.common index cafbc9e94..4e358582e 100644 --- a/salt/elasticsearch/files/ingest/beats.common +++ b/salt/elasticsearch/files/ingest/beats.common @@ -1,53 +1,8 @@ { "description" : "beats.common", "processors" : [ - {"community_id": {"if": "ctx.winlog.event_data?.Protocol != null", "field":["winlog.event_data.SourceIp","winlog.event_data.SourcePort","winlog.event_data.DestinationIp","winlog.event_data.DestinationPort","winlog.event_data.Protocol"],"target_field":"network.community_id"}}, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "field": "event.module", "value": "sysmon", "override": true } }, - { "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } }, - { "set": { "if": "ctx.agent?.type != null", "field": "module", "value": "{{agent.type}}", "override": true } }, - { "set": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.category", "value": "host,process,network", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.category", "value": "host,process", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.dataset", "value": "process_creation", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 2", "field": "event.dataset", "value": "process_changed_file", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.dataset", "value": "network_connection", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 5", "field": "event.dataset", "value": "process_terminated", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 6", "field": "event.dataset", "value": "driver_loaded", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 7", "field": "event.dataset", "value": "image_loaded", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 8", "field": 
"event.dataset", "value": "create_remote_thread", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 9", "field": "event.dataset", "value": "raw_file_access_read", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 10", "field": "event.dataset", "value": "process_access", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 11", "field": "event.dataset", "value": "file_create", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 12", "field": "event.dataset", "value": "registry_create_delete", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 13", "field": "event.dataset", "value": "registry_value_set", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 14", "field": "event.dataset", "value": "registry_key_value_rename", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 15", "field": "event.dataset", "value": "file_create_stream_hash", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 16", "field": "event.dataset", "value": "config_change", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 22", "field": "event.dataset", "value": "dns_query", "override": true } }, - { "rename": { "field": "agent.hostname", "target_field": "agent.name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.DestinationHostname", "target_field": "destination.hostname", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.DestinationIp", "target_field": "destination.ip", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.DestinationPort", "target_field": "destination.port", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.Image", "target_field": "process.executable", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.ProcessID", "target_field": "process.pid", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.CommandLine", "target_field": "process.command_line", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } }, - { "rename": { "field": 
"winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.SourceHostname", "target_field": "source.hostname", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.SourceIp", "target_field": "source.ip", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.SourcePort", "target_field": "source.port", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.targetFilename", "target_field": "file.target", "ignore_missing": true } }, + { "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } }, + { "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } }, { "pipeline": { "name": "common" } } ] } \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/sysmon b/salt/elasticsearch/files/ingest/sysmon new file mode 100644 index 000000000..de6112d89 --- /dev/null +++ b/salt/elasticsearch/files/ingest/sysmon @@ -0,0 +1,52 @@ +{ + "description" : "sysmon", + "processors" : [ + {"community_id": {"if": "ctx.winlog.event_data?.Protocol != null", "field":["winlog.event_data.SourceIp","winlog.event_data.SourcePort","winlog.event_data.DestinationIp","winlog.event_data.DestinationPort","winlog.event_data.Protocol"],"target_field":"network.community_id"}}, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "field": "event.module", "value": "sysmon", "override": true } }, + { "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } }, + { "set": { "if": "ctx.agent?.type != null", "field": "module", "value": "{{agent.type}}", "override": true } }, + { "set": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.category", "value": "host,process,network", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.category", "value": "host,process", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.dataset", "value": "process_creation", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 2", "field": "event.dataset", "value": "process_changed_file", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.dataset", "value": "network_connection", "override": true } 
}, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 5", "field": "event.dataset", "value": "process_terminated", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 6", "field": "event.dataset", "value": "driver_loaded", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 7", "field": "event.dataset", "value": "image_loaded", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 8", "field": "event.dataset", "value": "create_remote_thread", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 9", "field": "event.dataset", "value": "raw_file_access_read", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 10", "field": "event.dataset", "value": "process_access", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 11", "field": "event.dataset", "value": "file_create", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 12", "field": "event.dataset", "value": "registry_create_delete", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 13", "field": "event.dataset", "value": "registry_value_set", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 14", "field": "event.dataset", "value": "registry_key_value_rename", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 15", "field": "event.dataset", "value": "file_create_stream_hash", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 16", "field": "event.dataset", "value": "config_change", "override": true } }, + { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 22", "field": "event.dataset", "value": "dns_query", "override": true } }, + { "rename": { "field": "agent.hostname", "target_field": "agent.name", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.DestinationHostname", "target_field": "destination.hostname", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.DestinationIp", "target_field": "destination.ip", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.DestinationPort", "target_field": "destination.port", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.Image", "target_field": "process.executable", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ProcessID", "target_field": "process.pid", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.CommandLine", "target_field": "process.command_line", "ignore_missing": true } }, + { "rename": { "field": 
"winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.SourceHostname", "target_field": "source.hostname", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.SourceIp", "target_field": "source.ip", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.SourcePort", "target_field": "source.port", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.targetFilename", "target_field": "file.target", "ignore_missing": true } } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/win.eventlogs b/salt/elasticsearch/files/ingest/win.eventlogs new file mode 100644 index 000000000..acdf97263 --- /dev/null +++ b/salt/elasticsearch/files/ingest/win.eventlogs @@ -0,0 +1,13 @@ +{ + "description" : "win.eventlogs", + "processors" : [ + + { "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } }, + { "set": { "if": "ctx.agent?.type != null", "field": "module", "value": "{{agent.type}}", "override": true } }, + { "set": { "if": "ctx.winlog?.channel != null", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } }, + { "rename": { "field": "agent.hostname", "target_field": "agent.name", "ignore_missing": true } }, + + { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } }, + ] +} \ No newline at end of file From 73a1a0540451a3eb2379e12946de17f0f5387252 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 28 Jul 2020 13:11:38 -0400 Subject: [PATCH 024/376] change back sed delimiters, last highstate log level to info --- salt/common/tools/sbin/soup | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index a2fb0e5eb..28aef1e6e 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
+exec 3>&1 4>&2 +trap 'exec 2>&4 1>&3' 0 1 2 3 +exec 1>/opt/so/logs/soup.log 2>&1 + . /usr/sbin/so-common UPDATE_DIR=/tmp/sogh/securityonion INSTALLEDVERSION=$(cat /etc/soversion) @@ -70,7 +74,7 @@ copy_new_files() { highstate() { # Run a highstate but first cancel a running one. salt-call saltutil.kill_all_jobs - salt-call state.highstate + salt-call state.highstate -l info } pillar_changes() { @@ -147,7 +151,7 @@ update_version() { # Update the version to the latest echo "Updating the version file." echo $NEWVERSION > /etc/soversion - sed -i "s|$INSTALLEDVERSION|$NEWVERSION|g" /opt/so/saltstack/local/pillar/static.sls + sed -i "s/$INSTALLEDVERSION/$NEWVERSION/g" /opt/so/saltstack/local/pillar/static.sls } upgrade_check() { From d8375cce14e13c7a43a324e09805bbe70e27d602 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 28 Jul 2020 13:15:47 -0400 Subject: [PATCH 025/376] touch soup log --- salt/common/tools/sbin/soup | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 28aef1e6e..2af479547 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -15,6 +15,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +touch /opt/so/logs/soup.log exec 3>&1 4>&2 trap 'exec 2>&4 1>&3' 0 1 2 3 exec 1>/opt/so/logs/soup.log 2>&1 From 77acb8f34829bef151b74ecef26889319fc1d071 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 28 Jul 2020 13:20:01 -0400 Subject: [PATCH 026/376] change to /opt/so/log --- salt/common/tools/sbin/soup | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 2af479547..079b3fe72 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -15,10 +15,9 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -touch /opt/so/logs/soup.log exec 3>&1 4>&2 trap 'exec 2>&4 1>&3' 0 1 2 3 -exec 1>/opt/so/logs/soup.log 2>&1 +exec 1>/opt/so/log/soup.log 2>&1 From 2067cc118fd7bc7d17ab312eade5dee695da36d2 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 28 Jul 2020 13:25:43 -0400 Subject: [PATCH 027/376] remove broken logging --- salt/common/tools/sbin/soup | 4 ---- 1 file changed, 4 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 079b3fe72..a50359e8d 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -15,10 +15,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -exec 3>&1 4>&2 -trap 'exec 2>&4 1>&3' 0 1 2 3 -exec 1>/opt/so/log/soup.log 2>&1 - .
/usr/sbin/so-common UPDATE_DIR=/tmp/sogh/securityonion INSTALLEDVERSION=$(cat /etc/soversion) From 307945e2601ecdd242aa28e8f11a84c5e9f1366a Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 28 Jul 2020 13:51:28 -0400 Subject: [PATCH 028/376] dont state salt-minion service, allow salt state to start it during highstate --- salt/common/tools/sbin/soup | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index a50359e8d..3a03a43d7 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -245,10 +245,10 @@ update_version echo "" echo "Starting Salt Master service" systemctl start salt-master -echo "" -echo "Starting Salt Minion service" -systemctl start salt-minion -echo "" +#echo "" +#echo "Starting Salt Minion service" +#systemctl start salt-minion +#echo "" echo "" From c00b452f8d8e21fc9128ac4bfdc1b62008c915c4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 28 Jul 2020 15:10:16 -0400 Subject: [PATCH 029/376] change module.run for ca state --- salt/ca/init.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/ca/init.sls b/salt/ca/init.sls index 0f7a9cbee..dcec40d9a 100644 --- a/salt/ca/init.sls +++ b/salt/ca/init.sls @@ -42,8 +42,8 @@ pki_private_key: x509_pem_entries: module.run: - mine.send: - name: x509.get_pem_entries - glob_path: /etc/pki/ca.crt + - name: x509.get_pem_entries + - glob_path: /etc/pki/ca.crt cakeyperms: file.managed: From b1c09a9b72ffb5b3a88beb42da1e9fd94dcdb8b6 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Tue, 28 Jul 2020 15:23:17 -0400 Subject: [PATCH 030/376] Typo fix - ingest parser - win.eventlogs --- salt/elasticsearch/files/ingest/win.eventlogs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/salt/elasticsearch/files/ingest/win.eventlogs b/salt/elasticsearch/files/ingest/win.eventlogs index acdf97263..b6022f294 100644 --- a/salt/elasticsearch/files/ingest/win.eventlogs +++ b/salt/elasticsearch/files/ingest/win.eventlogs @@ -1,13 +1,11 @@ { "description" : "win.eventlogs", "processors" : [ - { "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } }, { "set": { "if": "ctx.agent?.type != null", "field": "module", "value": "{{agent.type}}", "override": true } }, { "set": { "if": "ctx.winlog?.channel != null", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } }, { "rename": { "field": "agent.hostname", "target_field": "agent.name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } } ] -} \ No newline at end of file +} From 4d5c8e5c2bd944f8f75196b1a1badd54967d92ac Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 28 Jul 2020 16:22:42 -0400 Subject: [PATCH 031/376] add salt minion state to install/upgrade salt-minion --- salt/salt/minion.sls | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index c95ff10e8..3a00ae661 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -1 +1,6 @@ -#Future state for Salt minions \ No newline at end of file +#Future state for Salt minions +{% set saltversion = 
salt['pillar.get']('salt:minion:version') %} + +install_salt_minion: + cmd.run: + - name: yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable {{ saltversion }} && yum versionlock add "salt-*" \ No newline at end of file From bfae439c9019ea30b6ba12c49fe6108c65f88aec Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 28 Jul 2020 16:37:14 -0400 Subject: [PATCH 032/376] salt state distribute bootstrap script --- salt/salt/init.sls | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/salt/init.sls b/salt/salt/init.sls index a11246cbb..ca08aab78 100644 --- a/salt/salt/init.sls +++ b/salt/salt/init.sls @@ -8,6 +8,11 @@ saltpymodules: - python-m2crypto {% endif %} +salt_bootstrap: + file.managed: + - name: /usr/sbin/bootstrap-salt.sh + - source: salt://salt/scripts/bootstrap-salt.sh + - mode: 755 salt_minion_service: service.running: From 8905869db221aab18c730033d4731fb6cf676ff1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 28 Jul 2020 16:58:44 -0400 Subject: [PATCH 033/376] move salt pillars to defaults --- salt/common/tools/sbin/soup | 2 +- pillar/salt/master.sls => salt/salt/master.defaults.yaml | 0 pillar/salt/minion.sls => salt/salt/minion.defaults.yaml | 0 salt/salt/minion.sls | 4 +++- 4 files changed, 4 insertions(+), 2 deletions(-) rename pillar/salt/master.sls => salt/salt/master.defaults.yaml (100%) rename pillar/salt/minion.sls => salt/salt/minion.defaults.yaml (100%) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 3a03a43d7..76e37c062 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -160,7 +160,7 @@ upgrade_check() { } upgrade_check_salt() { - NEWSALTVERSION=$(grep version: $UPDATE_DIR/pillar/salt/master.sls | awk {'print $2'}) + NEWSALTVERSION=$(grep version: $UPDATE_DIR/salt/salt/master.defaults.yaml | awk {'print $2'}) if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then echo "You are already running the correct version of Salt for Security Onion." 
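# For clarity, the extraction above operates on a line of the form
# "  version: 3001" -- grep selects it and awk '{print $2}' emits the second
# whitespace-separated field, i.e. the bare version string; the path changes
# because this commit moves the file out of the pillar tree.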
else diff --git a/pillar/salt/master.sls b/salt/salt/master.defaults.yaml similarity index 100% rename from pillar/salt/master.sls rename to salt/salt/master.defaults.yaml diff --git a/pillar/salt/minion.sls b/salt/salt/minion.defaults.yaml similarity index 100% rename from pillar/salt/minion.sls rename to salt/salt/minion.defaults.yaml diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 3a00ae661..f2fd029a4 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -1,5 +1,7 @@ #Future state for Salt minions -{% set saltversion = salt['pillar.get']('salt:minion:version') %} +{% from 'salt/minion.defaults.yaml' import salt %} +{% set saltversion = salt.salt.minion.version %} + install_salt_minion: cmd.run: From f056a0a17b96b290ee8c313cf4b2de87777a8bbf Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 28 Jul 2020 17:09:53 -0400 Subject: [PATCH 034/376] use import_yaml --- salt/salt/minion.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index f2fd029a4..4364e2612 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -1,5 +1,5 @@ #Future state for Salt minions -{% from 'salt/minion.defaults.yaml' import salt %} +{% import_yaml 'salt/minion.defaults.yaml' as salt %} {% set saltversion = salt.salt.minion.version %} From e7b9e001e1ed485c1a88dfe06066aab1b9ab3468 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Tue, 28 Jul 2020 22:08:00 -0400 Subject: [PATCH 035/376] mysql init.sls - change startup time from 2 min to 15min Closes https://github.com/Security-Onion-Solutions/securityonion/issues/1106 --- salt/mysql/init.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls index c4caa5fcd..78240fe2f 100644 --- a/salt/mysql/init.sls +++ b/salt/mysql/init.sls @@ -89,7 +89,7 @@ so-mysql: - /opt/so/conf/mysql/etc cmd.run: - name: until nc -z {{ MAINIP }} 3306; do sleep 1; done - - timeout: 120 + - timeout: 900 - onchanges: - docker_container: so-mysql -{% endif %} \ No newline at end of file +{% endif %} From 7d432091e2d8e7a68c1fe14a043bff2b6bcafba8 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 29 Jul 2020 08:35:07 -0400 Subject: [PATCH 036/376] Remove LS syslog port binding --- pillar/logstash/init.sls | 1 - 1 file changed, 1 deletion(-) diff --git a/pillar/logstash/init.sls b/pillar/logstash/init.sls index 6d51d0471..c2dfd9cfd 100644 --- a/pillar/logstash/init.sls +++ b/pillar/logstash/init.sls @@ -1,7 +1,6 @@ logstash: docker_options: port_bindings: - - 0.0.0.0:514:514 - 0.0.0.0:5044:5044 - 0.0.0.0:5644:5644 - 0.0.0.0:6050:6050 From e3da326fcb5a03791d0d1eb5d34e2a6a2fe4aa43 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 29 Jul 2020 09:27:18 -0400 Subject: [PATCH 037/376] Remove non used pillar items --- setup/so-functions | 3 --- 1 file changed, 3 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index c955c5f8b..5d5c9f585 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1510,9 +1510,6 @@ sensor_pillar() { echo " suriprocs: $BASICSURI" >> "$pillar_file" fi printf '%s\n'\ - " zeekbpf:"\ - " pcapbpf:"\ - " nidsbpf:"\ " manager: $MSRV"\ " mtu: $MTU"\ " uniqueid: $(date '+%s')" >> "$pillar_file" From b67e3507d3371296842031325d393c74f5648843 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 10:13:30 -0400 Subject: [PATCH 038/376] always update and clean dockers --- salt/common/tools/sbin/soup | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git 
a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 76e37c062..24a8e1278 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -226,13 +226,11 @@ echo "Making pillar changes" pillar_changes echo "" -if [ "$INSTALLEDVERSION" != "$NEWVERSION" ]; then - echo "Cleaning up old dockers" - clean_dockers - echo "" - echo "Updating docker to $NEWVERSION" - update_dockers -fi +echo "Cleaning up old dockers" +clean_dockers +echo "" +echo "Updating docker to $NEWVERSION" +update_dockers echo "" echo "Copying new code" From dca3855f81cf1ca76480b2b75ac5a879603ee741 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 10:50:11 -0400 Subject: [PATCH 039/376] remove always update if branch specified --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 24a8e1278..fc67a2157 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -153,7 +153,7 @@ update_version() { upgrade_check() { # Let's make sure we actually need to update. NEWVERSION=$(cat $UPDATE_DIR/VERSION) - if [ "$INSTALLEDVERSION" == "$NEWVERSION" ] && [ -z "$SOUP_BRANCH" ]; then + if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then echo "You are already running the latest version of Security Onion." exit 0 fi From 9b29dff04f782c97ab87d95797c94819256d3cec Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 11:40:45 -0400 Subject: [PATCH 040/376] only generate p8 files if the key used for generation changes --- salt/ssl/init.sls | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index a382a4ed2..82dbb3a7b 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -107,6 +107,8 @@ influxkeyperms: - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/filebeat.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' cmd.run: - name: "/usr/bin/openssl pkcs8 -in /etc/pki/filebeat.key -topk8 -out /etc/pki/filebeat.p8 -nocrypt" + - onchanges: + - x509: /etc/pki/filebeat.key fbperms: @@ -289,6 +291,8 @@ fbcertdir: filebeatpkcs: cmd.run: - name: "/usr/bin/openssl pkcs8 -in /opt/so/conf/filebeat/etc/pki/filebeat.key -topk8 -out /opt/so/conf/filebeat/etc/pki/filebeat.p8 -passout pass:" + - onchanges: + - x509: /opt/so/conf/filebeat/etc/pki/filebeat.key filebeatkeyperms: file.managed: From b9d0bd86ca18635d4f1175867039171982666ab6 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 13:27:06 -0400 Subject: [PATCH 041/376] fbkeylink and fbcertlink owned by socore:socore --- salt/ssl/init.sls | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 82dbb3a7b..3430fedef 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -136,11 +136,15 @@ fbkeylink: file.symlink: - name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.p8 - target: /etc/pki/filebeat.p8 + - user: socore + - group: socore fbcrtlink: file.symlink: - name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.crt - target: /etc/pki/filebeat.crt + - user: socore + - group: socore /etc/pki/registry.key: x509.private_key_managed: From 0de6e86cdb8131c471297cbe04e2355b4ac97e17 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 13:39:55 -0400 Subject: [PATCH 042/376] dont run bootstrap-salt if the proper version is installed --- salt/salt/minion.sls | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git
a/salt/salt/minion.sls b/salt/salt/minion.sls index 4364e2612..10512134c 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -5,4 +5,9 @@ install_salt_minion: cmd.run: - - name: yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable {{ saltversion }} && yum versionlock add "salt-*" \ No newline at end of file + {% if grains.saltversion != saltversion %} + - name: yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable {{ saltversion }} && yum versionlock add "salt-*" + {% else %} + - name: echo 'Already running Salt Minon version {{ saltversion }}' + {% endif %} + \ No newline at end of file From 9db390023be344edd44b17f29cebe39163a4d57a Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 29 Jul 2020 13:51:46 -0400 Subject: [PATCH 043/376] Increase timeout from 10s to 30s --- salt/wazuh/files/agent/wazuh-register-agent | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/wazuh/files/agent/wazuh-register-agent b/salt/wazuh/files/agent/wazuh-register-agent index f2fd8693f..bed0ba57f 100755 --- a/salt/wazuh/files/agent/wazuh-register-agent +++ b/salt/wazuh/files/agent/wazuh-register-agent @@ -135,7 +135,7 @@ shift $(($OPTIND - 1)) # fi # Default action -> try to register the agent -sleep 10s +sleep 30s STATUS=$(curl -s -k -u $USER:$PASSWORD $PROTOCOL://$API_IP:$API_PORT/agents/$AGENT_ID | jq .data.status | sed s'/"//g') if [[ $STATUS == "Active" ]]; then echo "Agent $AGENT_ID already registered!" From c48ba8abaf2b22483f6e5368d35c731c70188d13 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 29 Jul 2020 13:52:12 -0400 Subject: [PATCH 044/376] Re-arrange config --- salt/wazuh/init.sls | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls index 314a5f47f..2695febd5 100644 --- a/salt/wazuh/init.sls +++ b/salt/wazuh/init.sls @@ -46,13 +46,6 @@ wazuhpkgs: - hold: True - update_holds: True -wazuhdir: - file.directory: - - name: /nsm/wazuh - - user: 945 - - group: 945 - - makedirs: True - # Add Wazuh agent conf wazuhagentconf: file.managed: @@ -62,6 +55,13 @@ wazuhagentconf: - group: 945 - template: jinja +wazuhdir: + file.directory: + - name: /nsm/wazuh + - user: 945 + - group: 945 + - makedirs: True + # Wazuh agent registration script wazuhagentregister: file.managed: From b8c06538184dbb2b76eea043719c0316d99d03cc Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 14:18:11 -0400 Subject: [PATCH 045/376] soup upgrade salt on minions - add batch size option --- salt/common/tools/sbin/soup | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index fc67a2157..e7561ecaa 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -21,6 +21,7 @@ INSTALLEDVERSION=$(cat /etc/soversion) INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'}) default_salt_dir=/opt/so/saltstack/default SOUP_BRANCH=$1 +BATCHSIZE = 5 manager_check() { # Check to see if this is a manager @@ -196,6 +197,20 @@ verify_latest_update_script() { fi } +while getopts ":b" opt; do + case ${opt} in + b ) # process option b + if [[ $OPTARG =~ ^?[0-9]+$ ]] && [[ $OPTARG -gt 0 ]]; then + BATCHSIZE = $OPTARG + else + echo "Batch size must be a number greater than 0" + fi + ;; + \? 
) echo "Usage: cmd [-b]" + ;; + esac +done + echo "Checking to see if this is a manager" manager_check echo "Cloning latest code to a temporary location" @@ -254,3 +269,7 @@ echo "Running a highstate to complete upgrade" highstate echo "" echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." +echo "" +echo "Upgrading the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION" +salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion +echo "" From c9498452186d645d5ad6afec0ee32623106d0d76 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 14:20:17 -0400 Subject: [PATCH 046/376] only try to upgrade salt on grid if salt upgraded on manager --- salt/common/tools/sbin/soup | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index e7561ecaa..05d934999 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -165,6 +165,7 @@ upgrade_check_salt() { if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then echo "You are already running the correct version of Salt for Security Onion." else + SALTUPGRADED = True echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION" echo "" # If CentOS @@ -269,7 +270,10 @@ echo "Running a highstate to complete upgrade" highstate echo "" echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." -echo "" -echo "Upgrading the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION" -salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion -echo "" + +if [ SALTUPGRADED ]; then + echo "" + echo "Upgrading the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION" + salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion + echo "" +fi From 8a44d4752b0dd95b1438e8f280bda4c03b10cdc7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 14:26:57 -0400 Subject: [PATCH 047/376] fix var def --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 05d934999..f2cc034b6 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -21,7 +21,7 @@ INSTALLEDVERSION=$(cat /etc/soversion) INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'}) default_salt_dir=/opt/so/saltstack/default SOUP_BRANCH=$1 -BATCHSIZE = 5 +BATCHSIZE=5 manager_check() { # Check to see if this is a manager From 171aa1178a1a9ef2f9b858dd534fbe3a32b76dff Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 14:36:42 -0400 Subject: [PATCH 048/376] fix vars and if statement --- salt/common/tools/sbin/soup | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index f2cc034b6..851f62363 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -165,7 +165,7 @@ upgrade_check_salt() { if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then echo "You are already running the correct version of Salt for Security Onion." 
else - SALTUPGRADED = True + SALTUPGRADED=True echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION" echo "" # If CentOS @@ -202,7 +202,7 @@ while getopts ":b" opt; do case ${opt} in b ) # process option b if [[ $OPTARG =~ ^?[0-9]+$ ]] && [[ $OPTARG -gt 0 ]]; then - BATCHSIZE = $OPTARG + BATCHSIZE=$OPTARG else echo "Batch size must be a number greater than 0" fi @@ -271,7 +271,7 @@ highstate echo "" echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." -if [ SALTUPGRADED ]; then +if [ "$SALTUPGRADED" = True ]; then echo "" echo "Upgrading the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION" salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion From 8c466f548b45f3ee70a6cda24b93b3c7f16a81ed Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 14:38:42 -0400 Subject: [PATCH 049/376] update wording --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 851f62363..7efdbb22e 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -273,7 +273,7 @@ echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." if [ "$SALTUPGRADED" = True ]; then echo "" - echo "Upgrading the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION" + echo "Upgrading Salt on the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION." salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion echo "" fi From 5a814f8312731e49419969f56c3f1c79f631037d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 14:41:58 -0400 Subject: [PATCH 050/376] change conditional statement --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 7efdbb22e..8a10231b5 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -271,7 +271,7 @@ highstate echo "" echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." -if [ "$SALTUPGRADED" = True ]; then +if [[ "$SALTUPGRADED" == "True" ]]; then echo "" echo "Upgrading Salt on the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
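# Sketch of the batched rollout command below: -C selects minions with a
# compound matcher (every minion whose ID does not end in _eval, _helix,
# _manager, _managersearch, or _standalone), and -b applies the salt.minion
# state to at most $BATCHSIZE minions at a time, so the whole grid never
# restarts its salt-minion services at once. An illustrative way to preview
# the match before upgrading (not part of the script):
#   salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' test.ping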
salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion From 03144446c8993207e1ff1ec18801acdf5c5869df Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 14:59:00 -0400 Subject: [PATCH 051/376] revert branch to original code --- salt/common/tools/sbin/soup | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 8a10231b5..dbf02b4ad 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -20,7 +20,6 @@ UPDATE_DIR=/tmp/sogh/securityonion INSTALLEDVERSION=$(cat /etc/soversion) INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'}) default_salt_dir=/opt/so/saltstack/default -SOUP_BRANCH=$1 BATCHSIZE=5 manager_check() { @@ -46,11 +45,11 @@ clone_to_tmp() { # Make a temp location for the files mkdir -p /tmp/sogh cd /tmp/sogh - if [ -n "$SOUP_BRANCH" ]; then - git clone -b $SOUP_BRANCH https://github.com/Security-Onion-Solutions/securityonion.git - else - git clone https://github.com/Security-Onion-Solutions/securityonion.git + SOUP_BRANCH="" + if [ -n "$BRANCH" ]; then + SOUP_BRANCH="-b $BRANCH" fi + git clone $SOUP_BRANCH https://github.com/Security-Onion-Solutions/securityonion.git cd /tmp if [ ! -f $UPDATE_DIR/VERSION ]; then echo "Update was unable to pull from github. Please check your internet." From 22b757f1126b8d29afc4d8035e248953a4322761 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 15:36:35 -0400 Subject: [PATCH 052/376] dont install new minion if already installed --- salt/salt/minion.sls | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 10512134c..8e7766738 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -5,9 +5,8 @@ install_salt_minion: cmd.run: - {% if grains.saltversion != saltversion %} + {% if grains.saltversion|string != saltversion|string %} - name: yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable {{ saltversion }} && yum versionlock add "salt-*" {% else %} - name: echo 'Already running Salt Minon version {{ saltversion }}' {% endif %} - \ No newline at end of file From 3e78c88114af0b35cd0e1ec741de0c71cbe9626d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 15:52:48 -0400 Subject: [PATCH 053/376] update salt top to run salt.minion state if defined version not installed. 
only apply other states if proper version installed --- salt/top.sls | 49 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/salt/top.sls b/salt/top.sls index 5f316dd15..b5669ff32 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -7,22 +7,30 @@ {%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%} {%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%} {%- set STRELKA = salt['pillar.get']('strelka:enabled', '0') -%} +{% import_yaml 'salt/minion.defaults.yaml' as salt %} +{% set saltversion = salt.salt.minion.version %} base: - 'os:CentOS': - - match: grain + 'not G@saltversion:{{saltversion}}': + - match: compound + - salt.minion + + 'G@os:CentOS and G@saltversion:{{saltversion}}': + - match: compound - yum - yum.packages - '*': + '* and G@saltversion:{{saltversion}}': + - match: compound - salt - docker - patch.os.schedule - motd - '*_helix': + '*_helix and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - registry @@ -39,7 +47,8 @@ base: - filebeat - schedule - '*_sensor': + '*_sensor and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - common @@ -61,7 +70,8 @@ base: {%- endif %} - schedule - '*_eval': + '*_eval and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - registry @@ -117,7 +127,8 @@ base: {%- endif %} - '*_manager': + '*_manager and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - registry @@ -162,7 +173,8 @@ base: - domainstats {%- endif %} - '*_standalone': + '*_standalone and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - registry @@ -220,7 +232,7 @@ base: # Search node logic - '*_node and I@node:node_type:parser': + '*_node and I@node:node_type:parser and G@saltversion:{{saltversion}}': - match: compound - common - firewall @@ -230,7 +242,7 @@ base: {%- endif %} - schedule - '*_node and I@node:node_type:hot': + '*_node and I@node:node_type:hot and G@saltversion:{{saltversion}}': - match: compound - common - firewall @@ -241,7 +253,7 @@ base: {%- endif %} - schedule - '*_node and I@node:node_type:warm': + '*_node and I@node:node_type:warm and G@saltversion:{{saltversion}}': - match: compound - common - firewall @@ -251,7 +263,8 @@ base: {%- endif %} - schedule - '*_searchnode': + '*_searchnode and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - common @@ -269,7 +282,8 @@ base: {%- endif %} - schedule - '*_managersensor': + '*_managersensor and G@saltversion:{{saltversion}}': + - match: compound - common - nginx - telegraf @@ -283,7 +297,8 @@ base: {%- endif %} - schedule - '*_managersearch': + '*_managersearch and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - registry @@ -329,7 +344,8 @@ base: - domainstats {%- endif %} - '*_heavynode': + '*_heavynode and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - common @@ -353,7 +369,8 @@ base: - filebeat - schedule - '*_fleet': + '*_fleet and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - common From 14584b28e1812c7c67a7ba97899eead9ad15ba56 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 29 Jul 2020 16:04:47 -0400 Subject: [PATCH 054/376] include salt state in salt.minion, manager salt-minion service in salt.minion state; --- salt/salt/init.sls | 9 +-------- salt/salt/minion.sls | 9 ++++++++- salt/top.sls | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/salt/salt/init.sls b/salt/salt/init.sls index ca08aab78..2caae81cd 100644 --- a/salt/salt/init.sls +++ 
b/salt/salt/init.sls @@ -1,20 +1,13 @@ - - {% if grains['os'] != 'CentOS' %} saltpymodules: pkg.installed: - pkgs: - python-docker - python-m2crypto - {% endif %} +{% endif %} salt_bootstrap: file.managed: - name: /usr/sbin/bootstrap-salt.sh - source: salt://salt/scripts/bootstrap-salt.sh - mode: 755 - -salt_minion_service: - service.running: - - name: salt-minion - - enable: True diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 8e7766738..331efbc53 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -1,4 +1,6 @@ -#Future state for Salt minions +include: + - salt + {% import_yaml 'salt/minion.defaults.yaml' as salt %} {% set saltversion = salt.salt.minion.version %} @@ -10,3 +12,8 @@ install_salt_minion: {% else %} - name: echo 'Already running Salt Minon version {{ saltversion }}' {% endif %} + +salt_minion_service: + service.running: + - name: salt-minion + - enable: True diff --git a/salt/top.sls b/salt/top.sls index b5669ff32..599f67dca 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -24,7 +24,7 @@ base: '* and G@saltversion:{{saltversion}}': - match: compound - - salt + - salt.minion - docker - patch.os.schedule - motd From 2fab00458b0ddcd1fa083584a8c77dfb3bb207f1 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 30 Jul 2020 10:23:00 -0400 Subject: [PATCH 055/376] Add randomized play secrets for Cortex + TheHive --- setup/so-functions | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 5d5c9f585..2f1ea7198 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -875,8 +875,10 @@ generate_passwords(){ FLEETPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) FLEETJWT=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) HIVEKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + HIVEPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) CORTEXKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) CORTEXORGUSERKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + CORTEXPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) SENSORONIKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) KRATOSKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) } @@ -1027,12 +1029,14 @@ manager_static() { " hiveuser: $WEBUSER"\ " hivepassword: '$WEBPASSWD1'"\ " hivekey: $HIVEKEY"\ + " hiveplaysecret: $HIVEPLAYSECRET"\ " cortexuser: $WEBUSER"\ " cortexpassword: '$WEBPASSWD1'"\ " cortexkey: $CORTEXKEY"\ " cortexorgname: SecurityOnion"\ - " cortexorguser: $WEBUSER"\ + " cortexorguser: soadmin"\ " cortexorguserkey: $CORTEXORGUSERKEY"\ + " cortexplaysecret: $CORTEXPLAYSECRET"\ " fleet_custom_hostname: "\ " fleet_manager: False"\ " fleet_node: False"\ From b6a053070f7d99d325cb655f941f07a3950e008d Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 30 Jul 2020 10:25:07 -0400 Subject: [PATCH 056/376] Change TheHive play secret --- salt/thehive/etc/application.conf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/thehive/etc/application.conf b/salt/thehive/etc/application.conf index f06c3f7c6..8aaf7a9a5 100644 --- a/salt/thehive/etc/application.conf +++ b/salt/thehive/etc/application.conf @@ -1,10 +1,11 @@ {%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} {%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %} +{%- set HIVEPLAYSECRET = salt['pillar.get']('static:hiveplaysecret', '') %} # Secret Key # The secret key is used to secure 
cryptographic functions. # WARNING: If you deploy your application on several servers, make sure to use the same key. -play.http.secret.key="letsdewdis" +play.http.secret.key="{{ HIVEPLAYSECRET }}" play.http.context=/thehive/ search.uri = "http://{{ MANAGERIP }}:9400" # Elasticsearch From c58ee8a37daf366646464ec2f8c0b99ecb363ce9 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 30 Jul 2020 10:25:53 -0400 Subject: [PATCH 057/376] Add Cortex play secret --- salt/thehive/etc/cortex-application.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/thehive/etc/cortex-application.conf b/salt/thehive/etc/cortex-application.conf index b9cbe20cc..c8e96ee3e 100644 --- a/salt/thehive/etc/cortex-application.conf +++ b/salt/thehive/etc/cortex-application.conf @@ -1,4 +1,5 @@ {%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} +{%- set CORTEXPLAYSECRET = salt['pillar.get']('static:cortexplaysecret', '') %} # Secret Key # The secret key is used to secure cryptographic functions. From 4282930f0838019a0bea2d0fed1045e5c6c729d9 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 30 Jul 2020 10:26:49 -0400 Subject: [PATCH 058/376] Update cortex-application.conf --- salt/thehive/etc/cortex-application.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/thehive/etc/cortex-application.conf b/salt/thehive/etc/cortex-application.conf index c8e96ee3e..c7e52d954 100644 --- a/salt/thehive/etc/cortex-application.conf +++ b/salt/thehive/etc/cortex-application.conf @@ -4,7 +4,7 @@ # Secret Key # The secret key is used to secure cryptographic functions. # WARNING: If you deploy your application on several servers, make sure to use the same key. -play.http.secret.key="letsdewdis" +play.http.secret.key="{{ CORTEXPLAYSECRET }}" play.http.context=/cortex/ search.uri = "http://{{ MANAGERIP }}:9400" From 3d4a96fae0b6c2c33276762815f6d59d15e3bc48 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 11:16:37 -0400 Subject: [PATCH 059/376] update ssl state unless , check and upgrade salt minion if needed during install --- salt/common/tools/sbin/soup | 4 ---- salt/ssl/init.sls | 6 +++--- setup/so-setup | 6 ++++-- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index dbf02b4ad..c344090e1 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -258,10 +258,6 @@ update_version echo "" echo "Starting Salt Master service" systemctl start salt-master -#echo "" -#echo "Starting Salt Minion service" -#systemctl start salt-minion -#echo "" echo "" diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 3430fedef..dfbd4c12a 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -242,7 +242,7 @@ msslkeyperms: - unless: # https://github.com/saltstack/salt/issues/52167 # Will trigger 5 days (432000 sec) from cert expiration - - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/managerssl.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/fleet.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' fleetkeyperms: file.managed: @@ -289,7 +289,7 @@ fbcertdir: - unless: # https://github.com/saltstack/salt/issues/52167 # Will trigger 5 days (432000 sec) from cert expiration - - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/filebeat.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; 
expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + - 'enddate=$(date -d "$(openssl x509 -in /opt/so/conf/filebeat/etc/pki/filebeat.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' # Convert the key to pkcs#8 so logstash will work correctly. filebeatpkcs: @@ -378,7 +378,7 @@ msslkeyperms: - unless: # https://github.com/saltstack/salt/issues/52167 # Will trigger 5 days (432000 sec) from cert expiration - - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/managerssl.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/fleet.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' fleetkeyperms: file.managed: diff --git a/setup/so-setup b/setup/so-setup index 80d028662..ea8c0e7bc 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -457,6 +457,8 @@ fi if [[ $is_minion ]]; then set_progress_str 20 'Accepting Salt key on manager' accept_salt_key_remote >> $setup_log 2>&1 + set_progress_str 21 'Checking if the Salt Minion needs to be updated' + salt-call state.apply salt.minion -l info >> $setup_log 2>&1 fi if [[ $is_manager ]]; then @@ -464,10 +466,10 @@ fi salt-key -ya "$MINION_ID" >> $setup_log 2>&1 fi - set_progress_str 21 'Copying minion pillars to manager' + set_progress_str 22 'Copying minion pillars to manager' copy_minion_tmp_files >> $setup_log 2>&1 - set_progress_str 22 'Generating CA and checking in' + set_progress_str 23 'Generating CA and checking in' salt_checkin >> $setup_log 2>&1 if [[ $is_manager || $is_helix ]]; then From 728afdcaaf7041ffbebe0dcb1032a3afcd2558eb Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 11:18:27 -0400 Subject: [PATCH 060/376] exit soup if batch size invalid --- salt/common/tools/sbin/soup | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index c344090e1..0eeef490a 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -204,6 +204,7 @@ while getopts ":b" opt; do BATCHSIZE=$OPTARG else echo "Batch size must be a number greater than 0" + exit 1 fi ;; \? 
) echo "Usage: cmd [-b]" From 2222bce77baacd50e25b61e143d65593b2e65cf2 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 11:22:12 -0400 Subject: [PATCH 061/376] update regex --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 0eeef490a..3f1aea956 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -200,7 +200,7 @@ verify_latest_update_script() { while getopts ":b" opt; do case ${opt} in b ) # process option b - if [[ $OPTARG =~ ^?[0-9]+$ ]] && [[ $OPTARG -gt 0 ]]; then + if [[ $OPTARG =~ ^[0-9]+$ ]] && [[ $OPTARG -gt 0 ]]; then BATCHSIZE=$OPTARG else echo "Batch size must be a number greater than 0" From e9d889f719aac78c6feef81c2f72300d729e1490 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 11:33:19 -0400 Subject: [PATCH 062/376] fix regex --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 3f1aea956..597bf99df 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -200,7 +200,7 @@ verify_latest_update_script() { while getopts ":b" opt; do case ${opt} in b ) # process option b - if [[ $OPTARG =~ ^[0-9]+$ ]] && [[ $OPTARG -gt 0 ]]; then + if [[ $OPTARG =~ '^[0-9]+$' ]] && [[ $OPTARG -gt 0 ]]; then BATCHSIZE=$OPTARG else echo "Batch size must be a number greater than 0" From 1c5e6fa10f393bfede9db595b9444b069c8b6b45 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 11:39:58 -0400 Subject: [PATCH 063/376] change if for optargs --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 597bf99df..d47c0f834 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -200,7 +200,7 @@ verify_latest_update_script() { while getopts ":b" opt; do case ${opt} in b ) # process option b - if [[ $OPTARG =~ '^[0-9]+$' ]] && [[ $OPTARG -gt 0 ]]; then + if [[ $OPTARG =~ '^[0-9]+$' ]]; then BATCHSIZE=$OPTARG else echo "Batch size must be a number greater than 0" From de0b34a66b49eecf89a8668e3a727233bba1def6 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 11:43:18 -0400 Subject: [PATCH 064/376] change if for optargs --- salt/common/tools/sbin/soup | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index d47c0f834..d2205842c 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -200,7 +200,8 @@ verify_latest_update_script() { while getopts ":b" opt; do case ${opt} in b ) # process option b - if [[ $OPTARG =~ '^[0-9]+$' ]]; then + re='^[0-9]+$' + if [[ $OPTARG =~ $re ]]; then BATCHSIZE=$OPTARG else echo "Batch size must be a number greater than 0" From c099f3c5ec7237ea98e5aa9fc3804b3457253123 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 11:49:34 -0400 Subject: [PATCH 065/376] change if for optargs --- salt/common/tools/sbin/soup | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index d2205842c..bfa8b1130 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -200,8 +200,7 @@ verify_latest_update_script() { while getopts ":b" opt; do case ${opt} in b ) # process option b - re='^[0-9]+$' - if [[ $OPTARG =~ $re ]]; then + if [[ "$OPTARG" =~ ^[0-9]+$ 
]]; then BATCHSIZE=$OPTARG else echo "Batch size must be a number greater than 0" From 9570efbf8e8c7db784f6c7612ff42f2c2f70b16f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 12:15:09 -0400 Subject: [PATCH 066/376] fix opt check --- salt/common/tools/sbin/soup | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index bfa8b1130..b61715063 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -192,20 +192,20 @@ verify_latest_update_script() { cp $UPDATE_DIR/salt/common/tools/sbin/soup $default_salt_dir/salt/common/tools/sbin/ salt-call state.apply common queue=True echo "" - echo "soup has been updated. Please run soup again" + echo "soup has been updated. Please run soup again." exit 0 fi } while getopts ":b" opt; do - case ${opt} in + case "$opt" in b ) # process option b - if [[ "$OPTARG" =~ ^[0-9]+$ ]]; then - BATCHSIZE=$OPTARG - else - echo "Batch size must be a number greater than 0" - exit 1 - fi + shift + BATCHSIZE=$1 + if ! [[ "$BATCHSIZE" =~ ^[0-9]+$ ]]; then + echo "Batch size must be a number greater than 0" + exit 1 + fi ;; \? ) echo "Usage: cmd [-b]" ;; From 8a8705f469ab4f2f1f57ea629f1c71edc17ea17d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 12:41:09 -0400 Subject: [PATCH 067/376] move when we check for salt minion update in setup --- setup/so-setup | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index ea8c0e7bc..e4af88205 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -457,8 +457,6 @@ fi if [[ $is_minion ]]; then set_progress_str 20 'Accepting Salt key on manager' accept_salt_key_remote >> $setup_log 2>&1 - set_progress_str 21 'Checking if the Salt Minion needs to be updated' - salt-call state.apply salt.minion -l info >> $setup_log 2>&1 fi if [[ $is_manager ]]; then @@ -466,9 +464,14 @@ fi salt-key -ya "$MINION_ID" >> $setup_log 2>&1 fi - set_progress_str 22 'Copying minion pillars to manager' + set_progress_str 21 'Copying minion pillars to manager' copy_minion_tmp_files >> $setup_log 2>&1 + if [[ $is_minion ]]; then + set_progress_str 22 'Checking if the Salt Minion needs to be updated' + salt-call state.apply salt.minion -l info >> $setup_log 2>&1 + fi + set_progress_str 23 'Generating CA and checking in' salt_checkin >> $setup_log 2>&1 From a562d70fe205aa3d487e5a3c17841c3593384a49 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 13:18:59 -0400 Subject: [PATCH 068/376] stop salt minion first then salt master --- salt/common/tools/sbin/soup | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index b61715063..4d223b9c8 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -228,12 +228,12 @@ upgrade_check echo "" echo "Performing Upgrade from $INSTALLEDVERSION to $NEWVERSION" echo "" -echo "Stopping Salt Master service" -systemctl stop salt-master -echo "" echo "Stopping Salt Minion service" systemctl stop salt-minion echo "" +echo "Stopping Salt Master service" +systemctl stop salt-master +echo "" echo "Checking for Salt updates" upgrade_check_salt From 6812d3f5c5632cdece5b916744628458ba6dfa28 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 13:35:09 -0400 Subject: [PATCH 069/376] change output wording, add periods --- salt/common/tools/sbin/soup | 45 ++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 23 
deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 4d223b9c8..4629705bf 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -26,7 +26,7 @@ manager_check() { # Check to see if this is a manager MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}') if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch')$ ]]; then - echo "This is a manager. We can proceed" + echo "This is a manager. We can proceed." else echo "Please run soup on the manager. The manager controls all updates." exit 0 @@ -75,7 +75,7 @@ highstate() { pillar_changes() { # This function is to add any new pillar items if needed. - echo "Checking to see if pillar changes are needed" + echo "Checking to see if pillar changes are needed." } @@ -145,7 +145,7 @@ update_dockers() { update_version() { # Update the version to the latest - echo "Updating the version file." + echo "Updating the Security Onion version file." echo $NEWVERSION > /etc/soversion sed -i "s/$INSTALLEDVERSION/$NEWVERSION/g" /opt/so/saltstack/local/pillar/static.sls } @@ -165,16 +165,16 @@ upgrade_check_salt() { echo "You are already running the correct version of Salt for Security Onion." else SALTUPGRADED=True - echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION" + echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION." echo "" # If CentOS - echo "Removing yum versionlock for Salt" + echo "Removing yum versionlock for Salt." echo "" yum versionlock delete "salt-*" - echo "Updating Salt packages and restarting services" + echo "Updating Salt packages and restarting services." echo "" sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION" - echo "Applying yum versionlock for Salt" + echo "Applying yum versionlock for Salt." echo "" yum versionlock add "salt-*" # Else do Ubuntu things @@ -203,7 +203,7 @@ while getopts ":b" opt; do shift BATCHSIZE=$1 if ! [[ "$BATCHSIZE" =~ ^[0-9]+$ ]]; then - echo "Batch size must be a number greater than 0" + echo "Batch size must be a number greater than 0." exit 1 fi ;; @@ -212,57 +212,56 @@ while getopts ":b" opt; do esac done -echo "Checking to see if this is a manager" +echo "Checking to see if this is a manager." manager_check -echo "Cloning latest code to a temporary location" +echo "Cloning Security Onion github repo into $UPDATE_DIR." clone_to_tmp echo "" -echo "Verifying we have the latest script" +echo "Verifying we have the latest soup script." verify_latest_update_script echo "" -echo "Let's see if we need to update" +echo "Let's see if we need to update Security Onion." upgrade_check echo "" -echo "Performing Upgrade from $INSTALLEDVERSION to $NEWVERSION" +echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION." echo "" -echo "Stopping Salt Minion service" +echo "Stopping Salt Minion service." systemctl stop salt-minion echo "" -echo "Stopping Salt Master service" +echo "Stopping Salt Master service." systemctl stop salt-master echo "" -echo "Checking for Salt updates" +echo "Checking for Salt master and minion updates." upgrade_check_salt -echo "Making pillar changes" +echo "Making pillar changes." pillar_changes echo "" -echo "Cleaning up old dockers" +echo "Cleaning up old dockers." clean_dockers echo "" -echo "Updating docker to $NEWVERSION" +echo "Updating dockers to $NEWVERSION." 
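# [A sketch of an alternative to the shift workaround that PATCHES 060-066
# iterate toward: declaring the option as "b:" (trailing colon) tells getopts
# that -b takes an argument, so $OPTARG is populated directly. This is the
# standard idiom, not the fix the patches actually apply.]
BATCHSIZE=5
while getopts ":b:" opt; do
    case "$opt" in
        b)
            if [[ "$OPTARG" =~ ^[0-9]+$ ]] && [[ "$OPTARG" -gt 0 ]]; then
                BATCHSIZE=$OPTARG
            else
                echo "Batch size must be a number greater than 0." >&2
                exit 1
            fi
            ;;
        \?) echo "Usage: cmd [-b batchsize]" >&2 ;;
    esac
done
echo "Using batch size $BATCHSIZE"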
update_dockers echo "" -echo "Copying new code" +echo "Copying new Security Onion code from $UPDATE_DIR to $default_salt_dir." copy_new_files echo "" -echo "Updating version" update_version echo "" -echo "Starting Salt Master service" +echo "Starting Salt Master service." systemctl start salt-master echo "" -echo "Running a highstate to complete upgrade" +echo "Running a highstate to complete the upgrade." highstate echo "" echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." From 4bf4634762791e852cbb50d42a6ddfcaad7bec3b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 13:47:21 -0400 Subject: [PATCH 070/376] ensure yum versionlock with a state rather than cmd.run state --- salt/salt/minion.sls | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 331efbc53..9dc34a810 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -1,18 +1,24 @@ include: - salt - + {% import_yaml 'salt/minion.defaults.yaml' as salt %} {% set saltversion = salt.salt.minion.version %} - +{% if grains.os|lower == 'centos' %} install_salt_minion: cmd.run: {% if grains.saltversion|string != saltversion|string %} - - name: yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable {{ saltversion }} && yum versionlock add "salt-*" + - name: yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable {{ saltversion }} {% else %} - name: echo 'Already running Salt Minon version {{ saltversion }}' {% endif %} +versionlock_salt_minion: + module.run: + - pkg.hold: + - name: "salt-*" +{% endif %} + salt_minion_service: service.running: - name: salt-minion From a4fc2cbd4283d5a66c4a8498148595137e9f1969 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 13:50:22 -0400 Subject: [PATCH 071/376] caps --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 4629705bf..505354bb6 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -234,7 +234,7 @@ echo "" echo "Stopping Salt Master service." systemctl stop salt-master echo "" -echo "Checking for Salt master and minion updates." +echo "Checking for Salt Master and Minion updates." 
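# [A condensed sketch, assuming the yum versionlock plugin and apt-mark are
# available, of the package-pinning pattern these soup patches manage around
# the Salt upgrade: unlock, upgrade, re-lock, so that routine OS updates
# cannot move Salt to an untested version in between.]
if [ -f /etc/redhat-release ]; then
    yum versionlock delete "salt-*"            # unlock before bootstrap-salt.sh runs
    # ... perform the Salt upgrade here ...
    yum versionlock add "salt-*"               # re-lock at the new version
else
    apt-mark unhold salt-common salt-minion    # Ubuntu equivalent of versionlock
    # ... perform the Salt upgrade here ...
    apt-mark hold salt-common salt-minion
fi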
upgrade_check_salt From 1492d132caebea47e161d16fec170941b3dece67 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 16:00:50 -0400 Subject: [PATCH 072/376] add ability to upgrade salt minion and master for ubuntu --- salt/common/tools/sbin/soup | 61 ++++++++++++++++++++++++++----------- salt/salt/master.sls | 12 +++++++- salt/salt/minion.sls | 35 +++++++++++++-------- 3 files changed, 78 insertions(+), 30 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 505354bb6..c65bf136b 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -19,8 +19,9 @@ UPDATE_DIR=/tmp/sogh/securityonion INSTALLEDVERSION=$(cat /etc/soversion) INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'}) -default_salt_dir=/opt/so/saltstack/default +DEFAULT_SALT_DIR=/opt/so/saltstack/default BATCHSIZE=5 +SOUP_LOG=/root/soup.log manager_check() { # Check to see if this is a manager @@ -60,13 +61,24 @@ clone_to_tmp() { copy_new_files() { # Copy new files over to the salt dir cd /tmp/sogh/securityonion - rsync -a salt $default_salt_dir/ - rsync -a pillar $default_salt_dir/ - chown -R socore:socore $default_salt_dir/ - chmod 755 $default_salt_dir/pillar/firewall/addfirewall.sh + rsync -a salt $DEFAULT_SALT_DIR/ + rsync -a pillar $DEFAULT_SALT_DIR/ + chown -R socore:socore $DEFAULT_SALT_DIR/ + chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh cd /tmp } +detect_os() { + # Detect Base OS + echo "Detecting Base OS" >> "$SOUP_LOG" 2>&1 + if [ -f /etc/redhat-release ]; then + OS="centos" + elif [ -f /etc/os-release ]; then + OS="ubuntu" + fi + echo "Found OS: $OS" >> "$SOUP_LOG" 2>&1 +} + highstate() { # Run a highstate but first cancel a running one. salt-call saltutil.kill_all_jobs @@ -76,7 +88,6 @@ highstate() { pillar_changes() { # This function is to add any new pillar items if needed. echo "Checking to see if pillar changes are needed." - } update_dockers() { @@ -168,16 +179,32 @@ upgrade_check_salt() { echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION." echo "" # If CentOS - echo "Removing yum versionlock for Salt." - echo "" - yum versionlock delete "salt-*" - echo "Updating Salt packages and restarting services." - echo "" - sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION" - echo "Applying yum versionlock for Salt." - echo "" - yum versionlock add "salt-*" + if [ "$OS" == "centos" ]; then + echo "Removing yum versionlock for Salt." + echo "" + yum versionlock delete "salt-*" + echo "Updating Salt packages and restarting services." + echo "" + sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION" + echo "Applying yum versionlock for Salt." + echo "" + yum versionlock add "salt-*" # Else do Ubuntu things + elif [ "$OS" == "ubuntu" ]; then + echo "Removing apt hold for Salt." + echo "" + apt-mark unhold "salt" + apt-mark unhold "salt-master" + apt-mark unhold "salt-minion" + echo "Updating Salt packages and restarting services." + echo "" + sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION" + echo "Applying apt hold for Salt." + echo "" + apt-mark hold "salt" + apt-mark hold "salt-master" + apt-mark hold "salt-minion" + fi fi } @@ -189,7 +216,7 @@ verify_latest_update_script() { echo "This version of the soup script is up to date. Proceeding." else echo "You are not running the latest soup version. Updating soup." 
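# [A sketch restating the detect_os pattern added in PATCH 072, with one note
# on ordering: /etc/os-release also exists on recent CentOS releases, so
# /etc/redhat-release must be probed first for the branch to classify CentOS
# correctly; the elif assumes the only other supported OS is Ubuntu.]
detect_os() {
    if [ -f /etc/redhat-release ]; then
        OS="centos"        # CentOS/RHEL marker file, checked first on purpose
    elif [ -f /etc/os-release ]; then
        OS="ubuntu"        # fallback for the remaining supported platform
    fi
    echo "Found OS: $OS"
}
detect_os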
- cp $UPDATE_DIR/salt/common/tools/sbin/soup $default_salt_dir/salt/common/tools/sbin/ + cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/ salt-call state.apply common queue=True echo "" echo "soup has been updated. Please run soup again." @@ -249,7 +276,7 @@ echo "Updating dockers to $NEWVERSION." update_dockers echo "" -echo "Copying new Security Onion code from $UPDATE_DIR to $default_salt_dir." +echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR." copy_new_files echo "" update_version diff --git a/salt/salt/master.sls b/salt/salt/master.sls index 69f6ad89a..8b719d692 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -1 +1,11 @@ -#Future state for Salt masters \ No newline at end of file +salt_master_package: + pkg.installed: + - pkgs: + - salt + - salt-master + - hold: True + +salt_minion_service: + service.running: + - name: salt-master + - enable: True \ No newline at end of file diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 9dc34a810..6a66b2fbe 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -2,22 +2,33 @@ include: - salt {% import_yaml 'salt/minion.defaults.yaml' as salt %} -{% set saltversion = salt.salt.minion.version %} +{% set SALTVERSION = salt.salt.minion.version %} + +{% if grains.saltversion|string != SALTVERSION|string %} + {% if grains.os|lower == 'centos' %} + {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable {{ SALTVERSION }}' %} + {% elif grains.os|lower == 'ubuntu' %} + {% set UPGRADECOMMAND = 'apt-mark unhold salt && apt-mark unhold salt-minion && sh bootstrap-salt.sh -F -x python3 stable {{ SALTVERSION }}' %} + {% endif %} +{% else %} + {% set UPGRADECOMMAND = 'echo "Already running Salt Minon version {{ SALTVERSION }}"' %} +{% endif %} -{% if grains.os|lower == 'centos' %} install_salt_minion: cmd.run: - {% if grains.saltversion|string != saltversion|string %} - - name: yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable {{ saltversion }} - {% else %} - - name: echo 'Already running Salt Minon version {{ saltversion }}' - {% endif %} + - name: {{ UPGRADECOMMAND }} -versionlock_salt_minion: - module.run: - - pkg.hold: - - name: "salt-*" -{% endif %} +#versionlock_salt_minion: +# module.run: +# - pkg.hold: +# - name: "salt-*" + +salt_minion_package: + pkg.installed: + - pkgs: + - salt + - salt-minion + - hold: True salt_minion_service: service.running: From cc48b55acff6316d52634467531121ca860b17b8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 16:06:01 -0400 Subject: [PATCH 073/376] change state name --- salt/salt/master.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/salt/master.sls b/salt/salt/master.sls index 8b719d692..481be743a 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -5,7 +5,7 @@ salt_master_package: - salt-master - hold: True -salt_minion_service: +salt_master_service: service.running: - name: salt-master - enable: True \ No newline at end of file From 8180f2cd939e22b7da6f9892af37e6958df1e85b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 16:13:38 -0400 Subject: [PATCH 074/376] remove quotes --- salt/salt/minion.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 6a66b2fbe..8e7f6203a 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -11,7 +11,7 @@ include: {% set UPGRADECOMMAND = 'apt-mark unhold salt && apt-mark 
unhold salt-minion && sh bootstrap-salt.sh -F -x python3 stable {{ SALTVERSION }}' %} {% endif %} {% else %} - {% set UPGRADECOMMAND = 'echo "Already running Salt Minon version {{ SALTVERSION }}"' %} + {% set UPGRADECOMMAND = 'echo Already running Salt Minon version {{ SALTVERSION }}' %} {% endif %} install_salt_minion: From 914d890a51c83840b4f8309d42df1d027c33fac4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 16:21:01 -0400 Subject: [PATCH 075/376] fix UPGRADECOMMAND --- salt/salt/minion.sls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 8e7f6203a..b43a28d1e 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -6,12 +6,12 @@ include: {% if grains.saltversion|string != SALTVERSION|string %} {% if grains.os|lower == 'centos' %} - {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable {{ SALTVERSION }}' %} + {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable ' ~ {{ SALTVERSION }} %} {% elif grains.os|lower == 'ubuntu' %} - {% set UPGRADECOMMAND = 'apt-mark unhold salt && apt-mark unhold salt-minion && sh bootstrap-salt.sh -F -x python3 stable {{ SALTVERSION }}' %} + {% set UPGRADECOMMAND = 'apt-mark unhold salt && apt-mark unhold salt-minion && sh bootstrap-salt.sh -F -x python3 stable ' ~ {{ SALTVERSION }} %} {% endif %} {% else %} - {% set UPGRADECOMMAND = 'echo Already running Salt Minon version {{ SALTVERSION }}' %} + {% set UPGRADECOMMAND = 'echo Already running Salt Minon version ' ~ {{ SALTVERSION }} %} {% endif %} install_salt_minion: From f209deac982de6d3cebbd91baf8efa6c8b101d71 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 16:25:45 -0400 Subject: [PATCH 076/376] call detect_os function --- salt/common/tools/sbin/soup | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index c65bf136b..ca6003a9e 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -70,7 +70,7 @@ copy_new_files() { detect_os() { # Detect Base OS - echo "Detecting Base OS" >> "$SOUP_LOG" 2>&1 + echo "Determining Base OS." >> "$SOUP_LOG" 2>&1 if [ -f /etc/redhat-release ]; then OS="centos" elif [ -f /etc/os-release ]; then @@ -240,7 +240,10 @@ while getopts ":b" opt; do done echo "Checking to see if this is a manager." +echo "" manager_check +detect_os +echo "" echo "Cloning Security Onion github repo into $UPDATE_DIR." 
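# [A hedged sketch of a slightly more robust variant of clone_to_tmp's
# optional branch handling from PATCH 051: collecting flags in an array
# avoids relying on word splitting of the unquoted $SOUP_BRANCH. $BRANCH is
# assumed to be set in the caller's environment.]
clone_args=()
if [ -n "$BRANCH" ]; then
    clone_args+=(-b "$BRANCH")   # add the flag only when a branch override is set
fi
git clone "${clone_args[@]}" https://github.com/Security-Onion-Solutions/securityonion.git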
clone_to_tmp echo "" From de7f67ff2f414e35311ca42c1673a2eb96c50236 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 16:31:37 -0400 Subject: [PATCH 077/376] fix UPGRADECOMMAND --- salt/salt/minion.sls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index b43a28d1e..3159034f2 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -6,12 +6,12 @@ include: {% if grains.saltversion|string != SALTVERSION|string %} {% if grains.os|lower == 'centos' %} - {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable ' ~ {{ SALTVERSION }} %} + {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} {% elif grains.os|lower == 'ubuntu' %} - {% set UPGRADECOMMAND = 'apt-mark unhold salt && apt-mark unhold salt-minion && sh bootstrap-salt.sh -F -x python3 stable ' ~ {{ SALTVERSION }} %} + {% set UPGRADECOMMAND = 'apt-mark unhold salt && apt-mark unhold salt-minion && sh bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} {% endif %} {% else %} - {% set UPGRADECOMMAND = 'echo Already running Salt Minon version ' ~ {{ SALTVERSION }} %} + {% set UPGRADECOMMAND = 'echo Already running Salt Minon version ' ~ SALTVERSION %} {% endif %} install_salt_minion: From 4e01ef279530e05ccb3f8036e4c214f301affaad Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 30 Jul 2020 16:34:48 -0400 Subject: [PATCH 078/376] Fleet - Update osquery config for 4.4 windows_events --- salt/fleet/files/packs/osquery-config.conf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/fleet/files/packs/osquery-config.conf b/salt/fleet/files/packs/osquery-config.conf index 2558efd88..4ce82cb8d 100644 --- a/salt/fleet/files/packs/osquery-config.conf +++ b/salt/fleet/files/packs/osquery-config.conf @@ -22,6 +22,8 @@ spec: distributed_tls_max_attempts: 3 distributed_tls_read_endpoint: /api/v1/osquery/distributed/read distributed_tls_write_endpoint: /api/v1/osquery/distributed/write + enable_windows_events_publisher: true + enable_windows_events_subscriber: true logger_plugin: tls logger_tls_endpoint: /api/v1/osquery/log logger_tls_period: 10 From 8d044084e18ceceb13ff4fd25666a5ee21a8c64c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 16:41:21 -0400 Subject: [PATCH 079/376] try to log soup --- salt/common/tools/sbin/soup | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index ca6003a9e..e3ac46d3e 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -22,6 +22,7 @@ INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'}) DEFAULT_SALT_DIR=/opt/so/saltstack/default BATCHSIZE=5 SOUP_LOG=/root/soup.log +exec 3>&1 1>>${SOUP_LOG} 2>&1 manager_check() { # Check to see if this is a manager @@ -224,6 +225,7 @@ verify_latest_update_script() { fi } +main () { while getopts ":b" opt; do case "$opt" in b ) # process option b @@ -302,3 +304,7 @@ if [[ "$SALTUPGRADED" == "True" ]]; then salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion echo "" fi + +} + +main "$@" | tee /dev/fd/3 \ No newline at end of file From 2ad17dfd06984a4db5e22edc35a736b192f9a6d1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 16:42:59 -0400 Subject: [PATCH 080/376] dont append --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index e3ac46d3e..cca81adc2 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -22,7 +22,7 @@ INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'}) DEFAULT_SALT_DIR=/opt/so/saltstack/default BATCHSIZE=5 SOUP_LOG=/root/soup.log -exec 3>&1 1>>${SOUP_LOG} 2>&1 +exec 3>&1 1>${SOUP_LOG} 2>&1 manager_check() { # Check to see if this is a manager From da9dc42a47d3c35727e186870fdc521206636dd1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 16:47:40 -0400 Subject: [PATCH 081/376] more logging --- salt/common/tools/sbin/soup | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index cca81adc2..691a09085 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -244,6 +244,8 @@ done echo "Checking to see if this is a manager." echo "" manager_check +echo "Found that Security Onion $INSTALLEDVERSION is currently installed." +echo "" detect_os echo "" echo "Cloning Security Onion github repo into $UPDATE_DIR." From 7287f5f935f6a0b70fcaf7a6560a1b61c346b175 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 Jul 2020 17:01:17 -0400 Subject: [PATCH 082/376] wordsmithing --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 691a09085..70b1b2414 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -295,7 +295,7 @@ systemctl start salt-master echo "" -echo "Running a highstate to complete the upgrade." +echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." highstate echo "" echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." From d6f89cb09af7653cc4b21b971171e4a83ae8d68d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 31 Jul 2020 12:37:19 -0400 Subject: [PATCH 083/376] fix ubuntu salt-common package name --- salt/common/tools/sbin/soup | 4 ++-- salt/salt/map.jinja | 21 +++++++++++++++++++++ salt/salt/minion.sls | 15 +++------------ 3 files changed, 26 insertions(+), 14 deletions(-) create mode 100644 salt/salt/map.jinja diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 70b1b2414..764a61efa 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -194,7 +194,7 @@ upgrade_check_salt() { elif [ "$OS" == "ubuntu" ]; then echo "Removing apt hold for Salt." echo "" - apt-mark unhold "salt" + apt-mark unhold "salt-common" apt-mark unhold "salt-master" apt-mark unhold "salt-minion" echo "Updating Salt packages and restarting services." @@ -202,7 +202,7 @@ upgrade_check_salt() { sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION" echo "Applying apt hold for Salt." 
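# [A minimal sketch of the logging idiom introduced in PATCHES 079/080:
# duplicate the terminal's stdout on fd 3, point fds 1 and 2 at the log
# (PATCH 080 switches '>>' to '>' so each run truncates), then tee main()'s
# output back to the terminal through /dev/fd/3 so it lands in both places.
# The log path here is hypothetical, for the sketch only.]
SOUP_LOG=/tmp/soup-demo.log
exec 3>&1 1>"$SOUP_LOG" 2>&1
main() {
    echo "shown on the terminal and written to $SOUP_LOG"
}
main "$@" | tee /dev/fd/3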
echo "" - apt-mark hold "salt" + apt-mark hold "salt-common" apt-mark hold "salt-master" apt-mark hold "salt-minion" fi diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja new file mode 100644 index 000000000..099c20c99 --- /dev/null +++ b/salt/salt/map.jinja @@ -0,0 +1,21 @@ +{% import_yaml 'salt/minion.defaults.yaml' as salt %} +{% set SALTVERSION = salt.salt.minion.version %} + +{% set SALTPACKAGES = salt['grains.filter_by']({ + 'Ubuntu': { + 'common': 'salt-common', + }, + 'Centos': { + 'common': 'salt', + }, +}) %} + +{% if grains.saltversion|string != SALTVERSION|string %} + {% if grains.os|lower == 'centos' %} + {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} + {% elif grains.os|lower == 'ubuntu' %} + {% set UPGRADECOMMAND = 'apt-mark unhold salt && apt-mark unhold salt-minion && sh bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} + {% endif %} +{% else %} + {% set UPGRADECOMMAND = 'echo Already running Salt Minon version ' ~ SALTVERSION %} +{% endif %} \ No newline at end of file diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 3159034f2..2f22f3367 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -1,18 +1,9 @@ include: - salt -{% import_yaml 'salt/minion.defaults.yaml' as salt %} -{% set SALTVERSION = salt.salt.minion.version %} +{% from 'salt/map.jinja' import SALTPACKAGES with context %} +{% from 'salt/map.jinja' import UPGRADECOMMAND with context %} -{% if grains.saltversion|string != SALTVERSION|string %} - {% if grains.os|lower == 'centos' %} - {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} - {% elif grains.os|lower == 'ubuntu' %} - {% set UPGRADECOMMAND = 'apt-mark unhold salt && apt-mark unhold salt-minion && sh bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} - {% endif %} -{% else %} - {% set UPGRADECOMMAND = 'echo Already running Salt Minon version ' ~ SALTVERSION %} -{% endif %} install_salt_minion: cmd.run: @@ -26,7 +17,7 @@ install_salt_minion: salt_minion_package: pkg.installed: - pkgs: - - salt + - {{ SALTPACKAGES.common }} - salt-minion - hold: True From 173f945fc05dee71b661375423acec142a855c23 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 31 Jul 2020 13:01:37 -0400 Subject: [PATCH 084/376] remove comma --- salt/salt/map.jinja | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja index 099c20c99..828ce3ce7 100644 --- a/salt/salt/map.jinja +++ b/salt/salt/map.jinja @@ -3,10 +3,10 @@ {% set SALTPACKAGES = salt['grains.filter_by']({ 'Ubuntu': { - 'common': 'salt-common', + 'common': 'salt-common' }, 'Centos': { - 'common': 'salt', + 'common': 'salt' }, }) %} From dd865f6a687bc677b3207b2c8044a0a6fe8ecfd4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 31 Jul 2020 13:10:37 -0400 Subject: [PATCH 085/376] change map --- salt/salt/map.jinja | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja index 828ce3ce7..5730f0303 100644 --- a/salt/salt/map.jinja +++ b/salt/salt/map.jinja @@ -2,12 +2,8 @@ {% set SALTVERSION = salt.salt.minion.version %} {% set SALTPACKAGES = salt['grains.filter_by']({ - 'Ubuntu': { - 'common': 'salt-common' - }, - 'Centos': { - 'common': 'salt' - }, + 'Ubuntu': {'common': 'salt-common'}, + 'Centos': {'common': 'salt'}, }) %} {% if grains.saltversion|string != SALTVERSION|string %} From d7ad2fbfd7d28eea47bb5520e1a43ad9232f3b68 
Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 31 Jul 2020 13:17:56 -0400 Subject: [PATCH 086/376] move include --- salt/salt/minion.sls | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 2f22f3367..a1311ebeb 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -1,9 +1,8 @@ -include: - - salt - {% from 'salt/map.jinja' import SALTPACKAGES with context %} {% from 'salt/map.jinja' import UPGRADECOMMAND with context %} +include: + - salt install_salt_minion: cmd.run: From 1e1d6a395d3c92e613503151cdb609abe9f1609f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 31 Jul 2020 13:25:37 -0400 Subject: [PATCH 087/376] cant get grains.filter_by to work for some reason --- salt/salt/map.jinja | 9 +++++---- salt/salt/minion.sls | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja index 5730f0303..39c8a33a8 100644 --- a/salt/salt/map.jinja +++ b/salt/salt/map.jinja @@ -1,10 +1,11 @@ {% import_yaml 'salt/minion.defaults.yaml' as salt %} {% set SALTVERSION = salt.salt.minion.version %} -{% set SALTPACKAGES = salt['grains.filter_by']({ - 'Ubuntu': {'common': 'salt-common'}, - 'Centos': {'common': 'salt'}, -}) %} +{% if grains.os|lower == 'ubuntu' %} + {% set COMMON = 'salt-common' %} +{% elif grains.os|lower == 'centos' %} + {% set COMMON = 'salt' %} +{% endif %} {% if grains.saltversion|string != SALTVERSION|string %} {% if grains.os|lower == 'centos' %} diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index a1311ebeb..b2d3a2913 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -1,4 +1,4 @@ -{% from 'salt/map.jinja' import SALTPACKAGES with context %} +{% from 'salt/map.jinja' import COMMON with context %} {% from 'salt/map.jinja' import UPGRADECOMMAND with context %} include: @@ -16,7 +16,7 @@ install_salt_minion: salt_minion_package: pkg.installed: - pkgs: - - {{ SALTPACKAGES.common }} + - {{ COMMON }} - salt-minion - hold: True From 13c9fa308948ea3ddfcf3bba715f4f3eee31d593 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 31 Jul 2020 13:32:12 -0400 Subject: [PATCH 088/376] test minion upgrade at end --- salt/common/tools/sbin/soup | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 764a61efa..eb281baae 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -300,6 +300,7 @@ highstate echo "" echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." +SALTUPGRADED="True" if [[ "$SALTUPGRADED" == "True" ]]; then echo "" echo "Upgrading Salt on the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION." 
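# [An illustrative one-liner, assuming a working Salt master and minion IDs
# ending in role suffixes like _sensor or _manager, showing the targeting that
# soup's grid upgrade relies on: '-C' takes a compound boolean matcher and
# '-b' caps how many minions act at once. test.ping is a harmless dry check
# of which minions the expression selects.]
salt -C 'not *_eval and not *_manager and not *_standalone' -b 5 test.ping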
From e3581bb76e1a384f860880737eec90914757f91c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 31 Jul 2020 13:36:21 -0400 Subject: [PATCH 089/376] change to salt-common --- salt/salt/map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja index 39c8a33a8..2f202e1a6 100644 --- a/salt/salt/map.jinja +++ b/salt/salt/map.jinja @@ -11,7 +11,7 @@ {% if grains.os|lower == 'centos' %} {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} {% elif grains.os|lower == 'ubuntu' %} - {% set UPGRADECOMMAND = 'apt-mark unhold salt && apt-mark unhold salt-minion && sh bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} + {% set UPGRADECOMMAND = 'apt-mark unhold salt-common && apt-mark unhold salt-minion && sh bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} {% endif %} {% else %} {% set UPGRADECOMMAND = 'echo Already running Salt Minon version ' ~ SALTVERSION %} From d16d2b6551b081528e204d1cd9206921813c491d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 31 Jul 2020 13:42:06 -0400 Subject: [PATCH 090/376] full path to salt bootstrap --- salt/salt/map.jinja | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja index 2f202e1a6..5b882a3a3 100644 --- a/salt/salt/map.jinja +++ b/salt/salt/map.jinja @@ -9,9 +9,9 @@ {% if grains.saltversion|string != SALTVERSION|string %} {% if grains.os|lower == 'centos' %} - {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} + {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh /usr/sbin/bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} {% elif grains.os|lower == 'ubuntu' %} - {% set UPGRADECOMMAND = 'apt-mark unhold salt-common && apt-mark unhold salt-minion && sh bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} + {% set UPGRADECOMMAND = 'apt-mark unhold salt-common && apt-mark unhold salt-minion && sh /usr/sbin/bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %} {% endif %} {% else %} {% set UPGRADECOMMAND = 'echo Already running Salt Minon version ' ~ SALTVERSION %} From d971d0772007c2225be20d30d861462ef093fb55 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Fri, 31 Jul 2020 16:06:15 -0400 Subject: [PATCH 091/376] Osquery & WLB Parsing Update for WEL & Sysmon --- salt/elasticsearch/files/ingest/common | 4 +- .../files/ingest/osquery.query_result | 76 +++---------------- salt/elasticsearch/files/ingest/sysmon | 43 +++++------ salt/elasticsearch/files/ingest/win.eventlogs | 6 +- 4 files changed, 34 insertions(+), 95 deletions(-) diff --git a/salt/elasticsearch/files/ingest/common b/salt/elasticsearch/files/ingest/common index 9db5a039b..b255ad86c 100644 --- a/salt/elasticsearch/files/ingest/common +++ b/salt/elasticsearch/files/ingest/common @@ -42,8 +42,8 @@ { "set": { "if": "ctx.event?.severity == 3", "field": "event.severity_label", "value": "high", "override": true } }, { "set": { "if": "ctx.event?.severity == 4", "field": "event.severity_label", "value": "critical", "override": true } }, { "rename": { "field": "module", "target_field": "event.module", "ignore_failure": true, "ignore_missing": true } }, - { "rename": { "field": "dataset", "target_field": "event.dataset", "ignore_failure": true, "ignore_missing": true } }, - { "rename": { "field": "category", "target_field": "event.category", "ignore_missing": true } }, + { "rename": { "field": "dataset", 
"target_field": "event.dataset", "ignore_failure": true, "ignore_missing": true } }, + { "rename": { "field": "category", "target_field": "event.category", "ignore_failure": true, "ignore_missing": true } }, { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_failure": true, "ignore_missing": true } }, { "lowercase": { "field": "event.dataset", "ignore_failure": true, "ignore_missing": true } }, { "convert": { "field": "destination.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } }, diff --git a/salt/elasticsearch/files/ingest/osquery.query_result b/salt/elasticsearch/files/ingest/osquery.query_result index 80ed32d73..2005252b6 100644 --- a/salt/elasticsearch/files/ingest/osquery.query_result +++ b/salt/elasticsearch/files/ingest/osquery.query_result @@ -2,78 +2,24 @@ "description" : "osquery", "processors" : [ { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } }, + { "gsub": { "field": "message2.columns.data", "pattern": "\\\\xC2\\\\xAE", "replacement": "", "ignore_missing": true } }, - { "json": { "field": "message2.columns.data", "target_field": "message2.columns.winlog", "ignore_failure": true } }, + { "rename": { "if": "ctx.message2.columns?.eventid != null", "field": "message2.columns", "target_field": "winlog", "ignore_missing": true } }, + { "json": { "field": "winlog.data", "target_field": "temp", "ignore_failure": true } }, + { "rename": { "field": "temp.Data", "target_field": "winlog.event_data", "ignore_missing": true } }, + { "rename": { "field": "winlog.source", "target_field": "winlog.channel", "ignore_missing": true } }, + { "rename": { "field": "winlog.eventid", "target_field": "winlog.event_id", "ignore_missing": true } }, + { "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } }, + { "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } }, + { "script": { "lang": "painless", "source": "def dict = ['result': new HashMap()]; for (entry in ctx['message2'].entrySet()) { dict['result'][entry.getKey()] = entry.getValue(); } ctx['osquery'] = dict; " } }, - { "rename": { "field": "osquery.result.hostIdentifier", "target_field": "osquery.result.host_identifier", "ignore_missing": true } }, - { "rename": { "field": "osquery.result.calendarTime", "target_field": "osquery.result.calendar_time", "ignore_missing": true } }, - { "rename": { "field": "osquery.result.unixTime", "target_field": "osquery.result.unix_time", "ignore_missing": true } }, - { "json": { "field": "message", "target_field": "message3", "ignore_failure": true } }, - { "gsub": { "field": "message3.columns.data", "pattern": "\\\\xC2\\\\xAE", "replacement": "", "ignore_missing": true } }, - { "json": { "field": "message3.columns.data", "target_field": "message3.columns.winlog", "ignore_failure": true } }, - { "rename": { "field": "message3.columns.username", "target_field": "user.name", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.uid", "target_field": "user.uid", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.gid", "target_field": "user.gid", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.shell", "target_field": "user.shell", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.cmdline", "target_field": "process.command_line", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.pid", "target_field": 
"process.pid", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.parent", "target_field": "process.ppid", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.cwd", "target_field": "process.working_directory", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.community_id", "target_field": "network.community_id", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.local_address", "target_field": "local.ip", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.local_port", "target_field": "local.port", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.remote_address", "target_field": "remote.ip", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.remote_port", "target_field": "remote.port", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.process_name", "target_field": "process.name", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.eventid", "target_field": "event.code", "ignore_missing": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational'", "field": "event.module", "value": "sysmon", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } }, - { "set": { "if": "ctx.message3.columns?.source != 'Microsoft-Windows-Sysmon/Operational'", "field": "event.dataset", "value": "{{message3.columns.source}}", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.dataset", "value": "process_creation", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 2", "field": "event.dataset", "value": "process_changed_file", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.dataset", "value": "network_connection", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 5", "field": "event.dataset", "value": "process_terminated", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 6", "field": "event.dataset", "value": "driver_loaded", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 7", "field": "event.dataset", "value": "image_loaded", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 8", "field": "event.dataset", "value": "create_remote_thread", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 9", "field": "event.dataset", "value": "raw_file_access_read", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 10", "field": "event.dataset", "value": "process_access", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 11", "field": "event.dataset", "value": "file_create", "override": true } }, - { "set": { 
"if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 12", "field": "event.dataset", "value": "registry_create_delete", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 13", "field": "event.dataset", "value": "registry_value_set", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 14", "field": "event.dataset", "value": "registry_key_value_rename", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 15", "field": "event.dataset", "value": "file_create_stream_hash", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 16", "field": "event.dataset", "value": "config_change", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 22", "field": "event.dataset", "value": "dns_query", "override": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.destinationHostname", "target_field": "destination.hostname", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.destinationIp", "target_field": "destination.ip", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.destinationPort", "target_field": "destination.port", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.Image", "target_field": "process.executable", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.ProcessID", "target_field": "process.pid", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.CommandLine", "target_field": "process.command_line", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.Description", "target_field": "process.pe.description", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.Product", "target_field": "process.pe.product", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.ParentProcessId", 
"target_field": "process.ppid", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.User", "target_field": "user.name", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.parentImage", "target_field": "parent_image_path", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.sourceHostname", "target_field": "source.hostname", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.sourceIp", "target_field": "source_ip", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.sourcePort", "target_field": "source.port", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.targetFilename", "target_field": "file.target", "ignore_missing": true } }, - { "remove": { "field": [ "message3"], "ignore_failure": false } }, + { "set": { "field": "event.module", "value": "osquery" } }, + { "set": { "field": "event.dataset", "value": "{{osquery.result.name}}"} }, { "pipeline": { "name": "common" } } ] } \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/sysmon b/salt/elasticsearch/files/ingest/sysmon index de6112d89..6e5f9e60f 100644 --- a/salt/elasticsearch/files/ingest/sysmon +++ b/salt/elasticsearch/files/ingest/sysmon @@ -2,29 +2,26 @@ "description" : "sysmon", "processors" : [ {"community_id": {"if": "ctx.winlog.event_data?.Protocol != null", "field":["winlog.event_data.SourceIp","winlog.event_data.SourcePort","winlog.event_data.DestinationIp","winlog.event_data.DestinationPort","winlog.event_data.Protocol"],"target_field":"network.community_id"}}, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "field": "event.module", "value": "sysmon", "override": true } }, - { "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } }, - { "set": { "if": "ctx.agent?.type != null", "field": "module", "value": "{{agent.type}}", "override": true } }, - { "set": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.category", "value": "host,process,network", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.category", "value": "host,process", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.dataset", "value": "process_creation", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 2", "field": "event.dataset", "value": "process_changed_file", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.dataset", "value": "network_connection", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 5", "field": "event.dataset", "value": "process_terminated", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 6", "field": "event.dataset", "value": "driver_loaded", 
"override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 7", "field": "event.dataset", "value": "image_loaded", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 8", "field": "event.dataset", "value": "create_remote_thread", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 9", "field": "event.dataset", "value": "raw_file_access_read", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 10", "field": "event.dataset", "value": "process_access", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 11", "field": "event.dataset", "value": "file_create", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 12", "field": "event.dataset", "value": "registry_create_delete", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 13", "field": "event.dataset", "value": "registry_value_set", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 14", "field": "event.dataset", "value": "registry_key_value_rename", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 15", "field": "event.dataset", "value": "file_create_stream_hash", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 16", "field": "event.dataset", "value": "config_change", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 22", "field": "event.dataset", "value": "dns_query", "override": true } }, - { "rename": { "field": "agent.hostname", "target_field": "agent.name", "ignore_missing": true } }, + { "set": { "field": "event.code", "value": "{{winlog.event_id}}", "override": true } }, + { "set": { "field": "event.module", "value": "sysmon", "override": true } }, + { "set": { "if": "ctx.event?.code == '3'", "field": "event.category", "value": "host,process,network", "override": true } }, + { "set": { "if": "ctx.event?.code == '1'", "field": "event.category", "value": "host,process", "override": true } }, + { "set": { "if": "ctx.event?.code == '1'", "field": "event.dataset", "value": "process_creation", "override": true } }, + { "set": { "if": "ctx.event?.code == '2'", "field": "event.dataset", "value": "process_changed_file", "override": true } }, + { "set": { "if": "ctx.event?.code == '3'", "field": "event.dataset", "value": "network_connection", "override": true } }, + { "set": { "if": "ctx.event?.code == '5'", "field": "event.dataset", "value": "process_terminated", "override": true } }, + { "set": { "if": "ctx.event?.code == '6'", "field": "event.dataset", "value": "driver_loaded", "override": true } }, + { "set": { "if": "ctx.event?.code == '7'", "field": "event.dataset", "value": "image_loaded", "override": true } }, + { "set": { "if": "ctx.event?.code == '8'", "field": "event.dataset", "value": "create_remote_thread", "override": true } }, + { "set": { "if": "ctx.event?.code == '9'", "field": "event.dataset", "value": "raw_file_access_read", 
"override": true } }, + { "set": { "if": "ctx.event?.code == '10'", "field": "event.dataset", "value": "process_access", "override": true } }, + { "set": { "if": "ctx.event?.code == '11'", "field": "event.dataset", "value": "file_create", "override": true } }, + { "set": { "if": "ctx.event?.code == '12'", "field": "event.dataset", "value": "registry_create_delete", "override": true } }, + { "set": { "if": "ctx.event?.code == '13'", "field": "event.dataset", "value": "registry_value_set", "override": true } }, + { "set": { "if": "ctx.event?.code == '14'", "field": "event.dataset", "value": "registry_key_value_rename", "override": true } }, + { "set": { "if": "ctx.event?.code == '15'", "field": "event.dataset", "value": "file_create_stream_hash", "override": true } }, + { "set": { "if": "ctx.event?.code == '16'", "field": "event.dataset", "value": "config_change", "override": true } }, + { "set": { "if": "ctx.event?.code == '22'", "field": "event.dataset", "value": "dns_query", "override": true } }, { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.DestinationHostname", "target_field": "destination.hostname", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.DestinationIp", "target_field": "destination.ip", "ignore_missing": true } }, diff --git a/salt/elasticsearch/files/ingest/win.eventlogs b/salt/elasticsearch/files/ingest/win.eventlogs index acdf97263..962286d3a 100644 --- a/salt/elasticsearch/files/ingest/win.eventlogs +++ b/salt/elasticsearch/files/ingest/win.eventlogs @@ -1,13 +1,9 @@ { "description" : "win.eventlogs", "processors" : [ - { "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } }, - { "set": { "if": "ctx.agent?.type != null", "field": "module", "value": "{{agent.type}}", "override": true } }, { "set": { "if": "ctx.winlog?.channel != null", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } }, - { "rename": { "field": "agent.hostname", "target_field": "agent.name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } } ] } \ No newline at end of file From ecafbc60147862109fca451cd5a5bca6bc29e5df Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Fri, 31 Jul 2020 20:12:25 +0000 Subject: [PATCH 092/376] Add AWS Standalone Defaults --- setup/automation/aws_standalone_defaults | 77 ++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 setup/automation/aws_standalone_defaults diff --git a/setup/automation/aws_standalone_defaults b/setup/automation/aws_standalone_defaults new file mode 100644 index 000000000..3bf630bd1 --- /dev/null +++ b/setup/automation/aws_standalone_defaults @@ -0,0 +1,77 @@ +#!/bin/bash + +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+TESTING=true
+
+address_type=DHCP
+ADMINUSER=onionuser
+ADMINPASS1=onionuser
+ADMINPASS2=onionuser
+ALLOW_CIDR=0.0.0.0/0
+ALLOW_ROLE=a
+BASICZEEK=7
+BASICSURI=7
+# BLOGS=
+BNICS=ens6
+ZEEKVERSION=ZEEK
+# CURCLOSEDAYS=
+# EVALADVANCED=BASIC
+GRAFANA=1
+# HELIXAPIKEY=
+HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
+HNSENSOR=inherit
+HOSTNAME=standalone
+install_type=STANDALONE
+# LSINPUTBATCHCOUNT=
+# LSINPUTTHREADS=
+# LSPIPELINEBATCH=
+# LSPIPELINEWORKERS=
+MANAGERADV=BASIC
+MANAGERUPDATES=1
+# MDNS=
+# MGATEWAY=
+# MIP=
+# MMASK=
+MNIC=ens5
+# MSEARCH=
+# MSRV=
+# MTU=
+NIDS=Suricata
+# NODE_ES_HEAP_SIZE=
+# NODE_LS_HEAP_SIZE=
+NODESETUP=NODEBASIC
+NSMSETUP=BASIC
+NODEUPDATES=MANAGER
+# OINKCODE=
+OSQUERY=1
+# PATCHSCHEDULEDAYS=
+# PATCHSCHEDULEHOURS=
+PATCHSCHEDULENAME=auto
+PLAYBOOK=1
+# REDIRECTHOST=
+REDIRECTINFO=HOSTNAME
+RULESETUP=ETOPEN
+# SHARDCOUNT=
+SKIP_REBOOT=1
+SOREMOTEPASS1=onionuser
+SOREMOTEPASS2=onionuser
+STRELKA=1
+THEHIVE=1
+WAZUH=1
+WEBUSER=onionuser@somewhere.invalid
+WEBPASSWD1=0n10nus3r
+WEBPASSWD2=0n10nus3r

From f47128824e53c5555a32c3d849018d1cfe921d7c Mon Sep 17 00:00:00 2001
From: Jason Ertel
Date: Sun, 2 Aug 2020 09:04:29 -0400
Subject: [PATCH 093/376] Before finishing setup, rescan the log file and root mailbox for errors

---
 setup/so-setup | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/setup/so-setup b/setup/so-setup
index e4af88205..68ca99824 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -631,6 +631,8 @@ fi
 success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}')
 if [[ $success != 0 ]]; then SO_ERROR=1; fi
+# Check entire setup log for errors or unexpected salt states and ensure cron jobs are not reporting errors to root's mailbox
+if grep -q -E "ERROR|Result: False" $setup_log || [[ -s /var/spool/mail/root ]]; then SO_ERROR=1; fi

 if [[ -n $SO_ERROR ]]; then
   echo "Errors detected during setup; skipping post-setup steps to allow for analysis of failures." 
>> $setup_log 2>&1

From 7e2917fc99fd6814b8b4f0087b75d94ba42bec82 Mon Sep 17 00:00:00 2001
From: weslambert
Date: Mon, 3 Aug 2020 10:31:03 -0400
Subject: [PATCH 094/376] Reboot after setup is finished

---
 setup/automation/aws_standalone_defaults | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup/automation/aws_standalone_defaults b/setup/automation/aws_standalone_defaults
index 3bf630bd1..3e27bd9e2 100644
--- a/setup/automation/aws_standalone_defaults
+++ b/setup/automation/aws_standalone_defaults
@@ -66,7 +66,7 @@ PLAYBOOK=1
 REDIRECTINFO=HOSTNAME
 RULESETUP=ETOPEN
 # SHARDCOUNT=
-SKIP_REBOOT=1
+SKIP_REBOOT=0
 SOREMOTEPASS1=onionuser
 SOREMOTEPASS2=onionuser
 STRELKA=1

From fb887f7d9ee58fd2ddb103138e04dd59e7fd7151 Mon Sep 17 00:00:00 2001
From: m0duspwnens
Date: Mon, 3 Aug 2020 10:47:24 -0400
Subject: [PATCH 095/376] Install SaltStack 3001 during setup

---
 salt/salt/master.defaults.yaml | 1 +
 salt/salt/minion.defaults.yaml | 1 +
 setup/so-functions | 28 ++++++++++++++--------------
 setup/yum_repos/salt-2019-2-5.repo | 6 ------
 setup/yum_repos/salt-latest.repo | 7 -------
 setup/yum_repos/saltstack.repo | 6 ++++++
 6 files changed, 22 insertions(+), 27 deletions(-)
 delete mode 100644 setup/yum_repos/salt-2019-2-5.repo
 delete mode 100644 setup/yum_repos/salt-latest.repo
 create mode 100644 setup/yum_repos/saltstack.repo

diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml
index a34a96b9e..c366ae6ce 100644
--- a/salt/salt/master.defaults.yaml
+++ b/salt/salt/master.defaults.yaml
@@ -1,4 +1,5 @@
 #version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
+# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
 salt:
   master:
     version: 3001
\ No newline at end of file
diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml
index 4978a4a73..cd061237b 100644
--- a/salt/salt/minion.defaults.yaml
+++ b/salt/salt/minion.defaults.yaml
@@ -1,4 +1,5 @@
 #version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
+# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
 salt:
   minion:
     version: 3001
\ No newline at end of file
diff --git a/setup/so-functions b/setup/so-functions
index 2f1ea7198..cb9c75437 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -1251,14 +1251,15 @@ reserve_group_ids() {
   groupadd -g 946 cyberchef
 }

+# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and salt/salt/master.defaults.yaml and salt/salt/minion.defaults.yaml
 saltify() {
   # Install updates and Salt
   if [ $OS = 'centos' ]; then
     set_progress_str 5 'Installing Salt repo'
     {
-      sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/archive/2019.2.5/SALTSTACK-GPG-KEY.pub;
-      cp ./yum_repos/salt-2019-2-5.repo /etc/yum.repos.d/salt-2019-2-5.repo;
+      sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/3001/SALTSTACK-GPG-KEY.pub;
+      cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo;
     } >> "$setup_log" 2>&1
     set_progress_str 6 'Installing various dependencies'
     yum -y install wget nmap-ncat >> "$setup_log" 2>&1
@@ -1269,12 +1270,12 @@ saltify() {
     yum -y install sqlite argon2 curl mariadb-devel >> "$setup_log" 2>&1
     # Download Ubuntu Keys in case manager updates = 1
     mkdir -p /opt/so/gpg >> 
"$setup_log" 2>&1 - wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 + wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3001/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1 wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1 cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo >> "$setup_log" 2>&1 set_progress_str 7 'Installing salt-master' - yum -y install salt-master-2019.2.5 >> "$setup_log" 2>&1 + yum -y install salt-master-3001 >> "$setup_log" 2>&1 systemctl enable salt-master >> "$setup_log" 2>&1 ;; *) @@ -1284,8 +1285,7 @@ saltify() { cp ./public_keys/salt.pem /etc/pki/rpm-gpg/saltstack-signing-key; # Copy repo files over - cp ./yum_repos/salt-latest.repo /etc/yum.repos.d/salt-latest.repo; - cp ./yum_repos/salt-2019-2-5.repo /etc/yum.repos.d/salt-2019-2-5.repo; + cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo; } >> "$setup_log" 2>&1 fi ;; @@ -1295,7 +1295,7 @@ saltify() { set_progress_str 8 'Installing salt-minion & python modules' { yum -y install epel-release - yum -y install salt-minion-2019.2.5\ + yum -y install salt-minion-3001\ python3\ python36-docker\ python36-dateutil\ @@ -1338,8 +1338,8 @@ saltify() { 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE') # TODO: should this also be HELIXSENSOR? # Add saltstack repo(s) - wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/2019.2.5/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1 - echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/2019.2.5 $OSVER main" > /etc/apt/sources.list.d/saltstack2019.list 2>> "$setup_log" + wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3001/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1 + echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" # Add Docker repo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1 @@ -1347,7 +1347,7 @@ saltify() { # Get gpg keys mkdir -p /opt/so/gpg >> "$setup_log" 2>&1 - wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/latest/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 + wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3001/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1 wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1 @@ -1360,7 +1360,7 @@ saltify() { set_progress_str 6 'Installing various dependencies' apt-get -y install sqlite3 argon2 libssl-dev >> "$setup_log" 2>&1 set_progress_str 7 'Installing salt-master' - apt-get -y install salt-master=2019.2.5+ds-1 >> "$setup_log" 2>&1 + apt-get -y install salt-master=3001+ds-1 >> "$setup_log" 2>&1 apt-mark hold salt-master >> "$setup_log" 2>&1 ;; *) @@ -1371,14 +1371,14 @@ saltify() { echo "Using 
apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
       apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
       apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1
-      echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/2019.2.5/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
+      echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
       echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
       ;;
   esac
   apt-get update >> "$setup_log" 2>&1
   set_progress_str 8 'Installing salt-minion & python modules'
-  apt-get -y install salt-minion=2019.2.5+ds-1\
-  salt-common=2019.2.5+ds-1 >> "$setup_log" 2>&1
+  apt-get -y install salt-minion=3001+ds-1\
+  salt-common=3001+ds-1 >> "$setup_log" 2>&1
   apt-mark hold salt-minion salt-common >> "$setup_log" 2>&1
   if [ "$OSVER" != 'xenial' ]; then
     apt-get -y install python3-dateutil python3-m2crypto python3-mysqldb >> "$setup_log" 2>&1
diff --git a/setup/yum_repos/salt-2019-2-5.repo b/setup/yum_repos/salt-2019-2-5.repo
deleted file mode 100644
index e456fdd87..000000000
--- a/setup/yum_repos/salt-2019-2-5.repo
+++ /dev/null
@@ -1,6 +0,0 @@
-[saltstack-repo]
-name=SaltStack repo for RHEL/CentOS $releasever PY3
-baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/2019.2.5/
-enabled=1
-gpgcheck=1
-gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/2019.2.5/SALTSTACK-GPG-KEY.pub
diff --git a/setup/yum_repos/salt-latest.repo b/setup/yum_repos/salt-latest.repo
deleted file mode 100644
index 709053a9b..000000000
--- a/setup/yum_repos/salt-latest.repo
+++ /dev/null
@@ -1,7 +0,0 @@
-[salt-latest]
-name=SaltStack Latest Release Channel for RHEL/Centos $releasever
-baseurl=https://repo.saltstack.com/py3/redhat/7/$basearch/latest
-failovermethod=priority
-enabled=1
-gpgcheck=1
-gpgkey=https://repo.saltstack.com/py3/redhat/$releasever/$basearch/latest/SALTSTACK-GPG-KEY.pub
\ No newline at end of file
diff --git a/setup/yum_repos/saltstack.repo b/setup/yum_repos/saltstack.repo
new file mode 100644
index 000000000..f04f02be0
--- /dev/null
+++ b/setup/yum_repos/saltstack.repo
@@ -0,0 +1,6 @@
+[saltstack-repo]
+name=SaltStack repo for RHEL/CentOS $releasever PY3
+baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001/
+enabled=1
+gpgcheck=1
+gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001/SALTSTACK-GPG-KEY.pub

From d1641aa0d825454c2eaed29c1fdcfb04f0d08533 Mon Sep 17 00:00:00 2001
From: Jason Ertel
Date: Mon, 3 Aug 2020 15:49:18 -0400
Subject: [PATCH 096/376] chown /var/ossec dir to match the required user/group ownership for ossec-agentd

---
 salt/wazuh/init.sls | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls
index 2695febd5..09c4e258b 100644
--- a/salt/wazuh/init.sls
+++ b/salt/wazuh/init.sls
@@ -46,6 +46,15 @@ wazuhpkgs:
     - hold: True
     - update_holds: True

+wazuhvarossecdir:
+  file.directory:
+    - name: /var/ossec
+    - user: ossec
+    - group: ossec
+    - recurse:
+      - user
+      - group
+
 # Add Wazuh agent conf
 wazuhagentconf:
   file.managed:

From 7c1120e47d309bad331913113b829e036fd87969 Mon Sep 17 00:00:00 2001
From: Mike Reeves
Date: Mon, 3 Aug 2020 18:48:01 -0400
Subject: [PATCH 097/376] Fix grafana monitor interface.
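The fix below is pure shell quoting: an unquoted $INTERFACE undergoes word
splitting when expanded, so a value containing whitespace reaches addtotab.sh
as multiple arguments and shifts every positional parameter after it. A
minimal sketch of the difference, using hypothetical interface names rather
than anything taken from the installer:

```bash
#!/bin/bash
# Hypothetical multi-NIC value; monitoring more than one interface is
# how whitespace can end up inside $INTERFACE.
INTERFACE="ens6 ens7"

show_args() { echo "$# argument(s):" "$@"; }

show_args $INTERFACE    # unquoted: word-splits into 2 argument(s): ens6 ens7
show_args "$INTERFACE"  # quoted: stays a single argument: ens6 ens7
```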
--- setup/so-functions | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index cb9c75437..ad4b4252f 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1599,13 +1599,13 @@ set_initial_firewall_policy() { $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP" case "$install_type" in 'EVAL') - $default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE True + $default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE" True ;; 'MANAGERSEARCH') $default_salt_dir/pillar/data/addtotab.sh managersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" ;; 'STANDALONE') - $default_salt_dir/pillar/data/addtotab.sh standalonetab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE + $default_salt_dir/pillar/data/addtotab.sh standalonetab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE" ;; esac ;; @@ -1619,7 +1619,7 @@ set_initial_firewall_policy() { case "$install_type" in 'SENSOR') ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP" - ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE + ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE" ;; 'SEARCHNODE') ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP" @@ -1628,7 +1628,7 @@ set_initial_firewall_policy() { 'HEAVYNODE') ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP" ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP" - ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE + ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE" ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" ;; 'FLEET') From 2290c28a07d838c22e2edfae71f9cfef28531ea5 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Tue, 4 Aug 2020 03:49:59 +0000 Subject: [PATCH 098/376] AWS defaults modifications --- setup/automation/aws_eval_defaults | 77 ++++++++++++++++++++++++ setup/automation/aws_standalone_defaults | 2 +- 2 files changed, 78 insertions(+), 1 deletion(-) create mode 100644 setup/automation/aws_eval_defaults diff --git a/setup/automation/aws_eval_defaults b/setup/automation/aws_eval_defaults new 
file mode 100644
index 000000000..e038bf29d
--- /dev/null
+++ b/setup/automation/aws_eval_defaults
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+TESTING=true
+
+address_type=DHCP
+ADMINUSER=onionuser
+ADMINPASS1=onionuser
+ADMINPASS2=onionuser
+ALLOW_CIDR=0.0.0.0/0
+ALLOW_ROLE=a
+BASICZEEK=7
+BASICSURI=7
+# BLOGS=
+BNICS=ens6
+ZEEKVERSION=ZEEK
+# CURCLOSEDAYS=
+# EVALADVANCED=BASIC
+GRAFANA=1
+# HELIXAPIKEY=
+HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
+HNSENSOR=inherit
+HOSTNAME=eval-aws
+install_type=EVAL
+# LSINPUTBATCHCOUNT=
+# LSINPUTTHREADS=
+# LSPIPELINEBATCH=
+# LSPIPELINEWORKERS=
+MANAGERADV=BASIC
+MANAGERUPDATES=1
+# MDNS=
+# MGATEWAY=
+# MIP=
+# MMASK=
+MNIC=ens5
+# MSEARCH=
+# MSRV=
+# MTU=
+NIDS=Suricata
+# NODE_ES_HEAP_SIZE=
+# NODE_LS_HEAP_SIZE=
+NODESETUP=NODEBASIC
+NSMSETUP=BASIC
+NODEUPDATES=MANAGER
+# OINKCODE=
+OSQUERY=1
+# PATCHSCHEDULEDAYS=
+# PATCHSCHEDULEHOURS=
+PATCHSCHEDULENAME=auto
+PLAYBOOK=1
+# REDIRECTHOST=
+REDIRECTINFO=HOSTNAME
+RULESETUP=ETOPEN
+# SHARDCOUNT=
+SKIP_REBOOT=0
+SOREMOTEPASS1=onionuser
+SOREMOTEPASS2=onionuser
+STRELKA=1
+THEHIVE=1
+WAZUH=1
+WEBUSER=onionuser@somewhere.invalid
+WEBPASSWD1=0n10nus3r
+WEBPASSWD2=0n10nus3r
diff --git a/setup/automation/aws_standalone_defaults b/setup/automation/aws_standalone_defaults
index 3e27bd9e2..25d3da0e0 100644
--- a/setup/automation/aws_standalone_defaults
+++ b/setup/automation/aws_standalone_defaults
@@ -34,7 +34,7 @@ GRAFANA=1
 # HELIXAPIKEY=
 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
 HNSENSOR=inherit
-HOSTNAME=standalone
+HOSTNAME=standalone-aws
 install_type=STANDALONE
 # LSINPUTBATCHCOUNT=
 # LSINPUTTHREADS=

From 46f70c254ce013494247774dac7a09a5ad59a4c0 Mon Sep 17 00:00:00 2001
From: Wes Lambert
Date: Tue, 4 Aug 2020 14:11:50 +0000
Subject: [PATCH 099/376] Add AWS defaults file for manager

---
 setup/automation/aws_manager_defaults | 77 +++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)
 create mode 100644 setup/automation/aws_manager_defaults

diff --git a/setup/automation/aws_manager_defaults b/setup/automation/aws_manager_defaults
new file mode 100644
index 000000000..2ca5c2a04
--- /dev/null
+++ b/setup/automation/aws_manager_defaults
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +TESTING=true + +address_type=DHCP +ADMINUSER=onionuser +ADMINPASS1=onionuser +ADMINPASS2=onionuser +ALLOW_CIDR=0.0.0.0/0 +ALLOW_ROLE=a +BASICZEEK=7 +BASICSURI=7 +# BLOGS= +BNICS=ens6 +ZEEKVERSION=ZEEK +# CURCLOSEDAYS= +# EVALADVANCED=BASIC +GRAFANA=1 +# HELIXAPIKEY= +HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 +HNSENSOR=inherit +HOSTNAME=manager-aws +install_type=MANAGER +# LSINPUTBATCHCOUNT= +# LSINPUTTHREADS= +# LSPIPELINEBATCH= +# LSPIPELINEWORKERS= +MANAGERADV=BASIC +MANAGERUPDATES=1 +# MDNS= +# MGATEWAY= +# MIP= +# MMASK= +MNIC=ens5 +# MSEARCH= +# MSRV= +# MTU= +NIDS=Suricata +# NODE_ES_HEAP_SIZE= +# NODE_LS_HEAP_SIZE= +NODESETUP=NODEBASIC +NSMSETUP=BASIC +NODEUPDATES=MANAGER +# OINKCODE= +OSQUERY=1 +# PATCHSCHEDULEDAYS= +# PATCHSCHEDULEHOURS= +PATCHSCHEDULENAME=auto +PLAYBOOK=1 +# REDIRECTHOST= +REDIRECTINFO=HOSTNAME +RULESETUP=ETOPEN +# SHARDCOUNT= +SKIP_REBOOT=0 +SOREMOTEPASS1=onionuser +SOREMOTEPASS2=onionuser +STRELKA=1 +THEHIVE=1 +WAZUH=1 +WEBUSER=onionuser@somewhere.invalid +WEBPASSWD1=0n10nus3r +WEBPASSWD2=0n10nus3r From 549bf7ba196ad8559d667d478a7ba12cb86d66bc Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 10:17:43 -0400 Subject: [PATCH 100/376] Activate minio --- salt/minio/init.sls | 8 -------- salt/top.sls | 2 ++ 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/salt/minio/init.sls b/salt/minio/init.sls index 2d5941301..fa9d2f2de 100644 --- a/salt/minio/init.sls +++ b/salt/minio/init.sls @@ -31,14 +31,6 @@ miniodatadir: - group: 939 - makedirs: True -#redisconfsync: -# file.recurse: -# - name: /opt/so/conf/redis/etc -# - source: salt://redis/etc -# - user: 939 -# - group: 939 -# - template: jinja - minio/minio: docker_image.present diff --git a/salt/top.sls b/salt/top.sls index 599f67dca..ff2fbfb0e 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -150,6 +150,7 @@ base: - wazuh {%- endif %} - logstash + - minio - kibana - elastalert - filebeat @@ -197,6 +198,7 @@ base: - wazuh {%- endif %} - logstash + - minio - kibana - pcap - suricata From 24ed92c9dc9b597523f041a17965944383b010a1 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 15:54:03 -0400 Subject: [PATCH 101/376] minio and change to global --- pillar/docker/config.sls | 6 ++--- pillar/logstash/manager.sls | 2 +- pillar/logstash/search.sls | 2 +- pillar/top.sls | 18 +++++++-------- salt/common/maps/so-status.map.jinja | 8 +++---- salt/common/tools/sbin/so-elastic-clear | 2 +- salt/common/tools/sbin/so-features-enable | 6 ++--- salt/common/tools/sbin/so-import-pcap | 6 ++--- .../common/tools/sbin/so-kibana-config-export | 6 ++--- salt/common/tools/sbin/soup | 2 +- salt/curator/init.sls | 4 ++-- salt/deprecated-launcher/init.sls | 2 +- salt/domainstats/init.sls | 2 +- .../files/rules/so/suricata_thehive.yaml | 6 ++--- .../files/rules/so/wazuh_thehive.yaml | 6 ++--- salt/elastalert/init.sls | 4 ++-- salt/elasticsearch/init.sls | 4 ++-- salt/filebeat/etc/filebeat.yml | 8 +++---- salt/filebeat/init.sls | 6 ++--- salt/firewall/assigned_hostgroups.map.yaml | 8 +++++++ salt/firewall/portgroups.yaml | 3 +++ salt/fleet/event_gen-packages.sls | 10 ++++----- salt/fleet/event_update-custom-hostname.sls | 2 +- salt/fleet/init.sls | 6 ++--- salt/fleet/install_package.sls | 10 ++++----- salt/freqserver/init.sls | 2 +- salt/grafana/etc/datasources/influxdb.yaml | 2 +- salt/grafana/init.sls | 4 ++-- salt/idstools/init.sls | 4 ++-- salt/influxdb/init.sls | 4 ++-- 
salt/kibana/bin/so-kibana-config-load | 4 ++-- salt/kibana/init.sls | 4 ++-- salt/logstash/init.sls | 4 ++-- .../config/so/0899_input_minio.conf.jinja | 22 +++++++++++++++++++ .../config/so/0900_input_redis.conf.jinja | 2 +- .../config/so/9998_output_minio.conf.jinja | 17 ++++++++++++++ .../config/so/9999_output_redis.conf.jinja | 2 +- salt/manager/init.sls | 6 ++--- salt/minio/init.sls | 18 ++++++++++----- salt/mysql/init.sls | 8 +++---- salt/nginx/etc/nginx.conf.so-eval | 6 ++--- salt/nginx/etc/nginx.conf.so-manager | 6 ++--- salt/nginx/etc/nginx.conf.so-managersearch | 6 ++--- salt/nginx/etc/nginx.conf.so-standalone | 6 ++--- salt/nginx/files/navigator_config.json | 2 +- salt/nginx/init.sls | 8 +++---- salt/nodered/files/nodered_load_flows | 2 +- salt/nodered/files/so_flows.json | 6 ++--- salt/nodered/init.sls | 2 +- salt/pcap/files/sensoroni.json | 2 +- salt/pcap/init.sls | 4 ++-- salt/playbook/init.sls | 4 ++-- salt/reactor/fleet.sls | 2 +- salt/redis/init.sls | 4 ++-- salt/soc/files/soc/soc.json | 4 ++-- salt/soc/init.sls | 4 ++-- salt/soctopus/files/SOCtopus.conf | 4 ++-- .../files/templates/es-generic.template | 2 +- .../soctopus/files/templates/generic.template | 6 ++--- .../soctopus/files/templates/osquery.template | 6 ++--- salt/soctopus/init.sls | 6 ++--- salt/ssl/init.sls | 4 ++-- salt/strelka/files/backend/backend.yaml | 2 +- salt/strelka/files/filestream/filestream.yaml | 2 +- salt/strelka/files/frontend/frontend.yaml | 2 +- salt/strelka/files/manager/manager.yaml | 2 +- salt/strelka/init.sls | 6 ++--- salt/suricata/init.sls | 6 ++--- salt/suricata/suricata_config.map.jinja | 4 ++-- salt/tcpreplay/init.sls | 4 ++-- salt/telegraf/init.sls | 4 ++-- salt/thehive/etc/application.conf | 6 ++--- salt/thehive/etc/cortex-application.conf | 4 ++-- salt/thehive/init.sls | 4 ++-- salt/thehive/scripts/cortex_init | 20 ++++++++--------- salt/thehive/scripts/hive_init | 12 +++++----- salt/top.sls | 8 +++---- salt/wazuh/files/agent/ossec.conf | 2 +- salt/wazuh/files/agent/wazuh-register-agent | 2 +- salt/wazuh/files/wazuh-manager-whitelist | 4 ++-- salt/wazuh/init.sls | 4 ++-- salt/yum/etc/yum.conf.jinja | 2 +- salt/zeek/init.sls | 4 ++-- setup/so-functions | 21 +++++++++--------- setup/so-setup | 6 ++--- 85 files changed, 262 insertions(+), 207 deletions(-) create mode 100644 salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja create mode 100644 salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja diff --git a/pillar/docker/config.sls b/pillar/docker/config.sls index 4d70fd517..647151eef 100644 --- a/pillar/docker/config.sls +++ b/pillar/docker/config.sls @@ -1,11 +1,11 @@ -{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%} -{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%} +{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%} +{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%} {% set WAZUH = salt['pillar.get']('manager:wazuh', '0') %} {% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %} {% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %} {% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %} {% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %} -{% set ZEEKVER = salt['pillar.get']('static:zeekversion', 'COMMUNITY') %} +{% set ZEEKVER = salt['pillar.get']('global:zeekversion', 'COMMUNITY') %} {% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %} eval: diff --git a/pillar/logstash/manager.sls b/pillar/logstash/manager.sls 
index 9c16d2625..861b8f665 100644 --- a/pillar/logstash/manager.sls +++ b/pillar/logstash/manager.sls @@ -4,4 +4,4 @@ logstash: config: - so/0009_input_beats.conf - so/0010_input_hhbeats.conf - - so/9999_output_redis.conf.jinja + - so/9998_output_minio.conf.jinja diff --git a/pillar/logstash/search.sls b/pillar/logstash/search.sls index 486deb408..cad849153 100644 --- a/pillar/logstash/search.sls +++ b/pillar/logstash/search.sls @@ -2,7 +2,7 @@ logstash: pipelines: search: config: - - so/0900_input_redis.conf.jinja + - so/0899_input_minio.conf.jinja - so/9000_output_zeek.conf.jinja - so/9002_output_import.conf.jinja - so/9034_output_syslog.conf.jinja diff --git a/pillar/top.sls b/pillar/top.sls index 889f0b63f..c11b66eaa 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -14,14 +14,14 @@ base: - elasticsearch.search '*_sensor': - - static + - global - zeeklogs - healthcheck.sensor - minions.{{ grains.id }} '*_manager or *_managersearch': - match: compound - - static + - global - data.* - secrets - minions.{{ grains.id }} @@ -36,7 +36,7 @@ base: - secrets - healthcheck.eval - elasticsearch.eval - - static + - global - minions.{{ grains.id }} '*_standalone': @@ -48,20 +48,20 @@ base: - zeeklogs - secrets - healthcheck.standalone - - static + - global - minions.{{ grains.id }} '*_node': - - static + - global - minions.{{ grains.id }} '*_heavynode': - - static + - global - zeeklogs - minions.{{ grains.id }} '*_helix': - - static + - global - fireeye - zeeklogs - logstash @@ -69,13 +69,13 @@ base: - minions.{{ grains.id }} '*_fleet': - - static + - global - data.* - secrets - minions.{{ grains.id }} '*_searchnode': - - static + - global - logstash - logstash.search - elasticsearch.search diff --git a/salt/common/maps/so-status.map.jinja b/salt/common/maps/so-status.map.jinja index 93f5f3d13..21dd14ec9 100644 --- a/salt/common/maps/so-status.map.jinja +++ b/salt/common/maps/so-status.map.jinja @@ -20,7 +20,7 @@ {% if role in ['eval', 'managersearch', 'manager', 'standalone'] %} {{ append_containers('manager', 'grafana', 0) }} - {{ append_containers('static', 'fleet_manager', 0) }} + {{ append_containers('global', 'fleet_manager', 0) }} {{ append_containers('manager', 'wazuh', 0) }} {{ append_containers('manager', 'thehive', 0) }} {{ append_containers('manager', 'playbook', 0) }} @@ -29,11 +29,11 @@ {% endif %} {% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %} - {{ append_containers('static', 'strelka', 0) }} + {{ append_containers('global', 'strelka', 0) }} {% endif %} {% if role in ['heavynode', 'standalone'] %} - {{ append_containers('static', 'zeekversion', 'SURICATA') }} + {{ append_containers('global', 'zeekversion', 'SURICATA') }} {% endif %} {% if role == 'searchnode' %} @@ -41,5 +41,5 @@ {% endif %} {% if role == 'sensor' %} - {{ append_containers('static', 'zeekversion', 'SURICATA') }} + {{ append_containers('global', 'zeekversion', 'SURICATA') }} {% endif %} \ No newline at end of file diff --git a/salt/common/tools/sbin/so-elastic-clear b/salt/common/tools/sbin/so-elastic-clear index 04c153f85..15b1041e1 100755 --- a/salt/common/tools/sbin/so-elastic-clear +++ b/salt/common/tools/sbin/so-elastic-clear @@ -14,7 +14,7 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') -%} . 
/usr/sbin/so-common SKIP=0 diff --git a/salt/common/tools/sbin/so-features-enable b/salt/common/tools/sbin/so-features-enable index c94aebcba..070ecedc0 100755 --- a/salt/common/tools/sbin/so-features-enable +++ b/salt/common/tools/sbin/so-features-enable @@ -29,9 +29,9 @@ manager_check() { } manager_check -VERSION=$(grep soversion $local_salt_dir/pillar/static.sls | cut -d':' -f2|sed 's/ //g') -# Modify static.sls to enable Features -sed -i 's/features: False/features: True/' $local_salt_dir/pillar/static.sls +VERSION=$(grep soversion $local_salt_dir/pillar/global.sls | cut -d':' -f2|sed 's/ //g') +# Modify global.sls to enable Features +sed -i 's/features: False/features: True/' $local_salt_dir/pillar/global.sls SUFFIX="-features" TRUSTED_CONTAINERS=( \ "so-elasticsearch:$VERSION$SUFFIX" \ diff --git a/salt/common/tools/sbin/so-import-pcap b/salt/common/tools/sbin/so-import-pcap index aef6e98d8..6e2d98daa 100755 --- a/salt/common/tools/sbin/so-import-pcap +++ b/salt/common/tools/sbin/so-import-pcap @@ -16,9 +16,9 @@ # along with this program. If not, see . {% set MANAGER = salt['grains.get']('master') %} -{% set VERSION = salt['pillar.get']('static:soversion') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} -{%- set MANAGERIP = salt['pillar.get']('static:managerip') -%} +{% set VERSION = salt['pillar.get']('global:soversion') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip') -%} . /usr/sbin/so-common diff --git a/salt/common/tools/sbin/so-kibana-config-export b/salt/common/tools/sbin/so-kibana-config-export index 8ee3f59b5..6542c3f04 100755 --- a/salt/common/tools/sbin/so-kibana-config-export +++ b/salt/common/tools/sbin/so-kibana-config-export @@ -1,8 +1,8 @@ #!/bin/bash # -# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%} -# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%} -# {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', '') %} +# {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager', False) -%} +# {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node', False) -%} +# {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', '') %} # {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %} # # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index eb281baae..48d9314a3 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -159,7 +159,7 @@ update_version() { # Update the version to the latest echo "Updating the Security Onion version file." 
echo $NEWVERSION > /etc/soversion - sed -i "s/$INSTALLEDVERSION/$NEWVERSION/g" /opt/so/saltstack/local/pillar/static.sls + sed -i "s/$INSTALLEDVERSION/$NEWVERSION/g" /opt/so/saltstack/local/pillar/global.sls } upgrade_check() { diff --git a/salt/curator/init.sls b/salt/curator/init.sls index 8873f401a..b98eaf6cb 100644 --- a/salt/curator/init.sls +++ b/salt/curator/init.sls @@ -1,5 +1,5 @@ -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone'] %} # Curator diff --git a/salt/deprecated-launcher/init.sls b/salt/deprecated-launcher/init.sls index 3ba9ad3a6..3805be5d7 100644 --- a/salt/deprecated-launcher/init.sls +++ b/salt/deprecated-launcher/init.sls @@ -1,4 +1,4 @@ -{%- set FLEETSETUP = salt['pillar.get']('static:fleetsetup', '0') -%} +{%- set FLEETSETUP = salt['pillar.get']('global:fleetsetup', '0') -%} {%- if FLEETSETUP != 0 %} launcherpkg: diff --git a/salt/domainstats/init.sls b/salt/domainstats/init.sls index 8d329c785..764435e5f 100644 --- a/salt/domainstats/init.sls +++ b/salt/domainstats/init.sls @@ -13,7 +13,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} # Create the group dstatsgroup: diff --git a/salt/elastalert/files/rules/so/suricata_thehive.yaml b/salt/elastalert/files/rules/so/suricata_thehive.yaml index fb6c6448d..0135edadd 100644 --- a/salt/elastalert/files/rules/so/suricata_thehive.yaml +++ b/salt/elastalert/files/rules/so/suricata_thehive.yaml @@ -1,6 +1,6 @@ -{% set es = salt['pillar.get']('static:managerip', '') %} -{% set hivehost = salt['pillar.get']('static:managerip', '') %} -{% set hivekey = salt['pillar.get']('static:hivekey', '') %} +{% set es = salt['pillar.get']('global:managerip', '') %} +{% set hivehost = salt['pillar.get']('global:managerip', '') %} +{% set hivekey = salt['pillar.get']('global:hivekey', '') %} {% set MANAGER = salt['pillar.get']('manager:url_base', '') %} # Elastalert rule to forward Suricata alerts from Security Onion to a specified TheHive instance. diff --git a/salt/elastalert/files/rules/so/wazuh_thehive.yaml b/salt/elastalert/files/rules/so/wazuh_thehive.yaml index c01bb5894..8aa085566 100644 --- a/salt/elastalert/files/rules/so/wazuh_thehive.yaml +++ b/salt/elastalert/files/rules/so/wazuh_thehive.yaml @@ -1,6 +1,6 @@ -{% set es = salt['pillar.get']('static:managerip', '') %} -{% set hivehost = salt['pillar.get']('static:managerip', '') %} -{% set hivekey = salt['pillar.get']('static:hivekey', '') %} +{% set es = salt['pillar.get']('global:managerip', '') %} +{% set hivehost = salt['pillar.get']('global:managerip', '') %} +{% set hivekey = salt['pillar.get']('global:hivekey', '') %} {% set MANAGER = salt['pillar.get']('manager:url_base', '') %} # Elastalert rule to forward high level Wazuh alerts from Security Onion to a specified TheHive instance. 
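The static-to-global pillar rename in this patch has to land in every Jinja
template at once: pillar.get silently falls back to its default (note the ''
arguments in the templates above) when a key is missing, so any template still
reading the old static: namespace would render blank values rather than fail
loudly. A quick post-highstate sanity check, sketched under the assumption
that salt-call is run locally on a minion:

```bash
# Sketch (not part of this patch): the renamed keys should resolve,
# and the old namespace should now come back empty.
salt-call pillar.get global:managerip   # expect: the manager IP
salt-call pillar.get static:managerip   # expect: no value after the rename
```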
diff --git a/salt/elastalert/init.sls b/salt/elastalert/init.sls index 5703b8717..c6c3afb2f 100644 --- a/salt/elastalert/init.sls +++ b/salt/elastalert/init.sls @@ -12,8 +12,8 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %} diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 909d30152..f3777481c 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -12,8 +12,8 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml index 825ffaf64..2b8a4118f 100644 --- a/salt/filebeat/etc/filebeat.yml +++ b/salt/filebeat/etc/filebeat.yml @@ -6,11 +6,11 @@ {%- set HOSTNAME = salt['grains.get']('host', '') %} -{%- set ZEEKVER = salt['pillar.get']('static:zeekversion', 'COMMUNITY') %} -{%- set WAZUHENABLED = salt['pillar.get']('static:wazuh', '0') %} +{%- set ZEEKVER = salt['pillar.get']('global:zeekversion', 'COMMUNITY') %} +{%- set WAZUHENABLED = salt['pillar.get']('global:wazuh', '0') %} {%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %} -{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%} -{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%} +{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%} +{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%} name: {{ HOSTNAME }} diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls index 0d1f521e3..a4fa36b14 100644 --- a/salt/filebeat/init.sls +++ b/salt/filebeat/init.sls @@ -11,10 +11,10 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} -{% set MANAGERIP = salt['pillar.get']('static:managerip', '') %} +{% set MANAGERIP = salt['pillar.get']('global:managerip', '') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} {% if FEATURES %} {% set FEATURES = "-features" %} diff --git a/salt/firewall/assigned_hostgroups.map.yaml b/salt/firewall/assigned_hostgroups.map.yaml index 2500c604a..7eb16a62a 100644 --- a/salt/firewall/assigned_hostgroups.map.yaml +++ b/salt/firewall/assigned_hostgroups.map.yaml @@ -15,6 +15,7 @@ role: - {{ portgroups.mysql }} - {{ portgroups.kibana }} - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.influxdb }} - {{ portgroups.fleet_api }} - {{ portgroups.cortex }} @@ -38,6 +39,7 @@ role: search_node: portgroups: - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.elasticsearch_node }} self: portgroups: @@ -99,6 +101,7 @@ role: - {{ portgroups.mysql }} - {{ portgroups.kibana }} - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.influxdb }} - {{ portgroups.fleet_api }} - {{ portgroups.cortex }} @@ -122,6 +125,7 @@ role: search_node: portgroups: - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.elasticsearch_node }} self: portgroups: @@ -180,6 +184,7 @@ role: - {{ portgroups.mysql }} - {{ portgroups.kibana }} - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.influxdb }} - {{ portgroups.fleet_api }} - {{ portgroups.cortex }} @@ -203,6 +208,7 @@ role: search_node: portgroups: - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.elasticsearch_node }} self: portgroups: @@ -261,6 +267,7 @@ role: - {{ portgroups.mysql }} - {{ portgroups.kibana }} - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.influxdb }} - {{ portgroups.fleet_api }} - {{ portgroups.cortex }} @@ -284,6 +291,7 @@ role: search_node: portgroups: - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.elasticsearch_node }} self: portgroups: diff --git a/salt/firewall/portgroups.yaml b/salt/firewall/portgroups.yaml index b8d86f253..5dee48755 100644 --- a/salt/firewall/portgroups.yaml +++ b/salt/firewall/portgroups.yaml @@ -45,6 +45,9 @@ firewall: kibana: tcp: - 5601 + minio: + tcp: + - 9595 mysql: tcp: - 3306 diff --git a/salt/fleet/event_gen-packages.sls b/salt/fleet/event_gen-packages.sls index 24b013704..bfcfd2a1d 100644 --- a/salt/fleet/event_gen-packages.sls +++ b/salt/fleet/event_gen-packages.sls @@ -1,10 +1,10 @@ {% set MANAGER = salt['grains.get']('master') %} {% set ENROLLSECRET = salt['pillar.get']('secrets:fleet_enroll-secret') %} -{% set CURRENTPACKAGEVERSION = salt['pillar.get']('static:fleet_packages-version') %} -{% set VERSION = salt['pillar.get']('static:soversion') %} -{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} -{%- set FLEETNODE = salt['pillar.get']('static:fleet_node') -%} +{% set CURRENTPACKAGEVERSION = salt['pillar.get']('global:fleet_packages-version') %} +{% set VERSION = salt['pillar.get']('global:soversion') %} +{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') 
%} +{%- set FLEETNODE = salt['pillar.get']('global:fleet_node') -%} {% if CUSTOM_FLEET_HOSTNAME != None and CUSTOM_FLEET_HOSTNAME != '' %} {% set HOSTNAME = CUSTOM_FLEET_HOSTNAME %} diff --git a/salt/fleet/event_update-custom-hostname.sls b/salt/fleet/event_update-custom-hostname.sls index 9278862ed..b404b2828 100644 --- a/salt/fleet/event_update-custom-hostname.sls +++ b/salt/fleet/event_update-custom-hostname.sls @@ -1,4 +1,4 @@ -{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %} +{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %} so/fleet: event.send: diff --git a/salt/fleet/init.sls b/salt/fleet/init.sls index 0b402a54b..b2a3bb516 100644 --- a/salt/fleet/init.sls +++ b/salt/fleet/init.sls @@ -1,8 +1,8 @@ {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%} {%- set FLEETPASS = salt['pillar.get']('secrets:fleet', None) -%} {%- set FLEETJWT = salt['pillar.get']('secrets:fleet_jwt', None) -%} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set FLEETARCH = salt['grains.get']('role') %} @@ -10,7 +10,7 @@ {% set MAININT = salt['pillar.get']('host:mainint') %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} {% else %} - {% set MAINIP = salt['pillar.get']('static:managerip') %} + {% set MAINIP = salt['pillar.get']('global:managerip') %} {% endif %} include: diff --git a/salt/fleet/install_package.sls b/salt/fleet/install_package.sls index d09de540c..9063464d8 100644 --- a/salt/fleet/install_package.sls +++ b/salt/fleet/install_package.sls @@ -1,8 +1,8 @@ -{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%} -{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%} -{%- set FLEETHOSTNAME = salt['pillar.get']('static:fleet_hostname', False) -%} -{%- set FLEETIP = salt['pillar.get']('static:fleet_ip', False) -%} -{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %} +{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%} +{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%} +{%- set FLEETHOSTNAME = salt['pillar.get']('global:fleet_hostname', False) -%} +{%- set FLEETIP = salt['pillar.get']('global:fleet_ip', False) -%} +{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %} {% if CUSTOM_FLEET_HOSTNAME != (None and '') %} diff --git a/salt/freqserver/init.sls b/salt/freqserver/init.sls index 08661f3da..f48b66cff 100644 --- a/salt/freqserver/init.sls +++ b/salt/freqserver/init.sls @@ -13,7 +13,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
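Alongside the rename, the firewall changes above add a `minio` portgroup (TCP 9595) to the manager and search_node hostgroups. A rough liveness check once the rules and container are in place — `/minio/health/live` is MinIO's standard health endpoint, while the manager IP is a placeholder:

```
# Expect "200" if the MinIO listener is reachable through the new rule.
# <manager-ip> is a placeholder; 9595 comes from salt/firewall/portgroups.yaml above.
curl -s -o /dev/null -w '%{http_code}\n' "http://<manager-ip>:9595/minio/health/live"
```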
-{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} # Create the user fservergroup: diff --git a/salt/grafana/etc/datasources/influxdb.yaml b/salt/grafana/etc/datasources/influxdb.yaml index c70fd7137..a10bed981 100644 --- a/salt/grafana/etc/datasources/influxdb.yaml +++ b/salt/grafana/etc/datasources/influxdb.yaml @@ -1,4 +1,4 @@ -{%- set MANAGER = salt['pillar.get']('static:managerip', '') %} +{%- set MANAGER = salt['pillar.get']('global:managerip', '') %} apiVersion: 1 deleteDatasources: diff --git a/salt/grafana/init.sls b/salt/grafana/init.sls index e63c9a9c4..eb446b2e0 100644 --- a/salt/grafana/init.sls +++ b/salt/grafana/init.sls @@ -1,7 +1,7 @@ {% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %} {% set MANAGER = salt['grains.get']('master') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %} diff --git a/salt/idstools/init.sls b/salt/idstools/init.sls index 3313fa901..93db83759 100644 --- a/salt/idstools/init.sls +++ b/salt/idstools/init.sls @@ -12,8 +12,8 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} # IDSTools Setup idstoolsdir: diff --git a/salt/influxdb/init.sls b/salt/influxdb/init.sls index 6d8ba4566..d35ab6cae 100644 --- a/salt/influxdb/init.sls +++ b/salt/influxdb/init.sls @@ -1,7 +1,7 @@ {% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %} {% set MANAGER = salt['grains.get']('master') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %} diff --git a/salt/kibana/bin/so-kibana-config-load b/salt/kibana/bin/so-kibana-config-load index 451e848a1..2e5d38ade 100644 --- a/salt/kibana/bin/so-kibana-config-load +++ b/salt/kibana/bin/so-kibana-config-load @@ -1,6 +1,6 @@ #!/bin/bash -# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%} -# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%} +# {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager', False) -%} +# {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node', False) -%} # {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %} KIBANA_VERSION="7.6.1" diff --git a/salt/kibana/init.sls b/salt/kibana/init.sls index 9521c5bb1..a1dccd137 100644 --- a/salt/kibana/init.sls +++ b/salt/kibana/init.sls @@ -1,5 +1,5 @@ -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = 
salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} {% if FEATURES %} diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 8a3b539a2..b63c1ce96 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -12,8 +12,8 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} diff --git a/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja new file mode 100644 index 000000000..1f6bf03b4 --- /dev/null +++ b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja @@ -0,0 +1,22 @@ +{%- if grains.role == 'so-heavynode' %} +{%- set MANAGER = salt['pillar.get']('elasticsearch:mainip', '') %} +{%- else %} +{%- set MANAGER = salt['pillar.get']('global:managerip', '') %} +{% endif -%} +{%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %} +{%- set access_key = salt['pillar.get']('global:access_key', '') %} +{%- set access_secret = salt['pillar.get']('global:access_secret', '') %} +input { + s3 { + access_key_id => "{{ access_key }}" + secret_access_key => "{{ access_secret }}" + endpoint => "http://{{ MANAGER }}:9595" + bucket => "logstash" + delete => true + interval => 10 + codec => json + additional_settings => { + "force_path_style" => true + } + } +} diff --git a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja index 2ce204875..6e736f22f 100644 --- a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja +++ b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja @@ -1,7 +1,7 @@ {%- if grains.role == 'so-heavynode' %} {%- set MANAGER = salt['pillar.get']('elasticsearch:mainip', '') %} {%- else %} -{%- set MANAGER = salt['pillar.get']('static:managerip', '') %} +{%- set MANAGER = salt['pillar.get']('global:managerip', '') %} {% endif -%} {%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %} diff --git a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja new file mode 100644 index 000000000..a085ee587 --- /dev/null +++ b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja @@ -0,0 +1,17 @@ +{%- set MANAGER = salt['pillar.get']('global:managerip', '') -%} +{%- set access_key = salt['pillar.get']('global:access_key', '') %} +{%- set access_secret = salt['pillar.get']('global:access_secret', '') %} +output { + s3 { + access_key_id => "{{ access_key }}" + secret_access_key => "{{ access_secret}}" + endpoint => "http://{{ MANAGER }}:9595" + bucket => "logstash" + size_file => 2048 + time_file => 1 + codec => json + additional_settings => { + "force_path_style" => true + } + } +} diff --git a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja index 71ec9f639..239ca8cb6 100644 --- a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja +++ 
b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja @@ -1,4 +1,4 @@ -{% set MANAGER = salt['pillar.get']('static:managerip', '') %} +{% set MANAGER = salt['pillar.get']('global:managerip', '') %} {% set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) %} output { redis { diff --git a/salt/manager/init.sls b/salt/manager/init.sls index aef705724..3b4852542 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -12,10 +12,10 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} -{% set managerproxy = salt['pillar.get']('static:managerupdate', '0') %} +{% set managerproxy = salt['pillar.get']('global:managerupdate', '0') %} socore_own_saltstack: file.directory: diff --git a/salt/minio/init.sls b/salt/minio/init.sls index fa9d2f2de..438face99 100644 --- a/salt/minio/init.sls +++ b/salt/minio/init.sls @@ -13,8 +13,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set access_key = salt['pillar.get']('manager:access_key', '') %} -{% set access_secret = salt['pillar.get']('manager:access_secret', '') %} +{% set access_key = salt['pillar.get']('minio:access_key', '') %} +{% set access_secret = salt['pillar.get']('minio:access_secret', '') %} # Minio Setup minioconfdir: @@ -26,7 +26,14 @@ minioconfdir: miniodatadir: file.directory: - - name: /nsm/minio/data + - name: /nsm/minio/data/ + - user: 939 + - group: 939 + - makedirs: True + +logstashbucket: + file.directory: + - name: /nsm/minio/data/logstash - user: 939 - group: 939 - makedirs: True @@ -40,12 +47,11 @@ minio: - hostname: so-minio - user: socore - port_bindings: - - 0.0.0.0:9000:9000 + - 0.0.0.0:9595:9595 - environment: - MINIO_ACCESS_KEY: {{ access_key }} - MINIO_SECRET_KEY: {{ access_secret }} - binds: - /nsm/minio/data:/data:rw - /opt/so/conf/minio/etc:/root/.minio:rw - - entrypoint: "/usr/bin/docker-entrypoint.sh server /data" - - network_mode: so-elastic-net + - entrypoint: "/usr/bin/docker-entrypoint.sh server --address :9595 /data" diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls index 78240fe2f..c9c6fde41 100644 --- a/salt/mysql/init.sls +++ b/salt/mysql/init.sls @@ -1,7 +1,7 @@ {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) %} -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set MAINIP = salt['pillar.get']('elasticsearch:mainip') %} {% set FLEETARCH = salt['grains.get']('role') %} @@ -10,7 +10,7 @@ {% set MAININT = salt['pillar.get']('host:mainint') %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} {% else %} - {% set MAINIP = salt['pillar.get']('static:managerip') %} + {% set MAINIP = salt['pillar.get']('global:managerip') %} {% endif %} # MySQL Setup diff --git a/salt/nginx/etc/nginx.conf.so-eval 
b/salt/nginx/etc/nginx.conf.so-eval index 2998a5bf2..9c919c764 100644 --- a/salt/nginx/etc/nginx.conf.so-eval +++ b/salt/nginx/etc/nginx.conf.so-eval @@ -1,7 +1,7 @@ {%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %} -{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %} +{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} +{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} +{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} # For more information on configuration, see: # * Official English Documentation: http://nginx.org/en/docs/ # * Official Russian Documentation: http://nginx.org/ru/docs/ diff --git a/salt/nginx/etc/nginx.conf.so-manager b/salt/nginx/etc/nginx.conf.so-manager index bdb342cac..cf7545942 100644 --- a/salt/nginx/etc/nginx.conf.so-manager +++ b/salt/nginx/etc/nginx.conf.so-manager @@ -1,7 +1,7 @@ {%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %} -{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %} +{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} +{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} +{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} # For more information on configuration, see: # * Official English Documentation: http://nginx.org/en/docs/ # * Official Russian Documentation: http://nginx.org/ru/docs/ diff --git a/salt/nginx/etc/nginx.conf.so-managersearch b/salt/nginx/etc/nginx.conf.so-managersearch index cb7576923..4b9daba4e 100644 --- a/salt/nginx/etc/nginx.conf.so-managersearch +++ b/salt/nginx/etc/nginx.conf.so-managersearch @@ -1,7 +1,7 @@ {%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %} -{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %} +{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} +{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} +{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} # For more information on configuration, see: # * Official English Documentation: http://nginx.org/en/docs/ # * Official Russian Documentation: http://nginx.org/ru/docs/ diff --git a/salt/nginx/etc/nginx.conf.so-standalone b/salt/nginx/etc/nginx.conf.so-standalone index bdb342cac..cf7545942 100644 --- a/salt/nginx/etc/nginx.conf.so-standalone +++ b/salt/nginx/etc/nginx.conf.so-standalone @@ -1,7 +1,7 @@ {%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %} -{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %} +{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} +{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} +{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} # For more information on configuration, see: # * Official English Documentation: http://nginx.org/en/docs/ # * Official Russian Documentation: http://nginx.org/ru/docs/ diff --git a/salt/nginx/files/navigator_config.json b/salt/nginx/files/navigator_config.json index 
bd40e09ef..d54f13265 100644 --- a/salt/nginx/files/navigator_config.json +++ b/salt/nginx/files/navigator_config.json @@ -1,4 +1,4 @@ -{%- set ip = salt['pillar.get']('static:managerip', '') %} +{%- set ip = salt['pillar.get']('global:managerip', '') %} { "enterprise_attack_url": "https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json", diff --git a/salt/nginx/init.sls b/salt/nginx/init.sls index 53bb13eec..2e67a6b2c 100644 --- a/salt/nginx/init.sls +++ b/salt/nginx/init.sls @@ -1,8 +1,8 @@ -{% set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) %} -{% set FLEETNODE = salt['pillar.get']('static:fleet_node', False) %} +{% set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) %} +{% set FLEETNODE = salt['pillar.get']('global:fleet_node', False) %} {% set MANAGER = salt['grains.get']('master') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} # Drop the correct nginx config based on role nginxconfdir: diff --git a/salt/nodered/files/nodered_load_flows b/salt/nodered/files/nodered_load_flows index 985c1c49a..78bab818a 100644 --- a/salt/nodered/files/nodered_load_flows +++ b/salt/nodered/files/nodered_load_flows @@ -1,4 +1,4 @@ -{%- set ip = salt['pillar.get']('static:managerip', '') -%} +{%- set ip = salt['pillar.get']('global:managerip', '') -%} #!/bin/bash default_salt_dir=/opt/so/saltstack/default diff --git a/salt/nodered/files/so_flows.json b/salt/nodered/files/so_flows.json index ad780ceb9..a8a6e2c69 100644 --- a/salt/nodered/files/so_flows.json +++ b/salt/nodered/files/so_flows.json @@ -1,4 +1,4 @@ -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%} -{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') -%} -{%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') -%} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') -%} +{%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') -%} +{%- set CORTEXKEY = salt['pillar.get']('global:cortexkey', '') -%} [{"id":"dca608c3.7d8af8","type":"tab","label":"TheHive - Webhook Events","disabled":false,"info":""},{"id":"4db74fa6.2556d","type":"tls-config","z":"","name":"","cert":"","key":"","ca":"","certname":"","keyname":"","caname":"","servername":"","verifyservercert":false},{"id":"aa6cf50d.a02fc8","type":"http in","z":"dca608c3.7d8af8","name":"TheHive 
Listener","url":"/thehive","method":"post","upload":false,"swaggerDoc":"","x":120,"y":780,"wires":[["2b92aebb.853dc2","2fce29bb.1b1376","82ad0f08.7a53f"]]},{"id":"2b92aebb.853dc2","type":"debug","z":"dca608c3.7d8af8","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"payload","targetType":"msg","x":470,"y":940,"wires":[]},{"id":"a4ecb84a.805958","type":"switch","z":"dca608c3.7d8af8","name":"Operation","property":"payload.operation","propertyType":"msg","rules":[{"t":"eq","v":"Creation","vt":"str"},{"t":"eq","v":"Update","vt":"str"},{"t":"eq","v":"Delete","vt":"str"}],"checkall":"false","repair":false,"outputs":3,"x":580,"y":780,"wires":[["f1e954fd.3c21d8"],["65928861.c90a48"],["a259a26c.a21"]],"outputLabels":["Creation","Update","Delete"]},{"id":"f1e954fd.3c21d8","type":"switch","z":"dca608c3.7d8af8","name":"Creation","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_task","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"},{"t":"eq","v":"case_artifact_job","vt":"str"},{"t":"eq","v":"alert","vt":"str"},{"t":"eq","v":"user","vt":"str"}],"checkall":"false","repair":false,"outputs":7,"x":900,"y":480,"wires":[["e88b4cc2.f6afe"],["8c54e39.a1b4f2"],["64203fe8.e0ad5"],["3511de51.889a02"],["14544a8b.b6b2f5"],["44c595a4.45d45c"],["3eb4bedf.6e20a2"]],"inputLabels":["Operation"],"outputLabels":["case","case_artifact","case_task","case_task_log","action","alert","user"],"info":"No webhook data is received for the following events:\n\n- Creation of Dashboard\n- Creation of Case Templates\n"},{"id":"65928861.c90a48","type":"switch","z":"dca608c3.7d8af8","name":"Update","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_artifact_job","vt":"str"},{"t":"eq","v":"case_task","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"},{"t":"eq","v":"alert","vt":"str"},{"t":"eq","v":"user","vt":"str"}],"checkall":"false","repair":false,"outputs":7,"x":900,"y":860,"wires":[["eebe1748.1cd348"],["d703adc0.12fd1"],["2b738415.408d4c"],["6d97371a.406348"],["4ae621e1.9ae6"],["5786cee2.98109"],["54077728.447648"]],"inputLabels":["Operation"],"outputLabels":["case","case_artifact",null,"case_task","case_task_log","alert","user"]},{"id":"a259a26c.a21","type":"switch","z":"dca608c3.7d8af8","name":"Delete","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"}],"checkall":"false","repair":false,"outputs":3,"x":890,"y":1200,"wires":[["60c8bcfb.eff1f4"],["df708bab.348308"],["e9a8650c.e20cc8"]],"outputLabels":["case","case_artifact",""],"info":"Deleting a case task doesnt actually trigger a delete event. 
It triggers an `update` event where the status = cancelled"},{"id":"54077728.447648","type":"switch","z":"dca608c3.7d8af8","name":"User","property":"payload.object.status","propertyType":"msg","rules":[{"t":"eq","v":"Locked","vt":"str"},{"t":"eq","v":"Ok","vt":"str"}],"checkall":"false","repair":false,"outputs":2,"x":1130,"y":980,"wires":[["9429d6c5.5ac788"],["4e3e091c.d35388"]]},{"id":"9429d6c5.5ac788","type":"function","z":"dca608c3.7d8af8","name":"status: Locked","func":"msg.topic = \"[The Hive] A user account was locked\";\nmsg.from = \"from@example.com\";\nmsg.to = \"to@example.com\";\nreturn msg;","outputs":1,"noerr":0,"x":1380,"y":972,"wires":[[]],"info":"- User account was locked"},{"id":"4e3e091c.d35388","type":"function","z":"dca608c3.7d8af8","name":"status: Ok","func":"msg.topic = \"[The Hive] A user account was changed\";\nmsg.from = \"from@example.com\";\nmsg.to = \"to@example.com\";\nreturn msg;","outputs":1,"noerr":0,"x":1360,"y":1020,"wires":[[]],"info":"- User account was unlocked\n- User description was changed\n- User role was changed\n- User API key was added\n- User API key was revoked\n"},{"id":"485f3be.1ffcfc4","type":"function","z":"dca608c3.7d8af8","name":"status: Open","func":"// Fires when a Case is updated AND status = open\n// This can include things like TLP/PAP changes\n\nreturn msg;","outputs":1,"noerr":0,"x":1370,"y":660,"wires":[[]]},{"id":"eebe1748.1cd348","type":"switch","z":"dca608c3.7d8af8","name":"case","property":"payload.object.status","propertyType":"msg","rules":[{"t":"eq","v":"Open","vt":"str"}],"checkall":"true","repair":false,"outputs":1,"x":1130,"y":740,"wires":[["485f3be.1ffcfc4","e4b7b4bf.2fb828"]],"info":"- A case was modified"},{"id":"8c54e39.a1b4f2","type":"switch","z":"dca608c3.7d8af8","name":"case_artifact: Run Analyzer","property":"payload.object.dataType","propertyType":"msg","rules":[{"t":"eq","v":"ip","vt":"str"},{"t":"eq","v":"domain","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":1600,"y":340,"wires":[["eb8cfeb7.a7118","a5dd8a8a.065b88"],["eb8cfeb7.a7118","a5dd8a8a.065b88"]],"info":"# References\n\n\n"},{"id":"2fce29bb.1b1376","type":"function","z":"dca608c3.7d8af8","name":"Add headers","func":"msg.thehive_url = 'https://{{ MANAGERIP }}/thehive';\nmsg.cortex_url = 'https://{{ MANAGERIP }}/cortex';\nmsg.cortex_id = 'CORTEX-SERVER-ID';\nreturn msg;","outputs":1,"noerr":0,"x":350,"y":780,"wires":[["a4ecb84a.805958"]]},{"id":"e4b7b4bf.2fb828","type":"function","z":"dca608c3.7d8af8","name":"status: Resolved","func":"// Fires when a case is closed (resolved)\n\nreturn msg;","outputs":1,"noerr":0,"x":1390,"y":720,"wires":[[]]},{"id":"e88b4cc2.f6afe","type":"function","z":"dca608c3.7d8af8","name":"case","func":"// Fires when a case is created\n// or when a responder is generated against a case\n\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":320,"wires":[[]]},{"id":"64203fe8.e0ad5","type":"function","z":"dca608c3.7d8af8","name":"case_task","func":"// Fires when a case task is created\nreturn msg;","outputs":1,"noerr":0,"x":1140,"y":400,"wires":[[]]},{"id":"3511de51.889a02","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"// Fires when a case task log is created\n\nreturn msg;","outputs":1,"noerr":0,"x":1163,"y":440,"wires":[[]]},{"id":"14544a8b.b6b2f5","type":"function","z":"dca608c3.7d8af8","name":"case_artifact_job","func":"// Fires when a Responder or Analyzser is Run on an existing observable\n\nreturn 
msg;","outputs":1,"noerr":0,"x":1173,"y":480,"wires":[[]]},{"id":"2b738415.408d4c","type":"function","z":"dca608c3.7d8af8","name":"case_artifact_job","func":"\nreturn msg;","outputs":1,"noerr":0,"x":1170,"y":820,"wires":[[]]},{"id":"3eb4bedf.6e20a2","type":"function","z":"dca608c3.7d8af8","name":"user","func":"// Fires when a user is created\n\nreturn msg;","outputs":1,"noerr":0,"x":1133,"y":560,"wires":[[]]},{"id":"d703adc0.12fd1","type":"function","z":"dca608c3.7d8af8","name":"case_artifact","func":"// Fires when an artifact is updated\nreturn msg;","outputs":1,"noerr":0,"x":1150,"y":780,"wires":[[]]},{"id":"6d97371a.406348","type":"function","z":"dca608c3.7d8af8","name":"case_task","func":"// Fires when a case task is updated\nreturn msg;","outputs":1,"noerr":0,"x":1140,"y":860,"wires":[[]]},{"id":"4ae621e1.9ae6","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"//Fires when a case_task_log is updated\n\nreturn msg;","outputs":1,"noerr":0,"x":1160,"y":900,"wires":[[]]},{"id":"60c8bcfb.eff1f4","type":"function","z":"dca608c3.7d8af8","name":"case","func":"//Fires when a case is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":1160,"wires":[[]]},{"id":"df708bab.348308","type":"function","z":"dca608c3.7d8af8","name":"case_artifact","func":"//Fires when a case_artifact is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1150,"y":1200,"wires":[[]]},{"id":"e9a8650c.e20cc8","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"//Fires when a case_task_log is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1160,"y":1240,"wires":[[]]},{"id":"5786cee2.98109","type":"function","z":"dca608c3.7d8af8","name":"alert","func":"//Fires when an alert is updated\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":940,"wires":[[]]},{"id":"44c595a4.45d45c","type":"change","z":"dca608c3.7d8af8","d":true,"name":"Convert Alert Msg to Artifacts","rules":[{"t":"move","p":"payload.object.artifacts","pt":"msg","to":"payload","tot":"msg"}],"action":"","property":"","from":"","to":"","reg":false,"x":1200,"y":520,"wires":[["6dcca25e.04bd2c"]]},{"id":"6dcca25e.04bd2c","type":"split","z":"dca608c3.7d8af8","name":"Split Artifacts","splt":"\\n","spltType":"str","arraySplt":1,"arraySpltType":"len","stream":false,"addname":"","x":1430,"y":520,"wires":[["767c84f2.c9ba2c"]]},{"id":"767c84f2.c9ba2c","type":"switch","z":"dca608c3.7d8af8","name":"alert: Run Analyzer","property":"payload.dataType","propertyType":"msg","rules":[{"t":"eq","v":"ip","vt":"str"},{"t":"eq","v":"domain","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":1630,"y":400,"wires":[["eb8cfeb7.a7118","a5dd8a8a.065b88"],["a5dd8a8a.065b88","eb8cfeb7.a7118"]],"info":"# References\n\n\n"},{"id":"82ad0f08.7a53f","type":"http response","z":"dca608c3.7d8af8","name":"Ack Event Receipt","statusCode":"200","headers":{},"x":250,"y":940,"wires":[]},{"id":"a5dd8a8a.065b88","type":"function","z":"dca608c3.7d8af8","name":"Run Analyzer: CERT DNS","func":"msg.analyzer_id = \"4f28afc20d78f98df425e36e561af33f\";\n\nif (msg.payload.objectId) {\n msg.tag = \"case_artifact\"\n msg.artifact_id = msg.payload.objectId\n msg.url = msg.thehive_url + '/api/connector/cortex/job';\n msg.payload = {\n 'cortexId' : msg.cortex_id,\n 'artifactId': msg.artifact_id,\n 'analyzerId': msg.analyzer_id\n };\n}\nelse {\n msg.tag = \"observable\"\n msg.observable = msg.payload.data\n msg.dataType = msg.payload.dataType\n\n msg.url = msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run';\n msg.payload = {\n 'data' : msg.observable,\n 'dataType': 
msg.dataType \n };\n}\nreturn msg;","outputs":1,"noerr":0,"x":1930,"y":420,"wires":[["f050a09f.b2201"]]},{"id":"eb8cfeb7.a7118","type":"function","z":"dca608c3.7d8af8","name":"Run Analyzer: Urlscan","func":"msg.analyzer_id = \"54e51b62c6c8ddc3cbc3cbdd889a0557\";\n\nif (msg.payload.objectId) {\n msg.tag = \"case_artifact\"\n msg.artifact_id = msg.payload.objectId\n msg.url = msg.thehive_url + '/api/connector/cortex/job';\n msg.payload = {\n 'cortexId' : msg.cortex_id,\n 'artifactId': msg.artifact_id,\n 'analyzerId': msg.analyzer_id\n };\n}\nelse {\n msg.tag = \"observable\"\n msg.observable = msg.payload.data\n msg.dataType = msg.payload.dataType\n\n msg.url = msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run';\n msg.payload = {\n 'data' : msg.observable,\n 'dataType': msg.dataType \n };\n}\nreturn msg;","outputs":1,"noerr":0,"x":1920,"y":320,"wires":[["f050a09f.b2201"]]},{"id":"1c448528.3032fb","type":"http request","z":"dca608c3.7d8af8","name":"Submit to Cortex","method":"POST","ret":"obj","paytoqs":false,"url":"","tls":"4db74fa6.2556d","persist":false,"proxy":"","authType":"bearer","credentials": {"user": "", "password": "{{ CORTEXKEY }}"},"x":2450,"y":420,"wires":[["ea6614fb.752a78"]]},{"id":"ea6614fb.752a78","type":"debug","z":"dca608c3.7d8af8","name":"Debug","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"true","targetType":"full","x":2670,"y":360,"wires":[]},{"id":"f050a09f.b2201","type":"switch","z":"dca608c3.7d8af8","name":"Cases vs Alerts","property":"tag","propertyType":"msg","rules":[{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"observable","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":2200,"y":360,"wires":[["f7fca977.a73b28"],["1c448528.3032fb"]],"inputLabels":["Data"],"outputLabels":["Cases","Alerts"]},{"id":"f7fca977.a73b28","type":"http request","z":"dca608c3.7d8af8","name":"Submit to TheHive","method":"POST","ret":"obj","paytoqs":false,"url":"","tls":"4db74fa6.2556d","persist":false,"proxy":"","authType":"bearer","credentials": {"user": "", "password": "{{ HIVEKEY }}"},"x":2450,"y":280,"wires":[["ea6614fb.752a78"]]}] diff --git a/salt/nodered/init.sls b/salt/nodered/init.sls index bec8f266a..34aacbd81 100644 --- a/salt/nodered/init.sls +++ b/salt/nodered/init.sls @@ -13,7 +13,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} # Create the nodered group noderedgroup: diff --git a/salt/pcap/files/sensoroni.json b/salt/pcap/files/sensoroni.json index ab99c175c..79e97a75b 100644 --- a/salt/pcap/files/sensoroni.json +++ b/salt/pcap/files/sensoroni.json @@ -1,5 +1,5 @@ {%- set MANAGER = salt['grains.get']('master') -%} -{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%} +{%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') -%} {%- set CHECKININTERVALMS = salt['pillar.get']('pcap:sensor_checkin_interval_ms', 10000) -%} { "logFilename": "/opt/sensoroni/logs/sensoroni.log", diff --git a/salt/pcap/init.sls b/salt/pcap/init.sls index 1a9de6611..3db7a227c 100644 --- a/salt/pcap/init.sls +++ b/salt/pcap/init.sls @@ -12,8 +12,8 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
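For context on the flow JSON above: each "Run Analyzer" function node builds an HTTP request against Cortex (or against TheHive's Cortex connector, for existing case artifacts) and authenticates with a bearer key. Expressed as a standalone sketch, with the manager IP, analyzer ID, and API key as placeholders, the direct-to-Cortex branch amounts to:

```
# The observable branch of the "Run Analyzer" nodes, as curl.
# Endpoint path and payload shape come from the flow JSON above;
# <manager-ip>, <analyzer-id>, and $CORTEX_API_KEY are placeholders.
curl -sk \
  -H "Authorization: Bearer $CORTEX_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{"data":"8.8.8.8","dataType":"ip"}' \
  "https://<manager-ip>/cortex/api/analyzer/<analyzer-id>/run"
```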
-{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %} {% set BPF_STENO = salt['pillar.get']('steno:bpf', None) %} diff --git a/salt/playbook/init.sls b/salt/playbook/init.sls index 44b806f9a..d390a36fb 100644 --- a/salt/playbook/init.sls +++ b/salt/playbook/init.sls @@ -1,6 +1,6 @@ {% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] %} {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%} diff --git a/salt/reactor/fleet.sls b/salt/reactor/fleet.sls index 177dabf3a..4e4e13791 100644 --- a/salt/reactor/fleet.sls +++ b/salt/reactor/fleet.sls @@ -10,7 +10,7 @@ def run(): MINIONID = data['id'] ACTION = data['data']['action'] LOCAL_SALT_DIR = "/opt/so/saltstack/local" - STATICFILE = f"{LOCAL_SALT_DIR}/pillar/static.sls" + STATICFILE = f"{LOCAL_SALT_DIR}/pillar/global.sls" SECRETSFILE = f"{LOCAL_SALT_DIR}/pillar/secrets.sls" if MINIONID.split('_')[-1] in ['manager','eval','fleet','managersearch','standalone']: diff --git a/salt/redis/init.sls b/salt/redis/init.sls index 5a981e688..4864fc8a2 100644 --- a/salt/redis/init.sls +++ b/salt/redis/init.sls @@ -12,8 +12,8 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
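The reactor change above repoints `STATICFILE` at `pillar/global.sls`; since the rename spans dozens of states and scripts, a quick tree-wide check helps confirm nothing still references the old names. A minimal sketch, run from the repository root:

```
# No output from either search means the static -> global rename is complete.
grep -rn "pillar/static.sls" salt/ setup/
grep -rn "'static:" salt/ setup/
```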
-{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} # Redis Setup diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json index 31e49fc86..b9470652b 100644 --- a/salt/soc/files/soc/soc.json +++ b/salt/soc/files/soc/soc.json @@ -1,5 +1,5 @@ -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%} -{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') -%} +{%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') -%} { "logFilename": "/opt/sensoroni/logs/sensoroni-server.log", "server": { diff --git a/salt/soc/init.sls b/salt/soc/init.sls index e3fdf538a..1c25f42a1 100644 --- a/salt/soc/init.sls +++ b/salt/soc/init.sls @@ -1,5 +1,5 @@ -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} socdir: diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf index 477113376..093b4fd3e 100644 --- a/salt/soctopus/files/SOCtopus.conf +++ b/salt/soctopus/files/SOCtopus.conf @@ -1,6 +1,6 @@ {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %} -{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %} -{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %} +{%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') %} +{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %} [es] es_url = http://{{MANAGER}}:9200 diff --git a/salt/soctopus/files/templates/es-generic.template b/salt/soctopus/files/templates/es-generic.template index b56050741..8183a5af4 100644 --- a/salt/soctopus/files/templates/es-generic.template +++ b/salt/soctopus/files/templates/es-generic.template @@ -1,4 +1,4 @@ -{% set ES = salt['pillar.get']('static:managerip', '') %} +{% set ES = salt['pillar.get']('global:managerip', '') %} alert: modules.so.playbook-es.PlaybookESAlerter elasticsearch_host: "{{ ES }}:9200" diff --git a/salt/soctopus/files/templates/generic.template b/salt/soctopus/files/templates/generic.template index 7bb5a969d..cdd5947d3 100644 --- a/salt/soctopus/files/templates/generic.template +++ b/salt/soctopus/files/templates/generic.template @@ -1,6 +1,6 @@ -{% set es = salt['pillar.get']('static:managerip', '') %} -{% set hivehost = salt['pillar.get']('static:managerip', '') %} -{% set hivekey = salt['pillar.get']('static:hivekey', '') %} +{% set es = salt['pillar.get']('global:managerip', '') %} +{% set hivehost = salt['pillar.get']('global:managerip', '') %} +{% set hivekey = salt['pillar.get']('global:hivekey', '') %} alert: hivealerter hive_connection: diff --git a/salt/soctopus/files/templates/osquery.template b/salt/soctopus/files/templates/osquery.template index 4fff9a1d5..352c3d69a 100644 --- a/salt/soctopus/files/templates/osquery.template +++ b/salt/soctopus/files/templates/osquery.template @@ -1,6 +1,6 @@ -{% set es = salt['pillar.get']('static:managerip', '') %} -{% set hivehost = salt['pillar.get']('static:managerip', '') %} -{% set hivekey = salt['pillar.get']('static:hivekey', '') %} +{% set es = 
salt['pillar.get']('global:managerip', '') %} +{% set hivehost = salt['pillar.get']('global:managerip', '') %} +{% set hivekey = salt['pillar.get']('global:hivekey', '') %} alert: hivealerter hive_connection: diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls index 3fcdf8717..7526974df 100644 --- a/salt/soctopus/init.sls +++ b/salt/soctopus/init.sls @@ -1,8 +1,8 @@ -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {%- set MANAGER_URL = salt['pillar.get']('manager:url_base', '') %} -{%- set MANAGER_IP = salt['pillar.get']('static:managerip', '') %} +{%- set MANAGER_IP = salt['pillar.get']('global:managerip', '') %} soctopusdir: file.directory: diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index dfbd4c12a..1cef1bf0a 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -1,11 +1,11 @@ {% set manager = salt['grains.get']('master') %} -{% set managerip = salt['pillar.get']('static:managerip', '') %} +{% set managerip = salt['pillar.get']('global:managerip', '') %} {% set HOSTNAME = salt['grains.get']('host') %} {% set global_ca_text = [] %} {% set global_ca_server = [] %} {% set MAININT = salt['pillar.get']('host:mainint') %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} -{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %} +{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %} {% if grains.id.split('_')|last in ['manager', 'eval', 'standalone'] %} {% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %} diff --git a/salt/strelka/files/backend/backend.yaml b/salt/strelka/files/backend/backend.yaml index b25e5630d..8748a4fd6 100644 --- a/salt/strelka/files/backend/backend.yaml +++ b/salt/strelka/files/backend/backend.yaml @@ -2,7 +2,7 @@ {%- set mainint = salt['pillar.get']('sensor:mainint') %} {%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %} {%- else %} - {%- set ip = salt['pillar.get']('static:managerip') %} + {%- set ip = salt['pillar.get']('global:managerip') %} {%- endif -%} logging_cfg: '/etc/strelka/logging.yaml' limits: diff --git a/salt/strelka/files/filestream/filestream.yaml b/salt/strelka/files/filestream/filestream.yaml index 539e4314c..1dc6795d9 100644 --- a/salt/strelka/files/filestream/filestream.yaml +++ b/salt/strelka/files/filestream/filestream.yaml @@ -2,7 +2,7 @@ {%- set mainint = salt['pillar.get']('sensor:mainint') %} {%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %} {%- else %} - {%- set ip = salt['pillar.get']('static:managerip') %} + {%- set ip = salt['pillar.get']('global:managerip') %} {%- endif -%} conn: server: '{{ ip }}:57314' diff --git a/salt/strelka/files/frontend/frontend.yaml b/salt/strelka/files/frontend/frontend.yaml index 5d72f1e0d..23edef3e3 100644 --- a/salt/strelka/files/frontend/frontend.yaml +++ b/salt/strelka/files/frontend/frontend.yaml @@ -2,7 +2,7 @@ {%- set mainint = salt['pillar.get']('sensor:mainint') %} {%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %} {%- else %} - {%- set ip = salt['pillar.get']('static:managerip') %} + {%- set ip = 
salt['pillar.get']('global:managerip') %} {%- endif -%} server: ":57314" coordinator: diff --git a/salt/strelka/files/manager/manager.yaml b/salt/strelka/files/manager/manager.yaml index db9dd7f91..b4a73b1c0 100644 --- a/salt/strelka/files/manager/manager.yaml +++ b/salt/strelka/files/manager/manager.yaml @@ -2,7 +2,7 @@ {%- set mainint = salt['pillar.get']('sensor:mainint') %} {%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %} {%- else %} - {%- set ip = salt['pillar.get']('static:managerip') %} + {%- set ip = salt['pillar.get']('global:managerip') %} {%- endif -%} coordinator: addr: '{{ ip }}:6380' diff --git a/salt/strelka/init.sls b/salt/strelka/init.sls index c6a900e8e..e85b62f83 100644 --- a/salt/strelka/init.sls +++ b/salt/strelka/init.sls @@ -13,9 +13,9 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . {%- set MANAGER = salt['grains.get']('master') %} -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {%- set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') -%} # Strelka config diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index c0677db16..783f174ca 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -14,9 +14,9 @@ # along with this program. If not, see . {% set interface = salt['pillar.get']('sensor:interface', 'bond0') %} -{% set ZEEKVER = salt['pillar.get']('static:zeekversion', '') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set ZEEKVER = salt['pillar.get']('global:zeekversion', '') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set BPF_NIDS = salt['pillar.get']('nids:bpf') %} {% set BPF_STATUS = 0 %} diff --git a/salt/suricata/suricata_config.map.jinja b/salt/suricata/suricata_config.map.jinja index 9fb3c9a7f..a544f6d96 100644 --- a/salt/suricata/suricata_config.map.jinja +++ b/salt/suricata/suricata_config.map.jinja @@ -11,7 +11,7 @@ HOME_NET: "[{{salt['pillar.get']('sensor:hnsensor')}}]" {% endload %} {% else %} {% load_yaml as homenet %} -HOME_NET: "[{{salt['pillar.get']('static:hnmanager', '')}}]" +HOME_NET: "[{{salt['pillar.get']('global:hnmanager', '')}}]" {% endload %} {% endif %} @@ -44,7 +44,7 @@ HOME_NET: "[{{salt['pillar.get']('static:hnmanager', '')}}]" {% endfor %} {% set surimeta_evelog_index = surimeta_evelog_index[0] %} -{% if salt['pillar.get']('static:zeekversion', 'ZEEK') == 'SURICATA' %} +{% if salt['pillar.get']('global:zeekversion', 'ZEEK') == 'SURICATA' %} {% do suricata_defaults.suricata.config.outputs[default_evelog_index]['eve-log'].types.extend(suricata_meta.suricata.config.outputs[surimeta_evelog_index]['eve-log'].types) %} {% endif %} diff --git a/salt/tcpreplay/init.sls b/salt/tcpreplay/init.sls index 7247e4505..a828c72f1 100644 --- a/salt/tcpreplay/init.sls +++ b/salt/tcpreplay/init.sls @@ -1,6 +1,6 @@ {% if grains['role'] == 'so-sensor' or grains['role'] == 'so-eval' %} -{% set VERSION = 
salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} so-tcpreplay: diff --git a/salt/telegraf/init.sls b/salt/telegraf/init.sls index 668a8839a..c252cdb5b 100644 --- a/salt/telegraf/init.sls +++ b/salt/telegraf/init.sls @@ -1,6 +1,6 @@ {% set MANAGER = salt['grains.get']('master') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} # Add Telegraf to monitor all the things. tgraflogdir: diff --git a/salt/thehive/etc/application.conf b/salt/thehive/etc/application.conf index 8aaf7a9a5..675c5222c 100644 --- a/salt/thehive/etc/application.conf +++ b/salt/thehive/etc/application.conf @@ -1,6 +1,6 @@ -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %} -{%- set HIVEPLAYSECRET = salt['pillar.get']('static:hiveplaysecret', '') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %} +{%- set HIVEPLAYSECRET = salt['pillar.get']('global:hiveplaysecret', '') %} # Secret Key # The secret key is used to secure cryptographic functions. diff --git a/salt/thehive/etc/cortex-application.conf b/salt/thehive/etc/cortex-application.conf index c7e52d954..d84566068 100644 --- a/salt/thehive/etc/cortex-application.conf +++ b/salt/thehive/etc/cortex-application.conf @@ -1,5 +1,5 @@ -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -{%- set CORTEXPLAYSECRET = salt['pillar.get']('static:cortexplaysecret', '') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +{%- set CORTEXPLAYSECRET = salt['pillar.get']('global:cortexplaysecret', '') %} # Secret Key # The secret key is used to secure cryptographic functions. 
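TheHive and Cortex templates above now read `managerip` and their keys from the `global` pillar. One way to sanity-check a single template without running a full highstate is to render it through Jinja with the minion's pillar data — a sketch using the standard `cp.get_template` execution module:

```
# Render the updated template locally, then inspect it for empty
# substitutions (which would indicate a missing global:* pillar key).
salt-call cp.get_template salt://thehive/etc/application.conf /tmp/application.conf
less /tmp/application.conf
```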
diff --git a/salt/thehive/init.sls b/salt/thehive/init.sls index 062637855..ffbb50f0c 100644 --- a/salt/thehive/init.sls +++ b/salt/thehive/init.sls @@ -1,6 +1,6 @@ {% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} thehiveconfdir: file.directory: diff --git a/salt/thehive/scripts/cortex_init b/salt/thehive/scripts/cortex_init index 7eb50df5e..6f5d890ae 100644 --- a/salt/thehive/scripts/cortex_init +++ b/salt/thehive/scripts/cortex_init @@ -1,18 +1,18 @@ #!/bin/bash -# {%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -# {%- set CORTEXUSER = salt['pillar.get']('static:cortexuser', 'cortexadmin') %} -# {%- set CORTEXPASSWORD = salt['pillar.get']('static:cortexpassword', 'cortexchangeme') %} -# {%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') %} -# {%- set CORTEXORGNAME = salt['pillar.get']('static:cortexorgname', '') %} -# {%- set CORTEXORGUSER = salt['pillar.get']('static:cortexorguser', 'soadmin') %} -# {%- set CORTEXORGUSERKEY = salt['pillar.get']('static:cortexorguserkey', '') %} +# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +# {%- set CORTEXUSER = salt['pillar.get']('global:cortexuser', 'cortexadmin') %} +# {%- set CORTEXPASSWORD = salt['pillar.get']('global:cortexpassword', 'cortexchangeme') %} +# {%- set CORTEXKEY = salt['pillar.get']('global:cortexkey', '') %} +# {%- set CORTEXORGNAME = salt['pillar.get']('global:cortexorgname', '') %} +# {%- set CORTEXORGUSER = salt['pillar.get']('global:cortexorguser', 'soadmin') %} +# {%- set CORTEXORGUSERKEY = salt['pillar.get']('global:cortexorguserkey', '') %} default_salt_dir=/opt/so/saltstack/default cortex_clean(){ - sed -i '/^ cortexuser:/d' /opt/so/saltstack/local/pillar/static.sls - sed -i '/^ cortexpassword:/d' /opt/so/saltstack/local/pillar/static.sls - sed -i '/^ cortexorguser:/d' /opt/so/saltstack/local/pillar/static.sls + sed -i '/^ cortexuser:/d' /opt/so/saltstack/local/pillar/global.sls + sed -i '/^ cortexpassword:/d' /opt/so/saltstack/local/pillar/global.sls + sed -i '/^ cortexorguser:/d' /opt/so/saltstack/local/pillar/global.sls } cortex_init(){ diff --git a/salt/thehive/scripts/hive_init b/salt/thehive/scripts/hive_init index 0caff6e2d..c44af6339 100755 --- a/salt/thehive/scripts/hive_init +++ b/salt/thehive/scripts/hive_init @@ -1,12 +1,12 @@ #!/bin/bash -# {%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -# {%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', 'hiveadmin') %} -# {%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', 'hivechangeme') %} -# {%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %} +# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +# {%- set THEHIVEUSER = salt['pillar.get']('global:hiveuser', 'hiveadmin') %} +# {%- set THEHIVEPASSWORD = salt['pillar.get']('global:hivepassword', 'hivechangeme') %} +# {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %} thehive_clean(){ - sed -i '/^ hiveuser:/d' /opt/so/saltstack/local/pillar/static.sls - sed -i '/^ hivepassword:/d' /opt/so/saltstack/local/pillar/static.sls + sed -i '/^ hiveuser:/d' /opt/so/saltstack/local/pillar/global.sls + sed -i '/^ hivepassword:/d' /opt/so/saltstack/local/pillar/global.sls } 
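The `cortex_clean` and `thehive_clean` helpers above scrub one-time bootstrap credentials from the renamed pillar once the services are initialized. Each sed expression deletes a single key line, anchored to the indentation used in the pillar file — a sketch of the same pattern:

```
# Drop bootstrap secrets from the pillar after initialization; the
# leading whitespace in each pattern must match the pillar's YAML indent.
sed -i '/^  hiveuser:/d; /^  hivepassword:/d' /opt/so/saltstack/local/pillar/global.sls
```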
thehive_init(){ diff --git a/salt/top.sls b/salt/top.sls index ff2fbfb0e..30f198b05 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -1,11 +1,11 @@ -{%- set ZEEKVER = salt['pillar.get']('static:zeekversion', '') -%} -{%- set WAZUH = salt['pillar.get']('static:wazuh', '0') -%} +{%- set ZEEKVER = salt['pillar.get']('global:zeekversion', '') -%} +{%- set WAZUH = salt['pillar.get']('global:wazuh', '0') -%} {%- set THEHIVE = salt['pillar.get']('manager:thehive', '0') -%} {%- set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') -%} {%- set FREQSERVER = salt['pillar.get']('manager:freq', '0') -%} {%- set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') -%} -{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%} -{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%} +{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%} +{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%} {%- set STRELKA = salt['pillar.get']('strelka:enabled', '0') -%} {% import_yaml 'salt/minion.defaults.yaml' as salt %} {% set saltversion = salt.salt.minion.version %} diff --git a/salt/wazuh/files/agent/ossec.conf b/salt/wazuh/files/agent/ossec.conf index 8d38868ef..7e33f5599 100644 --- a/salt/wazuh/files/agent/ossec.conf +++ b/salt/wazuh/files/agent/ossec.conf @@ -1,5 +1,5 @@ {%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %} - {%- set ip = salt['pillar.get']('static:managerip', '') %} + {%- set ip = salt['pillar.get']('global:managerip', '') %} {%- elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %} {%- set ip = salt['pillar.get']('elasticsearch:mainip', '') %} {%- elif grains['role'] == 'so-sensor' %} diff --git a/salt/wazuh/files/agent/wazuh-register-agent b/salt/wazuh/files/agent/wazuh-register-agent index bed0ba57f..c6411b492 100755 --- a/salt/wazuh/files/agent/wazuh-register-agent +++ b/salt/wazuh/files/agent/wazuh-register-agent @@ -1,5 +1,5 @@ {%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %} - {%- set ip = salt['pillar.get']('static:managerip', '') %} + {%- set ip = salt['pillar.get']('global:managerip', '') %} {%- elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %} {%- set ip = salt['pillar.get']('elasticsearch:mainip', '') %} {%- elif grains['role'] == 'so-sensor' %} diff --git a/salt/wazuh/files/wazuh-manager-whitelist b/salt/wazuh/files/wazuh-manager-whitelist index 8a8bc9832..c3ecf31a9 100755 --- a/salt/wazuh/files/wazuh-manager-whitelist +++ b/salt/wazuh/files/wazuh-manager-whitelist @@ -1,5 +1,5 @@ -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -{%- set WAZUH_ENABLED = salt['pillar.get']('static:wazuh', '0') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +{%- set WAZUH_ENABLED = salt['pillar.get']('global:wazuh', '0') %} #!/bin/bash local_salt_dir=/opt/so/saltstack/local diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls index 09c4e258b..94b16b199 100644 --- a/salt/wazuh/init.sls +++ b/salt/wazuh/init.sls @@ -1,6 +1,6 @@ {%- set HOSTNAME = salt['grains.get']('host', '') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} # Add ossec group ossecgroup: diff --git a/salt/yum/etc/yum.conf.jinja 
b/salt/yum/etc/yum.conf.jinja index aab63550b..22449083e 100644 --- a/salt/yum/etc/yum.conf.jinja +++ b/salt/yum/etc/yum.conf.jinja @@ -11,6 +11,6 @@ installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }} bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum distroverpkg=centos-release -{% if salt['pillar.get']('static:managerupdate', '0') %} +{% if salt['pillar.get']('global:managerupdate', '0') %} proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142 {% endif %} \ No newline at end of file diff --git a/salt/zeek/init.sls b/salt/zeek/init.sls index 68908a2ce..8743878da 100644 --- a/salt/zeek/init.sls +++ b/salt/zeek/init.sls @@ -1,5 +1,5 @@ -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set BPF_ZEEK = salt['pillar.get']('zeek:bpf', {}) %} {% set BPF_STATUS = 0 %} diff --git a/setup/so-functions b/setup/so-functions index ad4b4252f..7ebfe3f7a 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1006,8 +1006,8 @@ manager_pillar() { cat "$pillar_file" >> "$setup_log" 2>&1 } -manager_static() { - local static_pillar="$local_salt_dir/pillar/static.sls" +manager_global() { + local global_pillar="$local_salt_dir/pillar/global.sls" if [ -z "$SENSOR_CHECKIN_INTERVAL_MS" ]; then SENSOR_CHECKIN_INTERVAL_MS=10000 @@ -1016,9 +1016,9 @@ manager_static() { fi fi - # Create a static file for global values + # Create a global file for global values printf '%s\n'\ - "static:"\ + "global:"\ " soversion: $SOVERSION"\ " hnmanager: $HNMANAGER"\ " ntpserver: $NTPSERVER"\ @@ -1117,10 +1117,13 @@ manager_static() { " shards: 5"\ " warm: 7"\ " close: 365"\ - " delete: 45" > "$static_pillar" - + " delete: 45"\ + "minio:"\ + " access_key: $ACCESS_KEY"\ + " access_secret: $ACCESS_SECRET" > "$global_pillar" + printf '%s\n' '----' >> "$setup_log" 2>&1 - cat "$static_pillar" >> "$setup_log" 2>&1 + cat "$global_pillar" >> "$setup_log" 2>&1 } minio_generate_keys() { @@ -1520,10 +1523,6 @@ sensor_pillar() { if [ "$HNSENSOR" != 'inherit' ]; then echo " hnsensor: $HNSENSOR" >> "$pillar_file" fi - printf '%s\n'\ - " access_key: $ACCESS_KEY"\ - " access_secret: $ACCESS_SECRET"\ - "" >> "$pillar_file" printf '%s\n' '----' >> "$setup_log" 2>&1 cat "$pillar_file" >> "$setup_log" 2>&1 diff --git a/setup/so-setup b/setup/so-setup index 68ca99824..7335b5acc 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -428,8 +428,8 @@ fi set_progress_str 11 'Updating sudoers file for soremote user' update_sudoers >> $setup_log 2>&1 - set_progress_str 12 'Generating manager static pillar' - manager_static >> $setup_log 2>&1 + set_progress_str 12 'Generating manager global pillar' + manager_global >> $setup_log 2>&1 set_progress_str 13 'Generating manager pillar' manager_pillar >> $setup_log 2>&1 @@ -571,7 +571,7 @@ fi if [[ $is_fleet_standalone && $FLEETCUSTOMHOSTNAME != '' ]]; then set_progress_str 77 "$(print_salt_state_apply 'fleet.event_update-custom-hostname')" - pillar_override="{\"static\":{\"fleet_custom_hostname\": \"$FLEETCUSTOMHOSTNAME\"}}" + pillar_override="{\"global\":{\"fleet_custom_hostname\": \"$FLEETCUSTOMHOSTNAME\"}}" salt-call state.apply -l info fleet.event_update-custom-hostname pillar="$pillar_override" >> 
$setup_log 2>&1 fi From 407160b72989c5b5b7a3d16886389bc788137500 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 16:23:03 -0400 Subject: [PATCH 102/376] Update changes.json --- salt/soc/files/soc/changes.json | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/soc/files/soc/changes.json b/salt/soc/files/soc/changes.json index dc3e4118f..4f359a996 100644 --- a/salt/soc/files/soc/changes.json +++ b/salt/soc/files/soc/changes.json @@ -1,6 +1,10 @@ { - "title": "Security Onion 2.0.2 RC1 is here!", + "title": "Security Onion 2.0.3 RC1 is here!", "changes": [ + { "summary": "Resolved an issue with large drives and the ISO install." }, + { "summary": "Modified ISO installation to use Logical Volume Management (LVM) for disk partitioning." }, + { "summary": "Updated Elastic Stack components to version 7.8.1." }, + { "summary": "Updated Zeek to version 3.0.8." }, { "summary": "Fixed standalone pcap interval issue." }, { "summary": "Security Fix 1067: variables.txt from ISO install stays on disk for 10 days." }, { "summary": "Security Fix 1068: Remove user values from static.sls." }, From c56ead08e950867b4c644e946116ee0915577ce0 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 16:28:50 -0400 Subject: [PATCH 103/376] add so minio docker --- salt/common/tools/sbin/so-docker-refresh | 1 + salt/common/tools/sbin/soup | 1 + salt/minio/init.sls | 7 +++---- setup/so-functions | 1 + 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/salt/common/tools/sbin/so-docker-refresh b/salt/common/tools/sbin/so-docker-refresh index ace1e9554..770d9f241 100755 --- a/salt/common/tools/sbin/so-docker-refresh +++ b/salt/common/tools/sbin/so-docker-refresh @@ -76,6 +76,7 @@ if [ $MANAGERCHECK != 'so-helix' ]; then "so-kibana:$VERSION" \ "so-kratos:$VERSION" \ "so-logstash:$VERSION" \ + "so-minio:$VERSION" \ "so-mysql:$VERSION" \ "so-nginx:$VERSION" \ "so-pcaptools:$VERSION" \ diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 48d9314a3..608394530 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -111,6 +111,7 @@ update_dockers() { "so-kibana" \ "so-kratos" \ "so-logstash" \ + "so-minio" \ "so-mysql" \ "so-nginx" \ "so-pcaptools" \ diff --git a/salt/minio/init.sls b/salt/minio/init.sls index 438face99..f85effe09 100644 --- a/salt/minio/init.sls +++ b/salt/minio/init.sls @@ -15,6 +15,8 @@ {% set access_key = salt['pillar.get']('minio:access_key', '') %} {% set access_secret = salt['pillar.get']('minio:access_secret', '') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} # Minio Setup minioconfdir: @@ -38,12 +40,9 @@ logstashbucket: - group: 939 - makedirs: True -minio/minio: - docker_image.present - minio: docker_container.running: - - image: minio/minio + - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-minio:{{ VERSION }} - hostname: so-minio - user: socore - port_bindings: diff --git a/setup/so-functions b/setup/so-functions index 7ebfe3f7a..de14447e4 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -789,6 +789,7 @@ docker_seed_registry() { "so-grafana:$VERSION" \ "so-influxdb:$VERSION" \ "so-kibana:$VERSION" \ + "so-minio:$VERSION" \ "so-mysql:$VERSION" \ "so-pcaptools:$VERSION" \ "so-playbook:$VERSION" \ From fd039b3008dac2c7dc3328731ba77aae3cd827dc Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 17:11:20 -0400 Subject: [PATCH 104/376] Fix top file for minio --- salt/top.sls | 10 ++++++---- 
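[Editor's note] Patch 103 stops running the upstream `minio/minio` image and instead pulls `so-minio` from the manager's private registry on port 5000, so the container state will fail on every highstate if the image was never seeded (see `docker_seed_registry` in `setup/so-functions`). A hedged sketch for checking the registry over its standard v2 API; `MANAGER`, `IMAGEREPO`, and `VERSION` are placeholders, and `-k` tolerates the self-signed certificate:

```bash
#!/bin/bash
# Sketch: confirm the so-minio image is present in the private registry
# before applying the minio state. All three values below are placeholders.
MANAGER=manager.example.local
IMAGEREPO=securityonion   # assumption: whatever the imagerepo pillar is set to
VERSION=2.0.3

# All repositories known to the registry; so-minio should be listed.
curl -sk "https://${MANAGER}:5000/v2/_catalog"

# Tags published for so-minio; the soversion pillar value should appear.
curl -sk "https://${MANAGER}:5000/v2/${IMAGEREPO}/so-minio/tags/list" | grep -F "$VERSION"
```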
1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/salt/top.sls b/salt/top.sls index 30f198b05..34b825355 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -142,7 +142,6 @@ base: - manager - idstools - suricata.manager - - redis {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %} - mysql {%- endif %} @@ -151,6 +150,7 @@ base: {%- endif %} - logstash - minio + - redis - kibana - elastalert - filebeat @@ -159,6 +159,7 @@ base: {%- if FLEETMANAGER or FLEETNODE %} - fleet - fleet.install_package + - redis {%- endif %} - soctopus {%- if THEHIVE != 0 %} @@ -190,7 +191,6 @@ base: - idstools - suricata.manager - healthcheck - - redis {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %} - mysql {%- endif %} @@ -314,7 +314,7 @@ base: - manager - idstools - suricata.manager - - redis + - minio {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %} - mysql {%- endif %} @@ -330,6 +330,7 @@ base: - schedule {%- if FLEETMANAGER or FLEETNODE %} - fleet + - redis - fleet.install_package {%- endif %} - soctopus @@ -353,7 +354,7 @@ base: - common - telegraf - firewall - - redis + - minio {%- if WAZUH != 0 %} - wazuh {%- endif %} @@ -362,6 +363,7 @@ base: - filebeat {%- if FLEETMANAGER or FLEETNODE %} - fleet.install_package + - redis {%- endif %} - pcap - suricata From 9c5a969c2e18b96665ea21383a0f1f1ba5713811 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 17:18:09 -0400 Subject: [PATCH 105/376] Fix minio init --- salt/minio/init.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/minio/init.sls b/salt/minio/init.sls index f85effe09..d77c775aa 100644 --- a/salt/minio/init.sls +++ b/salt/minio/init.sls @@ -17,6 +17,8 @@ {% set access_secret = salt['pillar.get']('minio:access_secret', '') %} {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} +{% set MANAGER = salt['grains.get']('master') %} + # Minio Setup minioconfdir: From 38d0f519ce79e50418a272db9b12e14b2d6e5482 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 18:00:05 -0400 Subject: [PATCH 106/376] Fix output pillar for minio --- .../logstash/pipelines/config/so/9998_output_minio.conf.jinja | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja index a085ee587..060f42daf 100644 --- a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja +++ b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja @@ -1,6 +1,6 @@ {%- set MANAGER = salt['pillar.get']('global:managerip', '') -%} -{%- set access_key = salt['pillar.get']('global:access_key', '') %} -{%- set access_secret = salt['pillar.get']('global:access_secret', '') %} +{%- set access_key = salt['pillar.get']('minio:access_key', '') %} +{%- set access_secret = salt['pillar.get']('minio:access_secret', '') %} output { s3 { access_key_id => "{{ access_key }}" From a2e5dca06529bc3f4fb1ea938e2f6a50f605acc9 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 18:02:54 -0400 Subject: [PATCH 107/376] Fix output pillar for minio --- salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja index 1f6bf03b4..33a5e9055 100644 --- a/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja +++ 
b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja @@ -4,8 +4,8 @@ {%- set MANAGER = salt['pillar.get']('global:managerip', '') %} {% endif -%} {%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %} -{%- set access_key = salt['pillar.get']('global:access_key', '') %} -{%- set access_secret = salt['pillar.get']('global:access_secret', '') %} +{%- set access_key = salt['pillar.get']('minio:access_key', '') %} +{%- set access_secret = salt['pillar.get']('minio:access_secret', '') %} input { s3 { access_key_id => "{{ access_key }}" From 61ff944087edd51d51ef29305713ed03c71c1b0e Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 18:18:06 -0400 Subject: [PATCH 108/376] add tmp to survive restarts --- salt/logstash/init.sls | 2 +- salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja | 1 + setup/so-setup | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index b63c1ce96..85590673d 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -127,7 +127,7 @@ importdir: # Create the logstash data directory nsmlsdir: file.directory: - - name: /nsm/logstash + - name: /nsm/logstash/tmp - user: 931 - group: 939 - makedirs: True diff --git a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja index 060f42daf..0d8efa4c4 100644 --- a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja +++ b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja @@ -10,6 +10,7 @@ output { size_file => 2048 time_file => 1 codec => json + temporary_directory => "/usr/share/logstash/data/tmp" additional_settings => { "force_path_style" => true } diff --git a/setup/so-setup b/setup/so-setup index 7335b5acc..7f127fc57 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -429,6 +429,7 @@ fi update_sudoers >> $setup_log 2>&1 set_progress_str 12 'Generating manager global pillar' + minio_generate_keys manager_global >> $setup_log 2>&1 set_progress_str 13 'Generating manager pillar' From 5d4a0c53b580bc56ca55720785b29ebfaae130f2 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 21:29:07 -0400 Subject: [PATCH 109/376] add ssl cert for minio --- salt/ssl/init.sls | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 1cef1bf0a..d7c84675e 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -181,6 +181,41 @@ regkeyperms: - mode: 640 - group: 939 +/etc/pki/minio.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/minio.key') -%} + - prereq: + - x509: /etc/pki/minio.crt + {%- endif %} + +# Create a cert for the docker registry +/etc/pki/minio.crt: + x509.certificate_managed: + - ca_server: {{ ca_server }} + - signing_policy: registry + - public_key: /etc/pki/minio.key + - CN: {{ manager }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/minio.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + +miniokeyperms: + file.managed: + - replace: False + - name: /etc/pki/minio.key + - mode: 640 + - group: 939 + /etc/pki/managerssl.key: 
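[Editor's note] The `unless` one-liner in patch 109 above is dense; expanded into a standalone script, the same logic reads as follows. This is purely a readability aid for the existing check, not a replacement for the state:

```bash
#!/bin/bash
# Expanded form of the certificate-renewal guard used in salt/ssl/init.sls.
# Exits 0 (cert still fresh, so salt skips renewal) while more than 5 days remain.
CERT=/etc/pki/minio.crt

# Certificate expiry as a Unix timestamp; openssl prints e.g.
# "notAfter=Nov  3 12:00:00 2022 GMT" and cut strips the key name.
enddate=$(date -d "$(openssl x509 -in "$CERT" -enddate -noout | cut -d= -f2)" +%s)

now=$(date +%s)
expire_date=$((now + 432000))   # now + 5 days, in seconds

# True while the cert expires more than 5 days from now; the state only
# re-issues the certificate once this comparison starts failing.
[ "$enddate" -gt "$expire_date" ]
```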
x509.private_key_managed: - CN: {{ manager }} From a733dceb180f6ed8a5c94610bcc1919115bc2cb1 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 22:33:40 -0400 Subject: [PATCH 110/376] enable ssl minio --- salt/minio/init.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/minio/init.sls b/salt/minio/init.sls index d77c775aa..391f7f811 100644 --- a/salt/minio/init.sls +++ b/salt/minio/init.sls @@ -55,4 +55,6 @@ minio: - binds: - /nsm/minio/data:/data:rw - /opt/so/conf/minio/etc:/root/.minio:rw + - /etc/pki/minio.key:/root/.minio/certs/private.key:ro + - /etc/pki/minio.crt:/root/.minio/certs/private.crt:ro - entrypoint: "/usr/bin/docker-entrypoint.sh server --address :9595 /data" From a765790d6c1575de96c337355da0e6965704839d Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 22:37:04 -0400 Subject: [PATCH 111/376] fix minio container name --- salt/minio/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/minio/init.sls b/salt/minio/init.sls index 391f7f811..2dca6cca3 100644 --- a/salt/minio/init.sls +++ b/salt/minio/init.sls @@ -42,7 +42,7 @@ logstashbucket: - group: 939 - makedirs: True -minio: +so-minio: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-minio:{{ VERSION }} - hostname: so-minio From 58872c9b4817ca82cd7f8cd33bc1a62d48406a93 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 22:40:59 -0400 Subject: [PATCH 112/376] enable ssl logstash --- salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja | 2 +- salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja index 33a5e9055..7358cf6e3 100644 --- a/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja +++ b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja @@ -10,7 +10,7 @@ input { s3 { access_key_id => "{{ access_key }}" secret_access_key => "{{ access_secret }}" - endpoint => "http://{{ MANAGER }}:9595" + endpoint => "https://{{ MANAGER }}:9595" bucket => "logstash" delete => true interval => 10 diff --git a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja index 0d8efa4c4..4092b6edd 100644 --- a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja +++ b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja @@ -5,7 +5,7 @@ output { s3 { access_key_id => "{{ access_key }}" secret_access_key => "{{ access_secret}}" - endpoint => "http://{{ MANAGER }}:9595" + endpoint => "https://{{ MANAGER }}:9595" bucket => "logstash" size_file => 2048 time_file => 1 From 970ee195a1e274de7b74cbe0c44a9736a6e0c527 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 23:08:33 -0400 Subject: [PATCH 113/376] use hostname so TLS will work --- salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja | 4 ++-- .../logstash/pipelines/config/so/9998_output_minio.conf.jinja | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja index 7358cf6e3..27b287532 100644 --- a/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja +++ b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja @@ -1,7 +1,7 @@ {%- if grains.role == 'so-heavynode' %} -{%- set MANAGER = 
salt['pillar.get']('elasticsearch:mainip', '') %} +{%- set MANAGER = salt['grains.get']('host') %} {%- else %} -{%- set MANAGER = salt['pillar.get']('global:managerip', '') %} +{%- set MANAGER = salt['grains.get']('master') %} {% endif -%} {%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %} {%- set access_key = salt['pillar.get']('minio:access_key', '') %} diff --git a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja index 4092b6edd..34a044f34 100644 --- a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja +++ b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja @@ -1,4 +1,4 @@ -{%- set MANAGER = salt['pillar.get']('global:managerip', '') -%} +{%- set MANAGER = salt['grains.get']('master') %} {%- set access_key = salt['pillar.get']('minio:access_key', '') %} {%- set access_secret = salt['pillar.get']('minio:access_secret', '') %} output { From 1855eeaa139102f8b820ec0f3b43dbd1f594aadb Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 23:09:08 -0400 Subject: [PATCH 114/376] fix cert name --- salt/minio/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/minio/init.sls b/salt/minio/init.sls index 2dca6cca3..44c89d4d4 100644 --- a/salt/minio/init.sls +++ b/salt/minio/init.sls @@ -56,5 +56,5 @@ so-minio: - /nsm/minio/data:/data:rw - /opt/so/conf/minio/etc:/root/.minio:rw - /etc/pki/minio.key:/root/.minio/certs/private.key:ro - - /etc/pki/minio.crt:/root/.minio/certs/private.crt:ro + - /etc/pki/minio.crt:/root/.minio/certs/public.crt:ro - entrypoint: "/usr/bin/docker-entrypoint.sh server --address :9595 /data" From 734f2979d27b283a7cf2bf243859275fa68dc405 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 4 Aug 2020 23:20:51 -0400 Subject: [PATCH 115/376] add ca.crt to logstash docker bind --- salt/logstash/init.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 85590673d..ffaee296b 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -166,6 +166,7 @@ so-logstash: - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro + - /etc/pki/ca.crt:/etc/ssl/certs/ca.crt:ro {%- if grains['role'] == 'so-eval' %} - /nsm/zeek:/nsm/zeek:ro - /nsm/suricata:/suricata:ro From e30746c5ca2d43396e0d8f78556d63ca205a4c4c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 5 Aug 2020 14:12:06 -0400 Subject: [PATCH 116/376] Final minio fix --- salt/minio/init.sls | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/salt/minio/init.sls b/salt/minio/init.sls index 44c89d4d4..ece8673bd 100644 --- a/salt/minio/init.sls +++ b/salt/minio/init.sls @@ -19,11 +19,10 @@ {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} - # Minio Setup minioconfdir: file.directory: - - name: /opt/so/conf/minio/etc + - name: /opt/so/conf/minio/etc/certs - user: 939 - group: 939 - makedirs: True @@ -54,7 +53,7 @@ so-minio: - MINIO_SECRET_KEY: {{ access_secret }} - binds: - /nsm/minio/data:/data:rw - - /opt/so/conf/minio/etc:/root/.minio:rw - - /etc/pki/minio.key:/root/.minio/certs/private.key:ro - - /etc/pki/minio.crt:/root/.minio/certs/public.crt:ro - - entrypoint: "/usr/bin/docker-entrypoint.sh server --address :9595 /data" + - /opt/so/conf/minio/etc:/.minio:rw + - 
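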
/etc/pki/minio.key:/.minio/certs/private.key:ro + - /etc/pki/minio.crt:/.minio/certs/public.crt:ro + - entrypoint: "/usr/bin/docker-entrypoint.sh server --certs-dir /.minio/certs --address :9595 /data" \ No newline at end of file From 95cae2f17ac534247cc92cced3952f79a260df0a Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 5 Aug 2020 14:14:35 -0400 Subject: [PATCH 117/376] SSL path for logstash --- salt/logstash/init.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index ffaee296b..356a3aceb 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -148,6 +148,7 @@ so-logstash: - user: logstash - environment: - LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }} + - SSL_CERT_FILE=/etc/ssl/certs/ca.crt - port_bindings: {% for BINDING in DOCKER_OPTIONS.port_bindings %} - {{ BINDING }} From 66ca7b266cd0a6d550112d421ab22d24161e6c99 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 5 Aug 2020 14:44:23 -0400 Subject: [PATCH 118/376] first commit of importpcap node mode code, kek --- salt/firewall/assigned_hostgroups.map.yaml | 48 +++++++ salt/importpcap/bond.sls | 5 + setup/so-functions | 142 +++++++++++---------- setup/so-setup | 76 +++++++++-- 4 files changed, 193 insertions(+), 78 deletions(-) create mode 100644 salt/importpcap/bond.sls diff --git a/salt/firewall/assigned_hostgroups.map.yaml b/salt/firewall/assigned_hostgroups.map.yaml index 2500c604a..eaafd45ca 100644 --- a/salt/firewall/assigned_hostgroups.map.yaml +++ b/salt/firewall/assigned_hostgroups.map.yaml @@ -480,3 +480,51 @@ role: localhost: portgroups: - {{ portgroups.all }} + importpcap: + chain: + DOCKER-USER: + hostgroups: + manager: + portgroups: + - {{ portgroups.kibana }} + - {{ portgroups.redis }} + - {{ portgroups.influxdb }} + - {{ portgroups.elasticsearch_rest }} + - {{ portgroups.elasticsearch_node }} + sensor: + portgroups: + - {{ portgroups.beats_5044 }} + - {{ portgroups.beats_5644 }} + search_node: + portgroups: + - {{ portgroups.redis }} + - {{ portgroups.elasticsearch_node }} + self: + portgroups: + - {{ portgroups.syslog}} + beats_endpoint: + portgroups: + - {{ portgroups.beats_5044 }} + beats_endpoint_ssl: + portgroups: + - {{ portgroups.beats_5644 }} + elasticsearch_rest: + portgroups: + - {{ portgroups.elasticsearch_rest }} + analyst: + portgroups: + - {{ portgroups.nginx }} + INPUT: + hostgroups: + anywhere: + portgroups: + - {{ portgroups.ssh }} + dockernet: + portgroups: + - {{ portgroups.all }} + localhost: + portgroups: + - {{ portgroups.all }} + minion: + portgroups: + - {{ portgroups.salt_manager }} \ No newline at end of file diff --git a/salt/importpcap/bond.sls b/salt/importpcap/bond.sls new file mode 100644 index 000000000..85a4065a2 --- /dev/null +++ b/salt/importpcap/bond.sls @@ -0,0 +1,5 @@ +configure_bond0: + network.managed: + - name: bond0 + - type: bond + - enabled: True \ No newline at end of file diff --git a/setup/so-functions b/setup/so-functions index cb9c75437..12f8d2ec0 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -142,65 +142,7 @@ secrets_pillar(){ fi } -# Enable Bro Logs -zeek_logs_enabled() { - echo "Enabling Bro Logs" >> "$setup_log" 2>&1 - local zeeklogs_pillar=./pillar/zeeklogs.sls - - printf '%s\n'\ - "zeeklogs:"\ - " enabled:" > "$zeeklogs_pillar" - - if [ "$MANAGERADV" = 'ADVANCED' ]; then - for BLOG in "${BLOGS[@]}"; do - echo " - $BLOG" | tr -d '"' >> "$zeeklogs_pillar" - done - else - printf '%s\n'\ - " - conn"\ - " - dce_rpc"\ - " - dhcp"\ - " - dhcpv6"\ - " - dnp3"\ - " - dns"\ - " - dpd"\ - 
" - files"\ - " - ftp"\ - " - http"\ - " - intel"\ - " - irc"\ - " - kerberos"\ - " - modbus"\ - " - mqtt"\ - " - notice"\ - " - ntlm"\ - " - openvpn"\ - " - pe"\ - " - radius"\ - " - rfb"\ - " - rdp"\ - " - signatures"\ - " - sip"\ - " - smb_files"\ - " - smb_mapping"\ - " - smtp"\ - " - snmp"\ - " - software"\ - " - ssh"\ - " - ssl"\ - " - syslog"\ - " - telnet"\ - " - tunnel"\ - " - weird"\ - " - mysql"\ - " - socks"\ - " - x509" >> "$zeeklogs_pillar" - fi - - printf '%s\n' '----' >> "$setup_log" 2>&1 - cat "$zeeklogs_pillar" >> "$setup_log" 2>&1 -} check_admin_pass() { check_pass_match "$ADMINPASS1" "$ADMINPASS2" "APMATCH" @@ -416,11 +358,19 @@ check_requirements() { req_cores=4 if [[ "$node_type" == 'sensor' ]]; then req_nics=2; else req_nics=1; fi if [[ "$node_type" == 'fleet' ]]; then req_mem=4; fi + elif [[ "$standalone_or_dist" == 'importpcap' ]]; then + req_mem=4 + req_cores=2 + req_nics=1 fi - if [[ $setup_type == 'network' ]]; then + if [[ $setup_type == 'network' ]] ; then if [[ -n $nsm_mount ]]; then - req_storage=100 + if [[ "$standalone_or_dist" == 'importpcap' ]]; then + req_storage=50 + else + req_storage=100 + fi if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB" fi @@ -428,7 +378,11 @@ check_requirements() { whiptail_storage_requirements "/nsm" "${free_space_nsm} GB" "${req_storage} GB" fi else - req_storage=200 + if [[ "$standalone_or_dist" == 'importpcap' ]]; then + req_storage=50 + else + req_storage=200 + fi if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB" fi @@ -720,7 +674,7 @@ docker_install() { else case "$install_type" in - 'MANAGER' | 'EVAL') + 'MANAGER' | 'EVAL' | 'STANDALONE' | 'MANAGERSEARCH' | 'IMPORTPCAP') apt-get update >> "$setup_log" 2>&1 ;; *) @@ -1264,7 +1218,7 @@ saltify() { set_progress_str 6 'Installing various dependencies' yum -y install wget nmap-ncat >> "$setup_log" 2>&1 case "$install_type" in - 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE') + 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORTPCAP') reserve_group_ids >> "$setup_log" 2>&1 yum -y install epel-release >> "$setup_log" 2>&1 yum -y install sqlite argon2 curl mariadb-devel >> "$setup_log" 2>&1 @@ -1335,7 +1289,7 @@ saltify() { 'FLEET') if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-mysqldb >> "$setup_log" 2>&1; else apt-get -y install python-mysqldb >> "$setup_log" 2>&1; fi ;; - 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE') # TODO: should this also be HELIXSENSOR? + 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORTPCAP') # TODO: should this also be HELIXSENSOR? 
# Add saltstack repo(s) wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3001/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1 @@ -1802,3 +1756,63 @@ es_heapsize() { export NODE_ES_HEAP_SIZE fi } + +# Enable Bro Logs +zeek_logs_enabled() { + echo "Enabling Bro Logs" >> "$setup_log" 2>&1 + + local zeeklogs_pillar=./pillar/zeeklogs.sls + + printf '%s\n'\ + "zeeklogs:"\ + " enabled:" > "$zeeklogs_pillar" + + if [ "$MANAGERADV" = 'ADVANCED' ]; then + for BLOG in "${BLOGS[@]}"; do + echo " - $BLOG" | tr -d '"' >> "$zeeklogs_pillar" + done + else + printf '%s\n'\ + " - conn"\ + " - dce_rpc"\ + " - dhcp"\ + " - dhcpv6"\ + " - dnp3"\ + " - dns"\ + " - dpd"\ + " - files"\ + " - ftp"\ + " - http"\ + " - intel"\ + " - irc"\ + " - kerberos"\ + " - modbus"\ + " - mqtt"\ + " - notice"\ + " - ntlm"\ + " - openvpn"\ + " - pe"\ + " - radius"\ + " - rfb"\ + " - rdp"\ + " - signatures"\ + " - sip"\ + " - smb_files"\ + " - smb_mapping"\ + " - smtp"\ + " - snmp"\ + " - software"\ + " - ssh"\ + " - ssl"\ + " - syslog"\ + " - telnet"\ + " - tunnel"\ + " - weird"\ + " - mysql"\ + " - socks"\ + " - x509" >> "$zeeklogs_pillar" + fi + + printf '%s\n' '----' >> "$setup_log" 2>&1 + cat "$zeeklogs_pillar" >> "$setup_log" 2>&1 +} \ No newline at end of file diff --git a/setup/so-setup b/setup/so-setup index 68ca99824..260642415 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -165,6 +165,8 @@ elif [ "$install_type" = 'FLEET' ]; then OSQUERY=1 elif [ "$install_type" = 'HELIXSENSOR' ]; then is_helix=true +elif [ "$install_type" = 'IMPORTPCAP' ]; then + is_importpcap=true fi if [[ $is_manager && $is_sensor ]]; then @@ -173,8 +175,10 @@ elif [[ $is_fleet_standalone ]]; then check_requirements "dist" "fleet" elif [[ $is_sensor && ! $is_eval ]]; then check_requirements "dist" "sensor" -elif [[ $is_distmanager || $is_minion ]]; then +elif [[ $is_distmanager || $is_minion ]] && [[ ! 
$is_importpcap ]]; then check_requirements "dist" +elif [[ $is_importpcap ]]; then + check_requirements "importpcap" fi whiptail_patch_schedule @@ -239,13 +243,38 @@ if [[ $is_node ]]; then CURCLOSEDAYS=30 fi +if [[ $is_importpcap ]]; then + patch_schedule=Automatic + RULESETUP=ETOPEN + NSMSETUP=BASIC + HNSENSOR=inherit + MANAGERUPDATES=0 + MANAGERADV=BASIC + ZEEKVERSION=ZEEK + NIDS=Suricata + RULESETUP=ETOPEN + GRAFANA=0 + OSQUERY=0 + WAZUH=0 + THEHIVE=0 + PLAYBOOK=0 + STRELKA=0 + +fi + + # Start user prompts + + if [[ $is_helix || $is_sensor ]]; then whiptail_sensor_nics +fi + +if [[ $is_helix || $is_sensor || $is_importpcap ]]; then calculate_useable_cores fi -if [[ $is_helix || $is_manager ]]; then +if [[ $is_helix || $is_manager || $is_importpcap ]]; then whiptail_homenet_manager fi @@ -274,6 +303,9 @@ if [[ $is_manager ]]; then if [[ $STRELKA == 1 ]]; then whiptail_strelka_rules fi +fi + +if [[ $is_manager || $is_importpcap ]]; then collect_webuser_inputs get_redirect fi @@ -335,7 +367,7 @@ else FLEETNODEPASSWD1=$WEBPASSWD1 fi -if [[ $is_manager ]]; then whiptail_so_allow; fi +if [[ $is_manager || $is_importpcap ]]; then whiptail_so_allow; fi whiptail_make_changes @@ -359,7 +391,7 @@ fi } >> $setup_log 2>&1 -if [[ $is_manager ]]; then +if [[ $is_manager || $is_importpcap ]]; then { generate_passwords; secrets_pillar; @@ -399,6 +431,9 @@ fi if [[ $is_sensor || $is_helix ]]; then set_progress_str 3 'Configuring sensor interface' configure_network_sensor >> $setup_log 2>&1 + fi + + if [[ $is_sensor || $is_helix || $is_importpcap ]]; then set_progress_str 4 'Generating sensor pillar' sensor_pillar >> $setup_log 2>&1 fi @@ -415,7 +450,7 @@ fi set_progress_str 9 'Initializing Salt minion' configure_minion "$minion_type" >> $setup_log 2>&1 - if [[ $is_manager || $is_helix ]]; then + if [[ $is_manager || $is_helix || $is_importpcap ]]; then set_progress_str 10 'Configuring Salt master' { create_local_directories; @@ -459,7 +494,7 @@ fi accept_salt_key_remote >> $setup_log 2>&1 fi - if [[ $is_manager ]]; then + if [[ $is_manager || $is_importpcap ]]; then set_progress_str 20 'Accepting Salt key' salt-key -ya "$MINION_ID" >> $setup_log 2>&1 fi @@ -472,10 +507,15 @@ fi salt-call state.apply salt.minion -l info >> $setup_log 2>&1 fi + if [[ $is_importpcap ]]; then + set_progress_str 22 'Configuring bond interface' + salt-call state.apply importpcap.bond -l info >> $setup_log 2>&1 + fi + set_progress_str 23 'Generating CA and checking in' salt_checkin >> $setup_log 2>&1 - if [[ $is_manager || $is_helix ]]; then + if [[ $is_manager || $is_helix || $is_importpcap ]]; then set_progress_str 25 'Configuring firewall' set_initial_firewall_policy >> $setup_log 2>&1 @@ -485,14 +525,18 @@ fi set_progress_str 26 'Downloading containers from the internet' fi - salt-call state.apply -l info registry >> $setup_log 2>&1 - docker_seed_registry 2>> "$setup_log" # ~ 60% when finished + if [[ ! $is_importpcap ]]; then + salt-call state.apply -l info registry >> $setup_log 2>&1 + docker_seed_registry 2>> "$setup_log" # ~ 60% when finished + fi set_progress_str 60 "$(print_salt_state_apply 'manager')" salt-call state.apply -l info manager >> $setup_log 2>&1 - set_progress_str 61 "$(print_salt_state_apply 'idstools')" - salt-call state.apply -l info idstools >> $setup_log 2>&1 + if [[ ! 
$is_importpcap ]]; then + set_progress_str 61 "$(print_salt_state_apply 'idstools')" + salt-call state.apply -l info idstools >> $setup_log 2>&1 + fi set_progress_str 61 "$(print_salt_state_apply 'suricata.manager')" salt-call state.apply -l info suricata.manager >> $setup_log 2>&1 @@ -513,7 +557,7 @@ fi set_progress_str 64 "$(print_salt_state_apply 'nginx')" salt-call state.apply -l info nginx >> $setup_log 2>&1 - if [[ $is_manager || $is_node ]]; then + if [[ $is_manager || $is_node || $is_importpcap ]]; then set_progress_str 64 "$(print_salt_state_apply 'elasticsearch')" salt-call state.apply -l info elasticsearch >> $setup_log 2>&1 fi @@ -521,7 +565,9 @@ fi if [[ $is_sensor ]]; then set_progress_str 65 "$(print_salt_state_apply 'pcap')" salt-call state.apply -l info pcap >> $setup_log 2>&1 + fi + if [[ $is_sensor || $is_importpcap ]]; then set_progress_str 66 "$(print_salt_state_apply 'suricata')" salt-call state.apply -l info suricata >> $setup_log 2>&1 @@ -534,13 +580,15 @@ fi salt-call state.apply -l info curator >> $setup_log 2>&1 fi - if [[ $is_manager ]]; then + if [[ $is_manager || $is_importpcap ]]; then set_progress_str 69 "$(print_salt_state_apply 'soc')" salt-call state.apply -l info soc >> $setup_log 2>&1 set_progress_str 70 "$(print_salt_state_apply 'kibana')" salt-call state.apply -l info kibana >> $setup_log 2>&1 + fi + if [[ $is_manager ]]; then set_progress_str 71 "$(print_salt_state_apply 'elastalert')" salt-call state.apply -l info elastalert >> $setup_log 2>&1 @@ -598,7 +646,7 @@ fi fi fi - if [[ $is_manager || $is_helix ]]; then + if [[ $is_manager || $is_helix || $is_importpcap ]]; then set_progress_str 81 "$(print_salt_state_apply 'utility')" salt-call state.apply -l info utility >> $setup_log 2>&1 fi From 83dc35c72082d64f7abae925b79619d9a7f06d84 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 5 Aug 2020 15:24:11 -0400 Subject: [PATCH 119/376] add importpcap mode to whiptail --- setup/so-whiptail | 1 + 1 file changed, 1 insertion(+) diff --git a/setup/so-whiptail b/setup/so-whiptail index 5b201818e..2ba6da10e 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -475,6 +475,7 @@ whiptail_install_type() { "EVAL" "Evaluation mode (not for production) " ON \ "STANDALONE" "Standalone production install " OFF \ "DISTRIBUTED" "Distributed install submenu " OFF \ + "IMPORTPCAP" "Import PCAP mode " OFF \ 3>&1 1>&2 2>&3 ) From 8079dc54fc81380a1fae899809fe77a0bd53140a Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 5 Aug 2020 15:42:22 -0400 Subject: [PATCH 120/376] add stuff for /etc/salt/minion to get populated for importpcap node --- setup/so-functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 12f8d2ec0..b7050cf70 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -302,7 +302,7 @@ configure_minion() { 'helix') echo "master: $HOSTNAME" >> "$minion_config" ;; - 'manager' | 'eval' | 'managersearch' | 'standalone') + 'manager' | 'eval' | 'managersearch' | 'standalone' | 'importpcap') printf '%s\n'\ "master: $HOSTNAME"\ "mysql.host: '$MAINIP'"\ @@ -856,7 +856,7 @@ got_root() { get_minion_type() { local minion_type case "$install_type" in - 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE') + 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE' | 'IMPORTPCAP') minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]') ;; 'HELIXSENSOR') From 64c366971fc54fccd84854da82cccbf9462a5f46 Mon Sep 17 00:00:00 
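[Editor's note] Patch 119 above exposes the new mode as one more entry in the `whiptail_install_type` radiolist. For reference, a minimal standalone sketch of that whiptail pattern, with a trimmed menu; the title, dimensions, and descriptions are illustrative, and the `3>&1 1>&2 2>&3` swap (used throughout `setup/so-whiptail`) captures the selected tag from stderr:

```bash
#!/bin/bash
# Minimal sketch of the radiolist pattern behind whiptail_install_type.
install_type=$(whiptail --title "Security Onion Setup" \
    --radiolist "Choose install type:" 12 70 3 \
    "EVAL"       "Evaluation mode (not for production) " ON  \
    "STANDALONE" "Standalone production install "        OFF \
    "IMPORTPCAP" "Import PCAP mode "                     OFF \
    3>&1 1>&2 2>&3)

echo "Selected: $install_type"
```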
2001 From: William Wernert Date: Wed, 5 Aug 2020 16:13:25 -0400 Subject: [PATCH 121/376] [fix] Redirect ca state apply in setup to /dev/null Redirect ca state apply line in accept_salt_key_remote to /dev/null to avoid generating error in setup log --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index ad4b4252f..837df5eb5 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -27,7 +27,7 @@ accept_salt_key_remote() { echo "Accept the key remotely on the manager" >> "$setup_log" 2>&1 # Delete the key just in case. ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -d "$MINION_ID" -y - salt-call state.apply ca + salt-call state.apply ca >> /dev/null 2>&1 ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -a "$MINION_ID" -y } From 30ff6d2b93def2d8cb2640787c3561f5e4e375be Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 5 Aug 2020 16:28:32 -0400 Subject: [PATCH 122/376] Update event fields to reflect new ECS terms - WIP --- salt/soc/files/soc/soc.json | 70 ++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json index 31e49fc86..d64f95983 100644 --- a/salt/soc/files/soc/soc.json +++ b/salt/soc/files/soc/soc.json @@ -33,44 +33,44 @@ "mostRecentlyUsedLimit": 5, "eventFields": { "default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "network.community_id", "event.dataset" ], - "bro_conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "service", "log.id.uid" ], - "bro_dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "endpoint", "named_pipe", "operation", "log.id.uid" ], - "bro_dhcp": ["soc_timestamp", "source.ip", "destination.ip", "domain_name", "hostname", "message_types", "log.id.uid" ], - "bro_dnp3": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "fc_reply", "log.id.uid" ], - "bro_dns": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "query", "query_type_name", "rcode_name", "log.id.uid" ], - "bro_dpd": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ], - "bro_files": ["soc_timestamp", "source.ip", "destination.ip", "log.id.flog.id.uid", "mimetype", "source", "log.id.uid" ], - "bro_ftp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ftp_argument", "ftp_command", "reply_code", "log.id.uid", "username" ], - "bro_http": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "method", "virtual_host", "status_code", "status_message", "log.id.uid" ], - "bro_intel": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "indicator", "indicator_type", "seen_where", "log.id.uid" ], - "bro_irc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "irc_command", "log.id.uid", "value" ], - "bro_kerberos": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "client", "service", "request_type", "log.id.uid" ], - "bro_modbus": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "function", "log.id.uid" ], - "bro_mysql": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "mysql_argument", "mysql_command", 
"mysql_success", "response", "log.id.uid" ], - "bro_notice": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "msg", "log.id.uid" ], - "bro_ntlm": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "hostname", "ntlm_success", "server_dns_computer_name", "server_nb_computer_name", "server_tree_name", "log.id.uid" ], - "bro_pe": ["soc_timestamp", "is_64bit", "is_exe", "machine", "os", "subsystem", "log.id.flog.id.uid" ], - "bro_radius": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "username", "framed_addr", "reply_msg", "result" ], - "bro_rdp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "client_build", "client_name", "cookie", "encryption_level", "encryption_method", "keyboard_layout", "result", "security_protocol", "log.id.uid" ], - "bro_rfb": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "authentication_method", "auth", "share_flag", "desktop_name", "log.id.uid" ], - "bro_signatures" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "signature_id", "event_message", "sub_message", "signature_count", "host_count", "log.id.uid" ], - "bro_sip": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "method", "uri", "request_from", "request_to", "response_from", "response_to", "call_id", "subject", "user_agent", "status_code", "log.id.uid" ], - "bro_smb_files" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.flog.id.uid", "action", "path", "name", "size", "prev_name", "log.id.uid" ], - "bro_smb_mapping" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "path", "service", "share_type", "log.id.uid" ], - "bro_smtp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "helo", "mail_from", "recipient_to", "from", "to", "cc", "reply_to", "subject", "useragent", "log.id.uid" ], - "bro_snmp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "community", "version", "log.id.uid" ], - "bro_socks": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ], - "bro_software": ["soc_timestamp", "source.ip", "name", "software_type" ], - "bro_ssh": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "version", "hassh", "direction", "client", "server", "log.id.uid" ], - "bro_ssl": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "cipher", "curve", "server_name", "log.id.uid", "validation_status", "version" ], - "bro_syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "facility", "protocol", "severity", "syslog-priority", "log.id.uid" ], - "bro_tunnels": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "tunnel_type", "action", "log.id.uid" ], - "bro_weird": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "name", "log.id.uid" ], - "bro_x509": ["soc_timestamp", "certificate_common_name", "certificate_country_code", "certificate_key_length", "issuer_organization", "log.id.id" ], + "zeek:conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "network.protocol", "log.id.uid" ], + "zeek:dce_rpc": ["soc_timestamp", 
"source.ip", "source.port", "destination.ip", "destination.port", "endpoint", "named_pipe", "operation", "log.id.uid" ], + "zeek:dhcp": ["soc_timestamp", "source.ip", "destination.ip", "domain_name", "hostname", "message_types", "log.id.uid" ], + "zeek:dnp3": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "fc_reply", "log.id.uid" ], + "zeek:dns": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "query", "query_type_name", "rcode_name", "log.id.uid" ], + "zeek:dpd": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ], + "zeek:files": ["soc_timestamp", "source.ip", "destination.ip", "log.id.flog.id.uid", "mimetype", "source", "log.id.uid" ], + "zeek:ftp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ftp_argument", "ftp_command", "reply_code", "log.id.uid", "username" ], + "zeek:http": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "method", "virtual_host", "status_code", "status_message", "log.id.uid" ], + "zeek:intel": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "indicator", "indicator_type", "seen_where", "log.id.uid" ], + "zeek:irc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "irc_command", "log.id.uid", "value" ], + "zeek:kerberos": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "client", "network.protocol", "request_type", "log.id.uid" ], + "zeek:modbus": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "function", "log.id.uid" ], + "zeek:mysql": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "mysql_argument", "mysql_command", "mysql_success", "response", "log.id.uid" ], + "zeek:notice": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "msg", "log.id.uid" ], + "zeek:ntlm": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "hostname", "ntlm_success", "server_dns_computer_name", "server_nb_computer_name", "server_tree_name", "log.id.uid" ], + "zeek:pe": ["soc_timestamp", "is_64bit", "is_exe", "machine", "os", "subsystem", "log.id.flog.id.uid" ], + "zeek:radius": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "username", "framed_addr", "reply_msg", "result" ], + "zeek:rdp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "client_build", "client_name", "cookie", "encryption_level", "encryption_method", "keyboard_layout", "result", "security_protocol", "log.id.uid" ], + "zeek:rfb": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "authentication_method", "auth", "share_flag", "desktop_name", "log.id.uid" ], + "zeek:signatures" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "signature_id", "event_message", "sub_message", "signature_count", "host_count", "log.id.uid" ], + "zeek:sip": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "method", "uri", "request_from", "request_to", "response_from", "response_to", "call_id", "subject", "user_agent", "status_code", "log.id.uid" ], + "zeek:smb_files" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.fuid", "action", "path", "name", 
"size", "prev_name", "log.id.uid" ], + "zeek:smb_mapping" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "path", "network.protocol", "share_type", "log.id.uid" ], + "zeek:smtp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "helo", "mail_from", "recipient_to", "from", "to", "cc", "reply_to", "subject", "useragent", "log.id.uid" ], + "zeek:snmp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "community", "version", "log.id.uid" ], + "zeek:socks": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ], + "zeek:software": ["soc_timestamp", "source.ip", "name", "software_type" ], + "zeek:ssh": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssh.version", "ssh.hassh_version", "ssh.direction", "ssh.client", "ssh.server", "log.id.uid" ], + "zeek:ssl": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssl.cipher", "ssl.curve", "ssl.certificate.subject", "ssl.validation_status", "ssl.version", "log.id.uid" ], + "zeek:syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "syslog.facility", "network.protocol", "syslog.severity", "log.id.uid" ], + "zeek:tunnels": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "tunnel_type", "action", "log.id.uid" ], + "zeek:weird": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "weird.name", "log.id.uid" ], + "zeek:x509": ["soc_timestamp", "x509.certificate.subject", "x509.certificate.key.type", "x509.certificate.key.length", "x509.certificate.issuer", "log.id.id" ], "cron" : ["soc_timestamp", "message" ], "anacron": ["soc_timestamp", "message" ], "bluetoothd": ["soc_timestamp", "message" ], - "firewall": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "direction", "interface", "action", "reason" ], + "firewall": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "direction", "interface", "action", "reason" ], "ntpd" : ["soc_timestamp", "message" ], "ossec": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "alert_level", "classification", "description", "username", "escalated_user", "location", "process" ], "pulseaudio": ["soc_timestamp", "message" ], From 633c100ace35906f3245e67c28c239cf86e84bc5 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 5 Aug 2020 16:40:21 -0400 Subject: [PATCH 123/376] final logstash tweaks --- salt/logstash/init.sls | 2 +- salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 356a3aceb..1fa5b0e86 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -167,7 +167,7 @@ so-logstash: - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro - - /etc/pki/ca.crt:/etc/ssl/certs/ca.crt:ro + - /etc/ssl/certs/intca.crt:/etc/ssl/certs/ca.crt:ro {%- if grains['role'] == 'so-eval' %} - /nsm/zeek:/nsm/zeek:ro - /nsm/suricata:/suricata:ro diff --git a/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja index 27b287532..36a81b537 100644 --- 
a/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja +++ b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja @@ -13,7 +13,7 @@ input { endpoint => "https://{{ MANAGER }}:9595" bucket => "logstash" delete => true - interval => 10 + interval => 5 codec => json additional_settings => { "force_path_style" => true From d7801acea5453b77b5e071348b8a8065e3c6c7f5 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 5 Aug 2020 17:09:41 -0400 Subject: [PATCH 124/376] add mode 1 --- salt/importpcap/bond.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/importpcap/bond.sls b/salt/importpcap/bond.sls index 85a4065a2..14de298ec 100644 --- a/salt/importpcap/bond.sls +++ b/salt/importpcap/bond.sls @@ -2,4 +2,5 @@ configure_bond0: network.managed: - name: bond0 - type: bond + - mode: '1' - enabled: True \ No newline at end of file From d9b1127308826706a184b8331d56a0aa6e92199c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 5 Aug 2020 22:36:23 -0400 Subject: [PATCH 125/376] Switch to gzip encoding --- salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja index 34a044f34..08c81cee9 100644 --- a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja +++ b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja @@ -10,6 +10,7 @@ output { size_file => 2048 time_file => 1 codec => json + encoding => gzip temporary_directory => "/usr/share/logstash/data/tmp" additional_settings => { "force_path_style" => true From 4e40615e51bea1427f2dfb4cff1d26af56988b3a Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 5 Aug 2020 22:47:12 -0400 Subject: [PATCH 126/376] Add tuneable to the global pillar --- .../pipelines/config/so/0899_input_minio.conf.jinja | 3 ++- .../pipelines/config/so/9998_output_minio.conf.jinja | 9 ++++++--- setup/so-functions | 7 ++++++- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja index 36a81b537..59e457115 100644 --- a/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja +++ b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja @@ -6,6 +6,7 @@ {%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %} {%- set access_key = salt['pillar.get']('minio:access_key', '') %} {%- set access_secret = salt['pillar.get']('minio:access_secret', '') %} +{%- set INTERVAL = salt['pillar.get']('s3_settings:interval', 5) %} input { s3 { access_key_id => "{{ access_key }}" @@ -13,7 +14,7 @@ input { endpoint => "https://{{ MANAGER }}:9595" bucket => "logstash" delete => true - interval => 5 + interval => {{ INTERVAL }} codec => json additional_settings => { "force_path_style" => true diff --git a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja index 08c81cee9..37f829ec0 100644 --- a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja +++ b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja @@ -1,16 +1,19 @@ {%- set MANAGER = salt['grains.get']('master') %} {%- set access_key = salt['pillar.get']('minio:access_key', '') %} {%- set access_secret = salt['pillar.get']('minio:access_secret', '') %} +{%- set SIZE_FILE = salt['pillar.get']('s3_settings:size_file', 2048) %} +{%- set TIME_FILE = 
salt['pillar.get']('s3_settings:time_file', 1) %} +{%- set ENCODING = salt['pillar.get']('s3_settings:encoding', 'gzip') %} output { s3 { access_key_id => "{{ access_key }}" secret_access_key => "{{ access_secret}}" endpoint => "https://{{ MANAGER }}:9595" bucket => "logstash" - size_file => 2048 - time_file => 1 + size_file => {{ SIZE_FILE }} + time_file => {{ TIME_FILE }} codec => json - encoding => gzip + encoding => {{ ENCODING }} temporary_directory => "/usr/share/logstash/data/tmp" additional_settings => { "force_path_style" => true diff --git a/setup/so-functions b/setup/so-functions index de14447e4..fdf667d76 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1121,7 +1121,12 @@ manager_global() { " delete: 45"\ "minio:"\ " access_key: $ACCESS_KEY"\ - " access_secret: $ACCESS_SECRET" > "$global_pillar" + " access_secret: $ACCESS_SECRET"\ + "s3_settings:"\ + " size_file: 2048"\ + " time_file: 1"\ + " encoding: gzip"\ + " interval: 5" > "$global_pillar" printf '%s\n' '----' >> "$setup_log" 2>&1 cat "$global_pillar" >> "$setup_log" 2>&1 From e7225349a6133c925270c994b8735acc2d678c06 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 5 Aug 2020 22:56:41 -0400 Subject: [PATCH 127/376] Ability to toggle between redis and minio --- pillar/logstash/manager.sls | 5 +++++ pillar/logstash/search.sls | 5 +++++ setup/so-functions | 1 + 3 files changed, 11 insertions(+) diff --git a/pillar/logstash/manager.sls b/pillar/logstash/manager.sls index 861b8f665..dcf222ae4 100644 --- a/pillar/logstash/manager.sls +++ b/pillar/logstash/manager.sls @@ -1,7 +1,12 @@ +{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'minio') %} logstash: pipelines: manager: config: - so/0009_input_beats.conf - so/0010_input_hhbeats.conf + {%- if PIPELINE == "minio"%} - so/9998_output_minio.conf.jinja + {%- else %} + - so/9999_output_redis.conf.jinja + {%- endif %} \ No newline at end of file diff --git a/pillar/logstash/search.sls b/pillar/logstash/search.sls index cad849153..22f73c5d4 100644 --- a/pillar/logstash/search.sls +++ b/pillar/logstash/search.sls @@ -1,8 +1,13 @@ +{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'minio') %} logstash: pipelines: search: config: + {%- if PIPELINE == "minio"%} - so/0899_input_minio.conf.jinja + {%- else %} + - so/0900_input_redis.conf.jinja + {%- endif %} - so/9000_output_zeek.conf.jinja - so/9002_output_import.conf.jinja - so/9034_output_syslog.conf.jinja diff --git a/setup/so-functions b/setup/so-functions index fdf667d76..d965a8b86 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1049,6 +1049,7 @@ manager_global() { " wazuh: $WAZUH"\ " managerupdate: $MANAGERUPDATES"\ " imagerepo: $IMAGEREPO"\ + " pipeline: minio"\ "pcap:"\ " sensor_checkin_interval_ms: $SENSOR_CHECKIN_INTERVAL_MS"\ "strelka:"\ From 15efe77e066da203cd09200f2f6e48c669369518 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 6 Aug 2020 13:11:47 -0400 Subject: [PATCH 128/376] Ingest Parsing Update for Sysmon/WEL --- salt/elasticsearch/files/ingest/sysmon | 1 + salt/elasticsearch/files/ingest/win.eventlogs | 1 + .../templates/so/so-common-template.json | 12 ++++++++++-- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/files/ingest/sysmon b/salt/elasticsearch/files/ingest/sysmon index 6e5f9e60f..feb96720d 100644 --- a/salt/elasticsearch/files/ingest/sysmon +++ b/salt/elasticsearch/files/ingest/sysmon @@ -4,6 +4,7 @@ {"community_id": {"if": "ctx.winlog.event_data?.Protocol != null", 
"field":["winlog.event_data.SourceIp","winlog.event_data.SourcePort","winlog.event_data.DestinationIp","winlog.event_data.DestinationPort","winlog.event_data.Protocol"],"target_field":"network.community_id"}}, { "set": { "field": "event.code", "value": "{{winlog.event_id}}", "override": true } }, { "set": { "field": "event.module", "value": "sysmon", "override": true } }, + { "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } }, { "set": { "if": "ctx.event?.code == '3'", "field": "event.category", "value": "host,process,network", "override": true } }, { "set": { "if": "ctx.event?.code == '1'", "field": "event.category", "value": "host,process", "override": true } }, { "set": { "if": "ctx.event?.code == '1'", "field": "event.dataset", "value": "process_creation", "override": true } }, diff --git a/salt/elasticsearch/files/ingest/win.eventlogs b/salt/elasticsearch/files/ingest/win.eventlogs index 962286d3a..0a128aae9 100644 --- a/salt/elasticsearch/files/ingest/win.eventlogs +++ b/salt/elasticsearch/files/ingest/win.eventlogs @@ -3,6 +3,7 @@ "processors" : [ { "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } }, { "set": { "if": "ctx.winlog?.channel != null", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } }, + { "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } }, { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } } ] diff --git a/salt/elasticsearch/templates/so/so-common-template.json b/salt/elasticsearch/templates/so/so-common-template.json index 85a65fd6f..745abbc28 100644 --- a/salt/elasticsearch/templates/so/so-common-template.json +++ b/salt/elasticsearch/templates/so/so-common-template.json @@ -387,8 +387,16 @@ }, "winlog":{ "type":"object", - "dynamic": true - }, + "dynamic": true, + "properties":{ + "event_id":{ + "type":"long" + }, + "event_data":{ + "type":"object" + } + } + }, "x509":{ "type":"object", "dynamic": true From 4f9ef890980eee18b2184902bdf34f77385b9d71 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 6 Aug 2020 14:30:44 -0400 Subject: [PATCH 129/376] Simplify elastalert rules --- salt/elastalert/files/rules/so/suricata_thehive.yaml | 8 ++------ salt/elastalert/files/rules/so/wazuh_thehive.yaml | 8 ++------ 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/salt/elastalert/files/rules/so/suricata_thehive.yaml b/salt/elastalert/files/rules/so/suricata_thehive.yaml index 0135edadd..8657d4168 100644 --- a/salt/elastalert/files/rules/so/suricata_thehive.yaml +++ b/salt/elastalert/files/rules/so/suricata_thehive.yaml @@ -8,14 +8,10 @@ es_host: {{es}} es_port: 9200 name: Suricata-Alert -type: frequency +type: any index: "*:so-ids-*" -num_events: 1 -timeframe: - minutes: 10 buffer_time: - minutes: 10 -allow_buffer_time_overlap: true + minutes: 5 query_key: ["rule.uuid","source.ip","destination.ip"] realert: days: 1 diff --git a/salt/elastalert/files/rules/so/wazuh_thehive.yaml b/salt/elastalert/files/rules/so/wazuh_thehive.yaml index 8aa085566..7fd49e23e 100644 --- a/salt/elastalert/files/rules/so/wazuh_thehive.yaml +++ b/salt/elastalert/files/rules/so/wazuh_thehive.yaml @@ -8,14 +8,10 @@ es_host: {{es}} 
es_port: 9200 name: Wazuh-Alert -type: frequency +type: any index: "*:so-ossec-*" -num_events: 1 -timeframe: - minutes: 10 buffer_time: - minutes: 10 -allow_buffer_time_overlap: true + minutes: 5 realert: days: 1 filter: From 31fd0b6407a4cc70bd4cbfe2848b30cf2fe9a5cb Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 6 Aug 2020 14:59:32 -0400 Subject: [PATCH 130/376] Update the Hunt event fields lookups to reflect the latest ingest configs --- salt/soc/files/soc/soc.json | 88 +++++++++++++++++-------------------- 1 file changed, 41 insertions(+), 47 deletions(-) diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json index b098931ba..999819356 100644 --- a/salt/soc/files/soc/soc.json +++ b/salt/soc/files/soc/soc.json @@ -33,53 +33,47 @@ "mostRecentlyUsedLimit": 5, "eventFields": { "default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "network.community_id", "event.dataset" ], - "zeek:conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "network.protocol", "log.id.uid" ], - "zeek:dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "endpoint", "named_pipe", "operation", "log.id.uid" ], - "zeek:dhcp": ["soc_timestamp", "source.ip", "destination.ip", "domain_name", "hostname", "message_types", "log.id.uid" ], - "zeek:dnp3": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "fc_reply", "log.id.uid" ], - "zeek:dns": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "query", "query_type_name", "rcode_name", "log.id.uid" ], - "zeek:dpd": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ], - "zeek:files": ["soc_timestamp", "source.ip", "destination.ip", "log.id.flog.id.uid", "mimetype", "source", "log.id.uid" ], - "zeek:ftp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ftp_argument", "ftp_command", "reply_code", "log.id.uid", "username" ], - "zeek:http": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "method", "virtual_host", "status_code", "status_message", "log.id.uid" ], - "zeek:intel": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "indicator", "indicator_type", "seen_where", "log.id.uid" ], - "zeek:irc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "irc_command", "log.id.uid", "value" ], - "zeek:kerberos": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "client", "network.protocol", "request_type", "log.id.uid" ], - "zeek:modbus": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "function", "log.id.uid" ], - "zeek:mysql": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "mysql_argument", "mysql_command", "mysql_success", "response", "log.id.uid" ], - "zeek:notice": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "msg", "log.id.uid" ], - "zeek:ntlm": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "hostname", "ntlm_success", "server_dns_computer_name", "server_nb_computer_name", "server_tree_name", "log.id.uid" ], - "zeek:pe": ["soc_timestamp", "is_64bit", "is_exe", "machine", "os", "subsystem", "log.id.flog.id.uid" ], - "zeek:radius": ["soc_timestamp", 
"source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "username", "framed_addr", "reply_msg", "result" ], - "zeek:rdp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "client_build", "client_name", "cookie", "encryption_level", "encryption_method", "keyboard_layout", "result", "security_protocol", "log.id.uid" ], - "zeek:rfb": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "authentication_method", "auth", "share_flag", "desktop_name", "log.id.uid" ], - "zeek:signatures" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "signature_id", "event_message", "sub_message", "signature_count", "host_count", "log.id.uid" ], - "zeek:sip": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "method", "uri", "request_from", "request_to", "response_from", "response_to", "call_id", "subject", "user_agent", "status_code", "log.id.uid" ], - "zeek:smb_files" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.fuid", "action", "path", "name", "size", "prev_name", "log.id.uid" ], - "zeek:smb_mapping" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "path", "network.protocol", "share_type", "log.id.uid" ], - "zeek:smtp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "helo", "mail_from", "recipient_to", "from", "to", "cc", "reply_to", "subject", "useragent", "log.id.uid" ], - "zeek:snmp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "community", "version", "log.id.uid" ], - "zeek:socks": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ], - "zeek:software": ["soc_timestamp", "source.ip", "name", "software_type" ], - "zeek:ssh": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssh.version", "ssh.hassh_version", "ssh.direction", "ssh.client", "ssh.server", "log.id.uid" ], - "zeek:ssl": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssl.cipher", "ssl.curve", "ssl.certificate.subject", "ssl.validation_status", "ssl.version", "log.id.uid" ], - "zeek:syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "syslog.facility", "network.protocol", "syslog.severity", "log.id.uid" ], - "zeek:tunnels": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "tunnel_type", "action", "log.id.uid" ], - "zeek:weird": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "weird.name", "log.id.uid" ], - "zeek:x509": ["soc_timestamp", "x509.certificate.subject", "x509.certificate.key.type", "x509.certificate.key.length", "x509.certificate.issuer", "log.id.id" ], - "cron" : ["soc_timestamp", "message" ], - "anacron": ["soc_timestamp", "message" ], - "bluetoothd": ["soc_timestamp", "message" ], - "firewall": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "direction", "interface", "action", "reason" ], - "ntpd" : ["soc_timestamp", "message" ], - "ossec": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "alert_level", "classification", "description", "username", "escalated_user", "location", "process" ], - "pulseaudio": ["soc_timestamp", "message" ], - "snort": ["soc_timestamp", "source.ip", 
"source.port", "destination.ip", "destination.port", "sid", "alert", "category", "classification", "severity" ], - "su" : ["soc_timestamp", "message" ], - "sudo" : ["soc_timestamp", "message" ], - "systemd": ["soc_timestamp", "message" ], - "sysmon": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "host.name", "event.dataset", "parent_image_path", "source_name", "task", "user.name" ], - "wineventlog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "host.name", "event.code", "event.dataset", "source_name", "task" ] + "::conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "network.protocol", "log.id.uid" ], + "::dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "dce_rpc.endpoint", "dce_rpc.named_pipe", "dce_rpc.operation", "log.id.uid" ], + "::dhcp": ["soc_timestamp", "source.ip", "destination.ip", "host.domain", "host.hostname", "dhcp.message_types", "log.id.uid" ], + "::dnp3": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "dnp3.fc_reply", "log.id.uid" ], + "::dns": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "dns.query.name", "dns.query.type_name", "dns.response.code_name", "log.id.uid" ], + "::dpd": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.protocol", "observer.analyser", "error.reason", "log.id.uid" ], + "::files": ["soc_timestamp", "source.ip", "destination.ip", "file.name", "file.mime_type", "file.source", "file.bytes.total", "log.id.fuid", "log.id.uid" ], + "::ftp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ftp.user", "ftp.command", "ftp.argument", "ftp.reply_code", "file.size", "log.id.uid" ], + "::http": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "http.method", "http.virtual_host", "http.status_code", "http.status_message", "http.request.body.length", "http.response.body.length", "log.id.uid" ], + "::intel": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "intel.indicator", "intel.indicator_type", "intel.seen_where", "log.id.uid" ], + "::irc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "irc.username", "irc.nickname", "irc.command.type", "irc.command.value", "irc.command.info", "log.id.uid" ], + "::kerberos": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "kerberos.client", "kerberos.service", "kerberos.request_type", "log.id.uid" ], + "::modbus": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "modbus.function", "log.id.uid" ], + "::mysql": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "mysql.command", "mysql.argument", "mysql.success", "mysql.response", "log.id.uid" ], + "::notice": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "notice.note", "notice.message", "log.id.fuid", "log.id.uid" ], + "::ntlm": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ntlm.name", "ntlm.success", "ntlm.server.dns.name", "ntlm.server.nb.name", "ntlm.server.tree.name", "log.id.uid" ], + "::pe": ["soc_timestamp", "file.is_64bit", "file.is_exe", "file.machine", "file.os", "file.subsystem", "log.id.fuid" ], + "::radius": ["soc_timestamp", 
"source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "username", "radius.framed_address", "radius.reply_message", "radius.result" ], + "::rdp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rdp.client_build", "client_name", "rdp.cookie", "rdp.encryption_level", "rdp.encryption_method", "rdp.keyboard_layout", "rdp.result", "rdp.security_protocol", "log.id.uid" ], + "::rfb": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rfb.authentication.method", "rfb.authentication.success", "rfb.share_flag", "rfb.desktop.name", "log.id.uid" ], + "::signatures" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "signature_id", "event_message", "sub_message", "signature_count", "host.count", "log.id.uid" ], + "::sip": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "sip.method", "sip.uri", "sip.request.from", "sip.request.to", "sip.response.from", "sip.response.to", "sip.call_id", "sip.subject", "sip.user_agent", "sip.status_code", "log.id.uid" ], + "::smb_files" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.fuid", "file.action", "file.path", "file.name", "file.size", "file.prev_name", "log.id.uid" ], + "::smb_mapping" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "smb.path", "smb.service", "smb.share_type", "log.id.uid" ], + "::smtp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "smtp.helo", "smtp.mail_from", "smtp.recipient_to", "smtp.from", "smtp.to", "smtp.cc", "smtp.reply_to", "smtp.subject", "smtp.useragent", "log.id.uid" ], + "::snmp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "snmp.community", "snmp.version", "log.id.uid" ], + "::socks": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "socks.name", "socks.request.host", "socks.request.port", "socks.status", "log.id.uid" ], + "::software": ["soc_timestamp", "source.ip", "software.name", "software.type" ], + "::ssh": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssh.version", "ssh.hassh_version", "ssh.direction", "ssh.client", "ssh.server", "log.id.uid" ], + "::ssl": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssl.cipher", "ssl.curve", "ssl.certificate.subject", "ssl.validation_status", "ssl.version", "log.id.uid" ], + "::syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "syslog.facility", "network.protocol", "syslog.severity", "log.id.uid" ], + "::tunnels": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "tunnel_type", "action", "log.id.uid" ], + "::weird": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "weird.name", "log.id.uid" ], + "::x509": ["soc_timestamp", "x509.certificate.subject", "x509.certificate.key.type", "x509.certificate.key.length", "x509.certificate.issuer", "log.id.id" ], + ":firewall:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "direction", "interface", "action", "reason" ], + ":osquery:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "source.hostname", "event.dataset", "process.executable", "user.name" ], + ":ossec:": ["soc_timestamp", "source.ip", 
"source.port", "destination.ip", "destination.port", "rule.name", "rule.level", "rule.category", "process.name", "user.name", "user.escalated", "location", "process.name" ], + ":strelka:file": ["soc_timestamp", "scan.exiftool.OriginalFileName", "file.size", "hash.md5", "scan.exiftool.CompanyName", "scan.exiftool.Description", "scan.exiftool.Directory", "scan.exiftool.FileType", "scan.exiftool.FileOS", "log.id.fuid" ], + ":suricata:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rule.gid", "rule.name", "rule.category", "rule.rev", "event.severity", "event.severity_label" ], + ":sysmon:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "source.hostname", "event.dataset", "process.executable", "user.name" ], + ":windows_eventlog:": ["soc_timestamp", "user.name" ] }, "queries": [ { "name": "Default Query", "description": "Show all events grouped by the origin host", "query": "* | groupby observer.name"}, From 63e31bd6b9e875eb202c393c238b015aa7d18ee7 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 6 Aug 2020 15:33:48 -0400 Subject: [PATCH 131/376] Add upload queue thread --- setup/so-functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 002ed8d81..b3dc5b060 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -27,7 +27,7 @@ accept_salt_key_remote() { echo "Accept the key remotely on the manager" >> "$setup_log" 2>&1 # Delete the key just in case. ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -d "$MINION_ID" -y - salt-call state.apply ca >> /dev/null 2>&1 + salt-call state.apply ca ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -a "$MINION_ID" -y } @@ -1126,6 +1126,7 @@ manager_global() { "s3_settings:"\ " size_file: 2048"\ " time_file: 1"\ + " upload_queue_size: 4" " encoding: gzip"\ " interval: 5" > "$global_pillar" From 16d0c02113162aa8244738d58d79722a3bbe5094 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 6 Aug 2020 15:39:02 -0400 Subject: [PATCH 132/376] Fix cert dev null --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index b3dc5b060..d4218a10c 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -27,7 +27,7 @@ accept_salt_key_remote() { echo "Accept the key remotely on the manager" >> "$setup_log" 2>&1 # Delete the key just in case. 
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -d "$MINION_ID" -y - salt-call state.apply ca + salt-call state.apply ca >> /dev/null 2>&1 ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -a "$MINION_ID" -y } From bbdaee28ed56cc813f44eec5a91382f025869cea Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 6 Aug 2020 15:41:10 -0400 Subject: [PATCH 133/376] Add upload queue thread --- salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja index 37f829ec0..e953c3521 100644 --- a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja +++ b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja @@ -3,6 +3,7 @@ {%- set access_secret = salt['pillar.get']('minio:access_secret', '') %} {%- set SIZE_FILE = salt['pillar.get']('s3_settings:size_file', 2048) %} {%- set TIME_FILE = salt['pillar.get']('s3_settings:time_file', 1) %} +{%- set UPLOAD_QUEUE_SIZE = salt['pillar.get']('s3_settings:upload_queue_size', 4) %} {%- set ENCODING = salt['pillar.get']('s3_settings:encoding', 'gzip') %} output { s3 { @@ -14,6 +15,7 @@ output { time_file => {{ TIME_FILE }} codec => json encoding => {{ ENCODING }} + upload_queue_size => {{ UPLOAD_QUEUE_SIZE }} temporary_directory => "/usr/share/logstash/data/tmp" additional_settings => { "force_path_style" => true From ddd099233a1111dadd0bba37571162fdb81c9080 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 6 Aug 2020 15:43:45 -0400 Subject: [PATCH 134/376] Playbook Fixes - Issue #1064 --- salt/elastalert/files/elastalert_config.yaml | 4 ++-- salt/elastalert/files/modules/so/playbook-es.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/elastalert/files/elastalert_config.yaml b/salt/elastalert/files/elastalert_config.yaml index 7646e8221..ba2b79448 100644 --- a/salt/elastalert/files/elastalert_config.yaml +++ b/salt/elastalert/files/elastalert_config.yaml @@ -21,7 +21,7 @@ run_every: # ElastAlert will buffer results from the most recent # period of time, in case some log sources are not in real time buffer_time: - minutes: 1 + minutes: 5 # The maximum time between queries for ElastAlert to start at the most recently # run query. When ElastAlert starts, for each rule, it will search elastalert_metadata @@ -38,7 +38,7 @@ es_host: {{ esip }} es_port: {{ esport }} # Sets timeout for connecting to and reading from es_host -es_conn_timeout: 60 +es_conn_timeout: 55 # The maximum number of documents that will be downloaded from Elasticsearch in # a single query. 
The default is 10,000, and if you expect to get near this number, diff --git a/salt/elastalert/files/modules/so/playbook-es.py b/salt/elastalert/files/modules/so/playbook-es.py index c794bdf12..adc03dd29 100644 --- a/salt/elastalert/files/modules/so/playbook-es.py +++ b/salt/elastalert/files/modules/so/playbook-es.py @@ -16,7 +16,7 @@ class PlaybookESAlerter(Alerter): today = strftime("%Y.%m.%d", gmtime()) timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime()) headers = {"Content-Type": "application/json"} - payload = {"rule.name": self.rule['play_title'],"event.severity": self.rule['event.severity'],"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"event.module": self.rule['event.module'],"event.dataset": self.rule['event.dataset'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"rule.category": self.rule['rule.category'],"data": match, "@timestamp": timestamp} + payload = {"rule.name": self.rule['play_title'],"event.severity": self.rule['event.severity'],"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"event.module": self.rule['event.module'],"event.dataset": self.rule['event.dataset'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"rule.category": self.rule['rule.category'],"alert_data": match, "@timestamp": timestamp} url = f"http://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/" requests.post(url, data=json.dumps(payload), headers=headers, verify=False) From d3e6657b455b03c0f91821a7623c1effbf4ae170 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 10:01:40 -0400 Subject: [PATCH 135/376] Fix Spacing --- setup/so-functions | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index d4218a10c..87b6b5756 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1047,7 +1047,7 @@ manager_global() { " fleet_ip: N/A"\ " sensoronikey: $SENSORONIKEY"\ " wazuh: $WAZUH"\ - " managerupdate: $MANAGERUPDATES"\ + " managerupdate: $MANAGERUPDATES"\ " imagerepo: $IMAGEREPO"\ " pipeline: minio"\ "pcap:"\ @@ -1066,9 +1066,9 @@ manager_global() { " discovery_nodes: 1"\ " hot_warm_enabled: False"\ " cluster_routing_allocation_disk.threshold_enabled: true"\ - " cluster_routing_allocation_disk_watermark_low: 95%"\ - " cluster_routing_allocation_disk_watermark_high: 98%"\ - " cluster_routing_allocation_disk_watermark_flood_stage: 98%"\ + " cluster_routing_allocation_disk_watermark_low: 95%"\ + " cluster_routing_allocation_disk_watermark_high: 98%"\ + " cluster_routing_allocation_disk_watermark_flood_stage: 98%"\ " index_settings:"\ " so-beats:"\ " shards: 1"\ From b534d2b975c43fd96f6717286ebd3093ab28e8f5 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 10:05:47 -0400 Subject: [PATCH 136/376] Update so-functions --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 87b6b5756..038a0ba6e 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1126,7 +1126,7 @@ manager_global() { "s3_settings:"\ " size_file: 2048"\ " time_file: 1"\ - " upload_queue_size: 4" + " upload_queue_size: 4"\ " encoding: gzip"\ " interval: 5" > "$global_pillar" From d668b850336574fd48c834618f4cc8d9687998da Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 11:09:12 -0400 Subject: [PATCH 137/376] copy_ssh_key for is_importpcap also --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/setup/so-setup b/setup/so-setup index c14b71c1e..9a9c2788e 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -410,7 +410,7 @@ fi host_pillar >> $setup_log 2>&1 -if [[ $is_minion ]]; then +if [[ $is_minion || $is_importpcap ]]; then set_updates >> $setup_log 2>&1 copy_ssh_key >> $setup_log 2>&1 fi From 2c6a20fee98fd079e4bd694544f9f857c3c63ae9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 11:11:21 -0400 Subject: [PATCH 138/376] enlarge whiptail for install type selection --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 2ba6da10e..92c130f3d 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -471,7 +471,7 @@ whiptail_install_type() { # What kind of install are we doing? install_type=$(whiptail --title "Security Onion Setup" --radiolist \ - "Choose install type:" 10 65 3 \ + "Choose install type:" 12 65 3 \ "EVAL" "Evaluation mode (not for production) " ON \ "STANDALONE" "Standalone production install " OFF \ "DISTRIBUTED" "Distributed install submenu " OFF \ From 24b77fa855c8f797a6d5323d2a5f2dffc8afade1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 11:16:52 -0400 Subject: [PATCH 139/376] enlarge whiptail for install type selection --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 92c130f3d..264390d30 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -471,7 +471,7 @@ whiptail_install_type() { # What kind of install are we doing? install_type=$(whiptail --title "Security Onion Setup" --radiolist \ - "Choose install type:" 12 65 3 \ + "Choose install type:" 10 65 4 \ "EVAL" "Evaluation mode (not for production) " ON \ "STANDALONE" "Standalone production install " OFF \ "DISTRIBUTED" "Distributed install submenu " OFF \ From 7d11fc345f14a2c15a5e0d15d496b9ec6445b0dd Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 11:19:31 -0400 Subject: [PATCH 140/376] dont ask for patch schedule for importpcap node --- setup/so-setup | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 9a9c2788e..9c0255cea 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -181,7 +181,9 @@ elif [[ $is_importpcap ]]; then check_requirements "importpcap" fi -whiptail_patch_schedule +if [[ ! $is_importpcap ]]; then + whiptail_patch_schedule +fi case "$setup_type" in 'iso') From 2d7aefed0d96eb8b6d88eb3dd6e343050181ffd0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 11:42:48 -0400 Subject: [PATCH 141/376] add IMPORTPCAP node to set_hostname --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index eb93e2975..d2e2be748 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1521,7 +1521,7 @@ set_hostname() { set_hostname_iso - if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE)$ ]]; then + if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE|IMPORTPCAP)$ ]]; then if ! 
getent hosts "$MSRV"; then echo "$MSRVIP $MSRV" >> /etc/hosts fi From a8b980b6a79bd0b8a35b980390d951152910a42e Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Fri, 7 Aug 2020 13:35:43 -0400 Subject: [PATCH 142/376] More Playbook Fixes - Issue #1064 --- salt/elastalert/files/modules/so/playbook-es.py | 2 +- salt/elasticsearch/files/ingest/common | 1 + salt/soctopus/files/templates/generic.template | 5 +++-- salt/soctopus/files/templates/osquery.template | 5 +++-- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/salt/elastalert/files/modules/so/playbook-es.py b/salt/elastalert/files/modules/so/playbook-es.py index adc03dd29..46d6c8f45 100644 --- a/salt/elastalert/files/modules/so/playbook-es.py +++ b/salt/elastalert/files/modules/so/playbook-es.py @@ -16,7 +16,7 @@ class PlaybookESAlerter(Alerter): today = strftime("%Y.%m.%d", gmtime()) timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime()) headers = {"Content-Type": "application/json"} - payload = {"rule.name": self.rule['play_title'],"event.severity": self.rule['event.severity'],"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"event.module": self.rule['event.module'],"event.dataset": self.rule['event.dataset'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"rule.category": self.rule['rule.category'],"alert_data": match, "@timestamp": timestamp} + payload = {"rule.name": self.rule['play_title'],"event.severity": self.rule['event.severity'],"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"event.module": self.rule['event.module'],"event.dataset": self.rule['event.dataset'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"rule.category": self.rule['rule.category'],"event_data": match, "@timestamp": timestamp} url = f"http://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/" requests.post(url, data=json.dumps(payload), headers=headers, verify=False) diff --git a/salt/elasticsearch/files/ingest/common b/salt/elasticsearch/files/ingest/common index b255ad86c..01d18529b 100644 --- a/salt/elasticsearch/files/ingest/common +++ b/salt/elasticsearch/files/ingest/common @@ -49,6 +49,7 @@ { "convert": { "field": "destination.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } }, { "convert": { "field": "source.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } }, { "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } }, + { "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } }, { "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } }, { "remove": { diff --git a/salt/soctopus/files/templates/generic.template b/salt/soctopus/files/templates/generic.template index cdd5947d3..1f56bc134 100644 --- a/salt/soctopus/files/templates/generic.template +++ b/salt/soctopus/files/templates/generic.template @@ -1,7 +1,9 @@ {% set es = salt['pillar.get']('global:managerip', '') %} {% set hivehost = salt['pillar.get']('global:managerip', '') %} {% set hivekey = salt['pillar.get']('global:hivekey', '') %} -alert: hivealerter +alert: +- "modules.so.playbook-es.PlaybookESAlerter" +- "hivealerter" hive_connection: hive_host: http://{{hivehost}} @@ -24,7 +26,6 @@ hive_alert_config: follow: True caseTemplate: '5000' -alert: modules.so.playbook-es.PlaybookESAlerter elasticsearch_host: "{{ es }}:9200" play_title: "" event.module: 
"playbook" diff --git a/salt/soctopus/files/templates/osquery.template b/salt/soctopus/files/templates/osquery.template index 352c3d69a..44214afa3 100644 --- a/salt/soctopus/files/templates/osquery.template +++ b/salt/soctopus/files/templates/osquery.template @@ -1,7 +1,9 @@ {% set es = salt['pillar.get']('global:managerip', '') %} {% set hivehost = salt['pillar.get']('global:managerip', '') %} {% set hivekey = salt['pillar.get']('global:hivekey', '') %} -alert: hivealerter +alert: +- "modules.so.playbook-es.PlaybookESAlerter" +- "hivealerter" hive_connection: hive_host: http://{{hivehost}} @@ -31,7 +33,6 @@ hive_alert_config: caseTemplate: '5000' -alert: modules.so.playbook-es.PlaybookESAlerter elasticsearch_host: "{{ es }}:9200" play_title: "" event.module: "playbook" From 7c3070655b53416c921756afaa050bab8238ecae Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 13:39:17 -0400 Subject: [PATCH 143/376] copy_minion_tmp_files for IMPORTPCAP too --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index d2e2be748..c10cc6661 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -417,7 +417,7 @@ copy_salt_master_config() { copy_minion_tmp_files() { case "$install_type" in - 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE') + 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORTPCAP') echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir" cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1 if [ -d "$temp_install_dir"/salt ] ; then From fadd81c9f38d807f7595c14267549bbe9a8b3884 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 13:58:29 -0400 Subject: [PATCH 144/376] so-importpcap to ssl state --- salt/ssl/init.sls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index d7c84675e..fdb40a0bf 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -7,7 +7,7 @@ {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} {% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %} -{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone'] %} +{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'importpcap'] %} {% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %} {% set ca_server = grains.id %} {% else %} @@ -72,7 +72,7 @@ influxkeyperms: - mode: 640 - group: 939 -{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone'] %} +{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-importpcap'] %} /etc/pki/filebeat.key: x509.private_key_managed: @@ -287,7 +287,7 @@ fleetkeyperms: - group: 939 {% endif %} -{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone'] %} +{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-importpcap'] %} fbcertdir: file.directory: From 847939e9b295de91d4ad6cbef728b8ac36eb89c6 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 7 Aug 2020 14:11:28 -0400 Subject: [PATCH 145/376] Fixed extra space that causes global.sls file to be empty --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 038a0ba6e..aee2039af 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1123,7 +1123,7 @@ manager_global() { "minio:"\ " access_key: $ACCESS_KEY"\ " access_secret: $ACCESS_SECRET"\ - "s3_settings:"\ + "s3_settings:"\ " size_file: 2048"\ " time_file: 1"\ " upload_queue_size: 4"\ From a8147d7d3baf9ee1abbd2d029c2e13bf4589e743 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 14:19:58 -0400 Subject: [PATCH 146/376] add importpcap to salt_checkin for setup ssl/ca --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index c10cc6661..800d57a90 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1357,7 +1357,7 @@ saltify() { salt_checkin() { case "$install_type" in - 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE') # Fix Mine usage + 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORTPCAP') # Fix Mine usage { echo "Building Certificate Authority"; salt-call state.apply ca; From 9649994f734cf7bb97349b724588baf43e82ab5c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 14:40:02 -0400 Subject: [PATCH 147/376] add importpcap to pillar/top --- pillar/top.sls | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pillar/top.sls b/pillar/top.sls index c11b66eaa..153945163 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -80,3 +80,10 @@ base: - logstash.search - elasticsearch.search - minions.{{ grains.id }} + + '*_importpcap': + - zeeklogs + - secrets + - elasticsearch.eval + - global + - minions.{{ grains.id }} \ No newline at end of file From 86b118ba1a0eacc1d07d59fb6b4defd5e802bb1c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 15:00:32 -0400 Subject: [PATCH 148/376] add importpcap to local assigned hostgroups yaml --- files/firewall/assigned_hostgroups.local.map.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/files/firewall/assigned_hostgroups.local.map.yaml b/files/firewall/assigned_hostgroups.local.map.yaml index 5d9b662b6..b39d34ae7 100644 --- a/files/firewall/assigned_hostgroups.local.map.yaml +++ b/files/firewall/assigned_hostgroups.local.map.yaml @@ -13,6 +13,7 @@ role: fleet: heavynode: helixsensor: + importpcap: manager: managersearch: standalone: From d7b55c110935bf143ec1d4d0a1a1505cc0741501 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 15:21:07 -0400 Subject: [PATCH 149/376] add so-status map for importpcap --- salt/common/maps/importpcap.map.jinja | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 salt/common/maps/importpcap.map.jinja diff --git a/salt/common/maps/importpcap.map.jinja b/salt/common/maps/importpcap.map.jinja new file mode 100644 index 000000000..f412a030e --- /dev/null +++ b/salt/common/maps/importpcap.map.jinja @@ -0,0 +1,14 @@ +{% set docker = { + 'containers': [ + 'so-filebeat', + 'so-nginx', + 'so-soc', + 'so-kratos', + 'so-elasticsearch', + 'so-kibana', + 'so-suricata', + 'so-zeek', + 'so-soctopus', + 'so-sensoroni' + ] +} %} \ No newline at end of file From 7933bafd5524ae79bb5cd9aa6a4ebd0feb6c20fc Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 15:46:45 -0400 Subject: [PATCH 150/376] more fixes for importpcap node --- pillar/top.sls | 2 +- salt/elasticsearch/init.sls | 2 +- salt/nginx/etc/nginx.conf.so-importpcap | 326 ++++++++++++++++++++++++ 3 files changed, 328 insertions(+), 2 deletions(-) create mode 100644 
salt/nginx/etc/nginx.conf.so-importpcap diff --git a/pillar/top.sls b/pillar/top.sls index 153945163..44f56edc5 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -2,7 +2,7 @@ base: '*': - patch.needs_restarting - '*_eval or *_helix or *_heavynode or *_sensor or *_standalone': + '*_eval or *_helix or *_heavynode or *_sensor or *_standalone or *_importpcap': - match: compound - zeek diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index f3777481c..0b2090591 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -23,7 +23,7 @@ {% set FEATURES = '' %} {% endif %} -{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %} +{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-importpcap'] %} {% set esclustername = salt['pillar.get']('manager:esclustername', '') %} {% set esheap = salt['pillar.get']('manager:esheap', '') %} {% elif grains['role'] in ['so-node','so-heavynode'] %} diff --git a/salt/nginx/etc/nginx.conf.so-importpcap b/salt/nginx/etc/nginx.conf.so-importpcap new file mode 100644 index 000000000..9c919c764 --- /dev/null +++ b/salt/nginx/etc/nginx.conf.so-importpcap @@ -0,0 +1,326 @@ +{%- set managerip = salt['pillar.get']('manager:mainip', '') %} +{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} +{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} +{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} +# For more information on configuration, see: +# * Official English Documentation: http://nginx.org/en/docs/ +# * Official Russian Documentation: http://nginx.org/ru/docs/ + +worker_processes auto; +error_log /var/log/nginx/error.log; +pid /run/nginx.pid; + +# Load dynamic modules. See /usr/share/nginx/README.dynamic. +include /usr/share/nginx/modules/*.conf; + +events { + worker_connections 1024; +} + +http { + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + client_max_body_size 1024M; + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + # Load modular configuration files from the /etc/nginx/conf.d directory. + # See http://nginx.org/en/docs/ngx_core_module.html#include + # for more information. + include /etc/nginx/conf.d/*.conf; + + #server { + # listen 80 default_server; + # listen [::]:80 default_server; + # server_name _; + # root /opt/socore/html; + # index index.html; + + # Load configuration files for the default server block. 
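Because nginx.conf.so-importpcap is rendered from jinja at deploy time, a syntax check catches template mistakes before the proxy restarts with a broken config. A minimal sketch, assuming the rendered file is the active config inside the so-nginx container named in the importpcap map above:

```bash
# nginx -t parses the active config and reports the first syntax error,
# without affecting the running workers.
docker exec so-nginx nginx -t
```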
+ #include /etc/nginx/default.d/*.conf; + + # location / { + # } + + # error_page 404 /404.html; + # location = /40x.html { + # } + + # error_page 500 502 503 504 /50x.html; + # location = /50x.html { + # } + #} + server { + listen 80 default_server; + server_name _; + return 301 https://$host$request_uri; + } + +{% if FLEET_MANAGER %} + server { + listen 8090 ssl http2 default_server; + server_name _; + root /opt/socore/html; + index blank.html; + + ssl_certificate "/etc/pki/nginx/server.crt"; + ssl_certificate_key "/etc/pki/nginx/server.key"; + ssl_session_cache shared:SSL:1m; + ssl_session_timeout 10m; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_prefer_server_ciphers on; + + location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ { + grpc_pass grpcs://{{ managerip }}:8080; + grpc_set_header Host $host; + grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_buffering off; + } + + } +{% endif %} + +# Settings for a TLS enabled server. + + server { + listen 443 ssl http2 default_server; + #listen [::]:443 ssl http2 default_server; + server_name _; + root /opt/socore/html; + index index.html; + + ssl_certificate "/etc/pki/nginx/server.crt"; + ssl_certificate_key "/etc/pki/nginx/server.key"; + ssl_session_cache shared:SSL:1m; + ssl_session_timeout 10m; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_prefer_server_ciphers on; + + # Load configuration files for the default server block. + #include /etc/nginx/default.d/*.conf; + + location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) { + proxy_pass http://{{ managerip }}:9822; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location / { + auth_request /auth/sessions/whoami; + proxy_pass http://{{ managerip }}:9822/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location ~ ^/auth/.*?(whoami|login|logout|settings) { + rewrite /auth/(.*) /$1 break; + proxy_pass http://{{ managerip }}:4433; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /cyberchef/ { + auth_request /auth/sessions/whoami; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /navigator/ { + auth_request /auth/sessions/whoami; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /packages/ { + try_files $uri =206; + auth_request 
/auth/sessions/whoami; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /grafana/ { + auth_request /auth/sessions/whoami; + rewrite /grafana/(.*) /$1 break; + proxy_pass http://{{ managerip }}:3000/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /kibana/ { + auth_request /auth/sessions/whoami; + rewrite /kibana/(.*) /$1 break; + proxy_pass http://{{ managerip }}:5601/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /nodered/ { + proxy_pass http://{{ managerip }}:1880/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /playbook/ { + proxy_pass http://{{ managerip }}:3200/playbook/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + {%- if FLEET_NODE %} + location /fleet/ { + return 301 https://{{ FLEET_IP }}/fleet; + } + {%- else %} + location /fleet/ { + proxy_pass https://{{ managerip }}:8080; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + {%- endif %} + + location /thehive/ { + proxy_pass http://{{ managerip }}:9000/thehive/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_http_version 1.1; # this is essential for chunked responses to work + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /cortex/ { + proxy_pass http://{{ managerip }}:9001/cortex/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_http_version 1.1; # this is essential for chunked responses to work + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /soctopus/ { + proxy_pass http://{{ managerip }}:7000/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /kibana/app/soc/ { + rewrite 
^/kibana/app/soc/(.*) /soc/$1 permanent; + } + + location /kibana/app/fleet/ { + rewrite ^/kibana/app/fleet/(.*) /fleet/$1 permanent; + } + + location /kibana/app/soctopus/ { + rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent; + } + + location /sensoroniagents/ { + proxy_pass http://{{ managerip }}:9822/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + error_page 401 = @error401; + + location @error401 { + add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400"; + return 302 /auth/self-service/browser/flows/login; + } + + #error_page 404 /404.html; + # location = /usr/share/nginx/html/40x.html { + #} + + error_page 500 502 503 504 /50x.html; + location = /usr/share/nginx/html/50x.html { + } + } + +} From b02332d84aaef4be45aa0a1e00c4eee11dbd9a80 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 16:18:11 -0400 Subject: [PATCH 151/376] fix global pillar location for setup --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 800d57a90..26871658c 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -962,7 +962,7 @@ manager_pillar() { } manager_global() { - local global_pillar="$local_salt_dir/pillar/global.sls" + local global_pillar="$temp_install_dir/pillar/global.sls" if [ -z "$SENSOR_CHECKIN_INTERVAL_MS" ]; then SENSOR_CHECKIN_INTERVAL_MS=10000 From 0c2ea53f259759c9b536e2a926ca298459d8b5f2 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Aug 2020 16:42:46 -0400 Subject: [PATCH 152/376] revert back to local_salt_dir --- setup/so-functions | 2 +- setup/so-setup | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 26871658c..800d57a90 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -962,7 +962,7 @@ manager_pillar() { } manager_global() { - local global_pillar="$temp_install_dir/pillar/global.sls" + local global_pillar="$local_salt_dir/pillar/global.sls" if [ -z "$SENSOR_CHECKIN_INTERVAL_MS" ]; then SENSOR_CHECKIN_INTERVAL_MS=10000 diff --git a/setup/so-setup b/setup/so-setup index 9c0255cea..cae5d1029 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -247,6 +247,7 @@ fi if [[ $is_importpcap ]]; then patch_schedule=Automatic + MTU=1500 RULESETUP=ETOPEN NSMSETUP=BASIC HNSENSOR=inherit From 928e5ed832d958d0fd4dc50b9d391a5fda48dbb6 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Fri, 7 Aug 2020 17:02:48 -0400 Subject: [PATCH 153/376] Playbook/Nav Fixes - Issue #1064 --- salt/common/tools/sbin/so-playbook-sync | 2 +- salt/elasticsearch/files/ingest/sysmon | 4 ++++ salt/nginx/files/navigator_config.json | 4 ++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/salt/common/tools/sbin/so-playbook-sync b/salt/common/tools/sbin/so-playbook-sync index 8b2817eaa..f4c2c456e 100755 --- a/salt/common/tools/sbin/so-playbook-sync +++ b/salt/common/tools/sbin/so-playbook-sync @@ -17,4 +17,4 @@ . 
/usr/sbin/so-common -docker exec so-soctopus python3 playbook_play-sync.py >> /opt/so/log/soctopus/so-playbook-sync.log 2>&1 +docker exec so-soctopus python3 playbook_play-sync.py diff --git a/salt/elasticsearch/files/ingest/sysmon b/salt/elasticsearch/files/ingest/sysmon index feb96720d..5fe46b3a5 100644 --- a/salt/elasticsearch/files/ingest/sysmon +++ b/salt/elasticsearch/files/ingest/sysmon @@ -7,6 +7,9 @@ { "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } }, { "set": { "if": "ctx.event?.code == '3'", "field": "event.category", "value": "host,process,network", "override": true } }, { "set": { "if": "ctx.event?.code == '1'", "field": "event.category", "value": "host,process", "override": true } }, + { "set": { "if": "ctx.event?.code == '5'", "field": "event.category", "value": "host,process", "override": true } }, + { "set": { "if": "ctx.event?.code == '6'", "field": "event.category", "value": "host,driver", "override": true } }, + { "set": { "if": "ctx.event?.code == '22'", "field": "event.category", "value": "network", "override": true } }, { "set": { "if": "ctx.event?.code == '1'", "field": "event.dataset", "value": "process_creation", "override": true } }, { "set": { "if": "ctx.event?.code == '2'", "field": "event.dataset", "value": "process_changed_file", "override": true } }, { "set": { "if": "ctx.event?.code == '3'", "field": "event.dataset", "value": "network_connection", "override": true } }, @@ -34,6 +37,7 @@ { "rename": { "field": "winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.Company", "target_field": "process.pe.company", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } }, { "rename": { "field": "winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } }, diff --git a/salt/nginx/files/navigator_config.json b/salt/nginx/files/navigator_config.json index d54f13265..b0866d742 100644 --- a/salt/nginx/files/navigator_config.json +++ b/salt/nginx/files/navigator_config.json @@ -1,4 +1,4 @@ -{%- set ip = salt['pillar.get']('global:managerip', '') %} +{%- set URL_BASE = salt['pillar.get']('manager:url_base', '') %} { "enterprise_attack_url": "https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json", @@ -16,7 +16,7 @@ "domain": "mitre-enterprise", - "custom_context_menu_items": [ {"label": "view related plays","url": " https://{{ip}}/playbook/projects/detection-playbooks/issues?utf8=%E2%9C%93&set_filter=1&sort=id%3Adesc&f%5B%5D=cf_15&op%5Bcf_15%5D=%3D&f%5B%5D=&c%5B%5D=status&c%5B%5D=cf_10&c%5B%5D=cf_13&c%5B%5D=cf_18&c%5B%5D=cf_19&c%5B%5D=cf_1&c%5B%5D=updated_on&v%5Bcf_15%5D%5B%5D=~Technique_ID~"}], + "custom_context_menu_items": [ {"label": "view related plays","url": " 
https://{{URL_BASE}}/playbook/projects/detection-playbooks/issues?utf8=%E2%9C%93&set_filter=1&sort=id%3Adesc&f%5B%5D=cf_15&op%5Bcf_15%5D=%3D&f%5B%5D=&c%5B%5D=status&c%5B%5D=cf_10&c%5B%5D=cf_13&c%5B%5D=cf_18&c%5B%5D=cf_19&c%5B%5D=cf_1&c%5B%5D=updated_on&v%5Bcf_15%5D%5B%5D=~Technique_ID~"}], "default_layers": { "enabled": true, From d15d53bcdcd6c54fca499f2ffe12f0ea3d68cad5 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 22:04:30 -0400 Subject: [PATCH 154/376] Add script to extract cacerts --- salt/elasticsearch/files/scripts/catrust.sh | 27 +++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 salt/elasticsearch/files/scripts/catrust.sh diff --git a/salt/elasticsearch/files/scripts/catrust.sh b/salt/elasticsearch/files/scripts/catrust.sh new file mode 100644 index 000000000..4ebdd6c88 --- /dev/null +++ b/salt/elasticsearch/files/scripts/catrust.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +{%- set VERSION = salt['pillar.get']('global:soversion', '') %} +{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} +{%- set MANAGER = salt['grains.get']('master') %} +# Check to see if we have extracted the ca cert. +if [ ! -f /opt/so/saltstack/local/salt/common/cacerts ]; then + docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-elasticsearchca --user root --entrypoint keytool {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logstash:{{ VERSION }} -keystore /etc/pki/ca-trust/extracted/java/cacerts -alias SOSCA -import -file /etc/pki/ca.crt -storepass changeit -noprompt + docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/java/cacerts /opt/so/saltstack/local/salt/common/cacerts + docker rm so-elasticsearchca +else + exit 0 +fi \ No newline at end of file From 1b0f90b7e437115f17aeb2abe13a541740a9ef3c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 22:12:47 -0400 Subject: [PATCH 155/376] sync script --- .../files/scripts/{catrust.sh => so-catrust} | 1 + salt/elasticsearch/init.sls | 12 ++++++++++++ 2 files changed, 13 insertions(+) rename salt/elasticsearch/files/scripts/{catrust.sh => so-catrust} (98%) diff --git a/salt/elasticsearch/files/scripts/catrust.sh b/salt/elasticsearch/files/scripts/so-catrust similarity index 98% rename from salt/elasticsearch/files/scripts/catrust.sh rename to salt/elasticsearch/files/scripts/so-catrust index 4ebdd6c88..fd43acff1 100644 --- a/salt/elasticsearch/files/scripts/catrust.sh +++ b/salt/elasticsearch/files/scripts/so-catrust @@ -17,6 +17,7 @@ {%- set VERSION = salt['pillar.get']('global:soversion', '') %} {%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {%- set MANAGER = salt['grains.get']('master') %} +. /usr/sbin/so-common # Check to see if we have extracted the ca cert. if [ ! 
-f /opt/so/saltstack/local/salt/common/cacerts ]; then docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-elasticsearchca --user root --entrypoint keytool {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logstash:{{ VERSION }} -keystore /etc/pki/ca-trust/extracted/java/cacerts -alias SOSCA -import -file /etc/pki/ca.crt -storepass changeit -noprompt diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index f3777481c..46bc32ec6 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -26,6 +26,7 @@ {% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %} {% set esclustername = salt['pillar.get']('manager:esclustername', '') %} {% set esheap = salt['pillar.get']('manager:esheap', '') %} + {% set ismanager = True %} {% elif grains['role'] in ['so-node','so-heavynode'] %} {% set esclustername = salt['pillar.get']('elasticsearch:esclustername', '') %} {% set esheap = salt['pillar.get']('elasticsearch:esheap', '') %} @@ -37,6 +38,17 @@ vm.max_map_count: sysctl.present: - value: 262144 +{% if ismanager %} +cascriptsync: + file.managed: + - name: /usr/sbin/so-catrust + - source: salt://elasticsearch/files/scripts/so-catrust + - user: 939 + - group: 939 + - mode: 750 + +{% endif %} + # Add ES Group elasticsearchgroup: group.present: From cca0dd93440de7175cc658c64b65fd48c29618a0 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 22:14:33 -0400 Subject: [PATCH 156/376] enable jinja --- salt/elasticsearch/init.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 46bc32ec6..fda0b3b27 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -46,6 +46,7 @@ cascriptsync: - user: 939 - group: 939 - mode: 750 + - template: jinja {% endif %} From 952234446fec92d250706a7630f7cee3f36a710c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 22:18:58 -0400 Subject: [PATCH 157/376] fix logic --- salt/elasticsearch/init.sls | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index fda0b3b27..495d7d8c2 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -30,6 +30,7 @@ {% elif grains['role'] in ['so-node','so-heavynode'] %} {% set esclustername = salt['pillar.get']('elasticsearch:esclustername', '') %} {% set esheap = salt['pillar.get']('elasticsearch:esheap', '') %} + {% set ismanager = False %} {% endif %} {% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %} @@ -47,7 +48,7 @@ cascriptsync: - group: 939 - mode: 750 - template: jinja - + {% endif %} # Add ES Group From 0d66e323051111597f2f6ca7ee1ff32c6410cc4e Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 22:39:29 -0400 Subject: [PATCH 158/376] sync cacerts --- salt/elasticsearch/init.sls | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 495d7d8c2..adf82a286 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -40,6 +40,7 @@ vm.max_map_count: - value: 262144 {% if ismanager %} +# We have to add the Manager CA to the CA list cascriptsync: file.managed: - name: /usr/sbin/so-catrust @@ -51,6 +52,21 @@ cascriptsync: {% endif %} +# Move our new CA over so Elastic and Logstash can use SSL with the internal CA +catrustdir: + file.directory: + - name: /opt/so/conf/ca + - user: 939 + - group: 939 + - makedirs: True + +cacertz: + file.managed: + - name: 
/opt/so/conf/ca/cacerts + - source: salt://common/cacerts + - user: 939 + - group: 939 + # Add ES Group elasticsearchgroup: group.present: @@ -163,6 +179,10 @@ so-elasticsearch: - /opt/so/conf/elasticsearch/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw + - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro + + - watch: + - file: cacertz so-elasticsearch-pipelines-file: file.managed: From 321122cc8794ff5ba43827484bdc1f2c1028732e Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 22:43:34 -0400 Subject: [PATCH 159/376] update logstash --- salt/elasticsearch/init.sls | 1 - salt/logstash/init.sls | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index adf82a286..e0a8b0a94 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -180,7 +180,6 @@ so-elasticsearch: - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - - watch: - file: cacertz diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 1fa5b0e86..6cdecbc47 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -148,7 +148,6 @@ so-logstash: - user: logstash - environment: - LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }} - - SSL_CERT_FILE=/etc/ssl/certs/ca.crt - port_bindings: {% for BINDING in DOCKER_OPTIONS.port_bindings %} - {{ BINDING }} @@ -167,7 +166,7 @@ so-logstash: - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro - - /etc/ssl/certs/intca.crt:/etc/ssl/certs/ca.crt:ro + - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro {%- if grains['role'] == 'so-eval' %} - /nsm/zeek:/nsm/zeek:ro - /nsm/suricata:/suricata:ro From 62a6f29c965fcd279fd461f84930a81154aa1844 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 22:51:52 -0400 Subject: [PATCH 160/376] bucket stuff --- salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja index e953c3521..a38d2cd44 100644 --- a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja +++ b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja @@ -17,6 +17,7 @@ output { encoding => {{ ENCODING }} upload_queue_size => {{ UPLOAD_QUEUE_SIZE }} temporary_directory => "/usr/share/logstash/data/tmp" + validate_credentials_on_root_bucket => false additional_settings => { "force_path_style" => true } From 5525e235d176d7d8fb9dfc7be9217c5b30c01af8 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 23:28:58 -0400 Subject: [PATCH 161/376] jruby ssl fun --- salt/elasticsearch/files/scripts/so-catrust | 4 + salt/firewall/portgroups.yaml | 1 + .../config/so/9999_output_redis.conf.jinja | 2 + salt/redis/etc/redis.conf | 872 ++++++++--- salt/redis/etc/redis.conf.5 | 1316 +++++++++++++++++ 5 files changed, 2019 insertions(+), 176 deletions(-) create mode 100644 salt/redis/etc/redis.conf.5 diff --git a/salt/elasticsearch/files/scripts/so-catrust b/salt/elasticsearch/files/scripts/so-catrust index fd43acff1..1a6144aca 100644 --- 
a/salt/elasticsearch/files/scripts/so-catrust
+++ b/salt/elasticsearch/files/scripts/so-catrust
@@ -22,7 +22,11 @@
if [ ! -f /opt/so/saltstack/local/salt/common/cacerts ]; then
  docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-elasticsearchca --user root --entrypoint keytool {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logstash:{{ VERSION }} -keystore /etc/pki/ca-trust/extracted/java/cacerts -alias SOSCA -import -file /etc/pki/ca.crt -storepass changeit -noprompt
  docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/java/cacerts /opt/so/saltstack/local/salt/common/cacerts
+ docker cp so-elasticsearchca:/etc/pki/tls/certs/ca-bundle.crt /opt/so/saltstack/local/salt/common/ca-bundle.crt
  docker rm so-elasticsearchca
+ echo "" >> /opt/so/saltstack/local/salt/common/ca-bundle.crt
+ echo "sosca" >> /opt/so/saltstack/local/salt/common/ca-bundle.crt
+ cat /etc/pki/ca.crt >> /opt/so/saltstack/local/salt/common/ca-bundle.crt
else
  exit 0
fi
\ No newline at end of file
diff --git a/salt/firewall/portgroups.yaml b/salt/firewall/portgroups.yaml
index 5dee48755..db7450364 100644
--- a/salt/firewall/portgroups.yaml
+++ b/salt/firewall/portgroups.yaml
@@ -64,6 +64,7 @@ firewall:
    redis:
      tcp:
        - 6379
+       - 6380
    salt_manager:
      tcp:
        - 4505
diff --git a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
index 239ca8cb6..25620e501 100644
--- a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
@@ -3,11 +3,13 @@
output {
  redis {
    host => '{{ MANAGER }}'
+   port => 6380
    data_type => 'list'
    key => 'logstash:unparsed'
    congestion_interval => 1
    congestion_threshold => 50000000
    batch => true
    batch_events => {{ BATCH }}
+   ssl => true
  }
}
diff --git a/salt/redis/etc/redis.conf b/salt/redis/etc/redis.conf
index d5f39da99..aa8d69eb6 100644
--- a/salt/redis/etc/redis.conf
+++ b/salt/redis/etc/redis.conf
@@ -59,7 +59,7 @@
# internet, binding to all the interfaces is dangerous and will expose the
# instance to everybody on the internet. So by default we uncomment the
# following bind directive, that will force Redis to listen only into
-# the IPv4 lookback interface address (this means Redis will be able to
+# the IPv4 loopback interface address (this means Redis will be able to
# accept connections only from clients running into the same computer it
# is running).
#
@@ -86,6 +86,10 @@ bind 0.0.0.0
# even if no authentication is configured, nor a specific set of interfaces
# are explicitly listed using the "bind" directive.
protected-mode no
+tls-cert-file /certs/redis.crt
+tls-key-file /certs/redis.key
+tls-ca-cert-file /certs/ca.crt
+tls-port 6380

# Accept connections on the specified port, default is 6379 (IANA #815344).
# If port 0 is specified Redis will not listen on a TCP socket.
@@ -129,6 +133,92 @@ timeout 0
# Redis default starting with Redis 3.2.1.
tcp-keepalive 300

+################################# TLS/SSL #####################################
+
+# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration
+# directive can be used to define TLS-listening ports. To enable TLS on the
+# default port, use:
+#
+# port 0
+# tls-port 6379
+
+# Configure a X.509 certificate and private key to use for authenticating the
+# server to connected clients, masters or cluster peers. These files should be
+# PEM formatted.
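+#
+# (Illustrative aside, an assumption beyond what this deployment configures:
+# a self-signed PEM certificate and key pair for local testing can be
+# generated with OpenSSL, e.g.
+#
+#   openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
+#     -keyout redis.key -out redis.crt -subj /CN=redis
+#
+# Here the files under /certs are produced by the setup's own PKI, so the
+# command above is only an example.)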
+# +# tls-cert-file redis.crt +# tls-key-file redis.key + +# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange: +# +# tls-dh-params-file redis.dh + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Redis requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. +# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients (including replica servers) on a TLS port are required +# to authenticate using valid client side certificates. +# +# It is possible to disable authentication using this directive. +# +# tls-auth-clients no + +# By default, a Redis replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. +# +# tls-replication yes + +# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# TLS for the bus protocol, use the following directive: +# +# tls-cluster yes + +# Explicitly specify TLS versions to support. Allowed values are case insensitive +# and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) or +# any combination. To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information +# about the syntax of this string. +# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. +# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + ################################# GENERAL ##################################### # By default Redis does not run as a daemon. Use 'yes' if you need it. @@ -168,7 +258,7 @@ loglevel notice # Specify the log file name. Also the empty string can be used to force # Redis to log on the standard output. Note that if you use standard # output for logging but daemonize, logs will be sent to /dev/null -logfile "/var/log/redis/redis-server.log" +logfile "" # To enable logging to the system logger, just set 'syslog-enabled' to yes, # and optionally update the other syslog parameters to suit your needs. @@ -252,6 +342,19 @@ rdbchecksum yes # The filename where to dump the DB dbfilename dump.rdb +# Remove RDB files used by replication in instances without persistence +# enabled. By default this option is disabled, however there are environments +# where for regulations or other security concerns, RDB files persisted on +# disk by masters in order to feed replicas, or stored on disk by replicas +# in order to load them for the initial synchronization, should be deleted +# ASAP. 
Note that this option ONLY WORKS in instances that have both AOF +# and RDB persistence disabled, otherwise is completely ignored. +# +# An alternative (and sometimes better) way to obtain the same effect is +# to use diskless replication on both master and replicas instances. However +# in the case of replicas, diskless is not always an option. +rdb-del-sync-files no + # The working directory. # # The DB will be written inside this directory, with the filename specified @@ -260,88 +363,104 @@ dbfilename dump.rdb # The Append Only File will also be created inside this directory. # # Note that you must specify a directory here, not a file name. -dir /redis +dir ./ ################################# REPLICATION ################################# -# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# Master-Replica replication. Use replicaof to make a Redis instance a copy of # another Redis server. A few things to understand ASAP about Redis replication. # +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# # 1) Redis replication is asynchronous, but you can configure a master to # stop accepting writes if it appears to be not connected with at least -# a given number of slaves. -# 2) Redis slaves are able to perform a partial resynchronization with the +# a given number of replicas. +# 2) Redis replicas are able to perform a partial resynchronization with the # master if the replication link is lost for a relatively small amount of # time. You may want to configure the replication backlog size (see the next # sections of this file) with a sensible value depending on your needs. # 3) Replication is automatic and does not need user intervention. After a -# network partition slaves automatically try to reconnect to masters +# network partition replicas automatically try to reconnect to masters # and resynchronize with them. # -# slaveof +# replicaof # If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before +# directive below) it is possible to tell the replica to authenticate before # starting the replication synchronization process, otherwise the master will -# refuse the slave request. +# refuse the replica request. # # masterauth - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: # -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# However this is not enough if you are using Redis ACLs (for Redis version +# 6 or greater), and the default user is not capable of running the PSYNC +# command and/or other commands needed for replication. In this case it's +# better to configure a special user to use with replication, and specify the +# masteruser configuration as such: +# +# masteruser +# +# When masteruser is specified, the replica will authenticate against its +# master using the new AUTH form: AUTH . + +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will # still reply to client requests, possibly with out of date data, or the # data set may just be empty if this is the first synchronization. 
#
-# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+# 2) if replica-serve-stale-data is set to 'no' the replica will reply with
# an error "SYNC with master in progress" to all kinds of commands
-# but to INFO and SLAVEOF.
+# but to INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG,
+# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB,
+# COMMAND, POST, HOST: and LATENCY.
#
-slave-serve-stale-data yes
+replica-serve-stale-data yes

-# You can configure a slave instance to accept writes or not. Writing against
-# a slave instance may be useful to store some ephemeral data (because data
-# written on a slave will be easily deleted after resync with the master) but
+# You can configure a replica instance to accept writes or not. Writing against
+# a replica instance may be useful to store some ephemeral data (because data
+# written on a replica will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
-# Since Redis 2.6 by default slaves are read-only.
+# Since Redis 2.6 by default replicas are read-only.
#
-# Note: read only slaves are not designed to be exposed to untrusted clients
+# Note: read only replicas are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
-# Still a read only slave exports by default all the administrative commands
+# Still a read only replica exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
-# security of read only slaves using 'rename-command' to shadow all the
+# security of read only replicas using 'rename-command' to shadow all the
# administrative / dangerous commands.
-slave-read-only yes
+replica-read-only yes

# Replication SYNC strategy: disk or socket.
#
-# -------------------------------------------------------
-# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
-# -------------------------------------------------------
+# New replicas and reconnecting replicas that are not able to continue the
+# replication process just receiving differences, need to do what is called a
+# "full synchronization". An RDB file is transmitted from the master to the
+# replicas.
#
-# New slaves and reconnecting slaves that are not able to continue the replication
-# process just receiving differences, need to do what is called a "full
-# synchronization". An RDB file is transmitted from the master to the slaves.
# The transmission can happen in two different ways:
#
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
#                 file on disk. Later the file is transferred by the parent
-#                 process to the slaves incrementally.
+#                 process to the replicas incrementally.
# 2) Diskless: The Redis master creates a new process that directly writes the
-#              RDB file to slave sockets, without touching the disk at all.
+#              RDB file to replica sockets, without touching the disk at all.
#
-# With disk-backed replication, while the RDB file is generated, more slaves
-# can be queued and served with the RDB file as soon as the current child producing
-# the RDB file finishes its work. With diskless replication instead once
-# the transfer starts, new slaves arriving will be queued and a new transfer
-# will start when the current one terminates.
+# With disk-backed replication, while the RDB file is generated, more replicas
+# can be queued and served with the RDB file as soon as the current child
+# producing the RDB file finishes its work. With diskless replication instead
+# once the transfer starts, new replicas arriving will be queued and a new
+# transfer will start when the current one terminates.
#
# When diskless replication is used, the master waits a configurable amount of
-# time (in seconds) before starting the transfer in the hope that multiple slaves
-# will arrive and the transfer can be parallelized.
+# time (in seconds) before starting the transfer in the hope that multiple
+# replicas will arrive and the transfer can be parallelized.
#
# With slow disks and fast (large bandwidth) networks, diskless replication
# works better.
@@ -349,157 +468,334 @@ repl-diskless-sync no
# When diskless replication is enabled, it is possible to configure the delay
# the server waits in order to spawn the child that transfers the RDB via socket
-# to the slaves.
+# to the replicas.
#
# This is important since once the transfer starts, it is not possible to serve
-# new slaves arriving, that will be queued for the next RDB transfer, so the server
-# waits a delay in order to let more slaves arrive.
+# new replicas arriving, that will be queued for the next RDB transfer, so the
+# server waits a delay in order to let more replicas arrive.
#
# The delay is specified in seconds, and by default is 5 seconds. To disable
# it entirely just set it to 0 seconds and the transfer will start ASAP.
repl-diskless-sync-delay 5

-# Slaves send PINGs to server in a predefined interval. It's possible to change
-# this interval with the repl_ping_slave_period option. The default value is 10
-# seconds.
+# -----------------------------------------------------------------------------
+# WARNING: RDB diskless load is experimental. Since in this setup the replica
+# does not immediately store an RDB on disk, it may cause data loss during
+# failovers. RDB diskless load + Redis modules not handling I/O reads may also
+# cause Redis to abort in case of I/O errors during the initial synchronization
+# stage with the master. Use only if you know what you are doing.
+# -----------------------------------------------------------------------------
#
-# repl-ping-slave-period 10
+# Replica can load the RDB it reads from the replication link directly from the
+# socket, or store the RDB to a file and read that file after it was completely
+# received from the master.
+#
+# In many cases the disk is slower than the network, and storing and loading
+# the RDB file may increase replication time (and even increase the master's
+# Copy on Write memory and replica buffers).
+# However, parsing the RDB file directly from the socket may mean that we have
+# to flush the contents of the current database before the full rdb was
+# received. For this reason we have the following options:
+#
+# "disabled"    - Don't use diskless load (store the rdb file to the disk first)
+# "on-empty-db" - Use diskless load only when it is completely safe.
+# "swapdb"      - Keep a copy of the current db contents in RAM while parsing
+#                 the data directly from the socket. Note that this requires
+#                 sufficient memory, if you don't have it, you risk an OOM kill.
+repl-diskless-load disabled
+
+# Replicas send PINGs to server in a predefined interval. It's possible to
+# change this interval with the repl_ping_replica_period option. The default
+# value is 10 seconds.
+# +# repl-ping-replica-period 10 # The following option sets the replication timeout for: # -# 1) Bulk transfer I/O during SYNC, from the point of view of slave. -# 2) Master timeout from the point of view of slaves (data, pings). -# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). # # It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. # # repl-timeout 60 -# Disable TCP_NODELAY on the slave socket after SYNC? +# Disable TCP_NODELAY on the replica socket after SYNC? # # If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with # Linux kernels using a default configuration. # -# If you select "no" the delay for data to appear on the slave side will +# If you select "no" the delay for data to appear on the replica side will # be reduced but more bandwidth will be used for replication. # # By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may +# or when the master and replicas are many hops away, turning this to "yes" may # be a good idea. repl-disable-tcp-nodelay no # Set the replication backlog size. The backlog is a buffer that accumulates -# slave data when slaves are disconnected for some time, so that when a slave -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the slave missed while -# disconnected. +# replica data when replicas are disconnected for some time, so that when a +# replica wants to reconnect again, often a full resync is not needed, but a +# partial resync is enough, just passing the portion of data the replica +# missed while disconnected. # -# The bigger the replication backlog, the longer the time the slave can be +# The bigger the replication backlog, the longer the time the replica can be # disconnected and later be able to perform a partial resynchronization. # -# The backlog is only allocated once there is at least a slave connected. +# The backlog is only allocated once there is at least a replica connected. # # repl-backlog-size 1mb -# After a master has no longer connected slaves for some time, the backlog +# After a master has no longer connected replicas for some time, the backlog # will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last slave disconnected, for +# need to elapse, starting from the time the last replica disconnected, for # the backlog buffer to be freed. 
# -# Note that slaves never free the backlog for timeout, since they may be +# Note that replicas never free the backlog for timeout, since they may be # promoted to masters later, and should be able to correctly "partially -# resynchronize" with the slaves: hence they should always accumulate backlog. +# resynchronize" with the replicas: hence they should always accumulate backlog. # # A value of 0 means to never release the backlog. # # repl-backlog-ttl 3600 -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. +# The replica priority is an integer number published by Redis in the INFO +# output. It is used by Redis Sentinel in order to select a replica to promote +# into a master if the master is no longer working correctly. # -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel +# will pick the one with priority 10, that is the lowest. # -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by # Redis Sentinel for promotion. # # By default the priority is 100. -slave-priority 100 +replica-priority 100 # It is possible for a master to stop accepting writes if there are less than -# N slaves connected, having a lag less or equal than M seconds. +# N replicas connected, having a lag less or equal than M seconds. # -# The N slaves need to be in "online" state. +# The N replicas need to be in "online" state. # # The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the slave, that is usually sent every second. +# the last ping received from the replica, that is usually sent every second. # # This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough slaves +# will limit the window of exposure for lost writes in case not enough replicas # are available, to the specified number of seconds. # -# For example to require at least 3 slaves with a lag <= 10 seconds use: +# For example to require at least 3 replicas with a lag <= 10 seconds use: # -# min-slaves-to-write 3 -# min-slaves-max-lag 10 +# min-replicas-to-write 3 +# min-replicas-max-lag 10 # # Setting one or the other to 0 disables the feature. # -# By default min-slaves-to-write is set to 0 (feature disabled) and -# min-slaves-max-lag is set to 10. +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. # A Redis master is able to list the address and port of the attached -# slaves in different ways. For example the "INFO replication" section +# replicas in different ways. For example the "INFO replication" section # offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover slave instances. +# Redis Sentinel in order to discover replica instances. 
# Another place where this info is available is in the output of the
# "ROLE" command of a master.
#
-# The listed IP and address normally reported by a slave is obtained
+# The listed IP and address normally reported by a replica is obtained
# in the following way:
#
# IP: The address is auto detected by checking the peer address
-# of the socket used by the slave to connect with the master.
+# of the socket used by the replica to connect with the master.
#
-# Port: The port is communicated by the slave during the replication
-# handshake, and is normally the port that the slave is using to
-# list for connections.
+# Port: The port is communicated by the replica during the replication
+# handshake, and is normally the port that the replica is using to
+# listen for connections.
#
# However when port forwarding or Network Address Translation (NAT) is
-# used, the slave may be actually reachable via different IP and port
-# pairs. The following two options can be used by a slave in order to
+# used, the replica may be actually reachable via different IP and port
+# pairs. The following two options can be used by a replica in order to
# report to its master a specific set of IP and port, so that both INFO
# and ROLE will report those values.
#
# There is no need to use both the options if you need to override just
# the port or the IP address.
#
-# slave-announce-ip 5.5.5.5
-# slave-announce-port 1234
+# replica-announce-ip 5.5.5.5
+# replica-announce-port 1234
+
+############################### KEYS TRACKING #################################
+
+# Redis implements server assisted support for client side caching of values.
+# This is implemented using an invalidation table that remembers, using
+# 16 million slots, what clients may have certain subsets of keys. In turn
+# this is used in order to send invalidation messages to clients. To
+# understand more about the feature check this page:
+#
+# https://redis.io/topics/client-side-caching
+#
+# When tracking is enabled for a client, all the read only queries are assumed
+# to be cached: this will force Redis to store information in the invalidation
+# table. When keys are modified, such information is flushed away, and
+# invalidation messages are sent to the clients. However if the workload is
+# heavily dominated by reads, Redis could use more and more memory in order
+# to track the keys fetched by many clients.
+#
+# For this reason it is possible to configure a maximum fill value for the
+# invalidation table. By default it is set to 1M of keys, and once this limit
+# is reached, Redis will start to evict keys in the invalidation table
+# even if they were not modified, just to reclaim memory: this will in turn
+# force the clients to invalidate the cached values. Basically the table
+# maximum size is a trade off between the memory you want to spend server
+# side to track information about who cached what, and the ability of clients
+# to retain cached objects in memory.
+#
+# If you set the value to 0, it means there are no limits, and Redis will
+# retain as many keys as needed in the invalidation table.
+# In the "stats" INFO section, you can find information about the number of
+# keys in the invalidation table at every given moment.
+#
+# Note: when key tracking is used in broadcasting mode, no memory is used
+# on the server side so this setting is useless.
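+#
+# (Illustrative example, an assumption beyond what this file configures: a
+# client speaking the RESP3 protocol can opt in to server-assisted caching by
+# issuing "CLIENT TRACKING on", after which invalidation messages for the
+# keys it has read are pushed to it.)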
+#
+# tracking-table-max-keys 1000000

################################## SECURITY ###################################

-# Require clients to issue AUTH before processing any other
-# commands. This might be useful in environments in which you do not trust
-# others with access to the host running redis-server.
-#
-# This should stay commented out for backward compatibility and because most
-# people do not need auth (e.g. they run their own servers).
-#
# Warning: since Redis is pretty fast an outside user can try up to
-# 150k passwords per second against a good box. This means that you should
-# use a very strong password otherwise it will be very easy to break.
+# 1 million passwords per second against a modern box. This means that you
+# should use very strong passwords, otherwise they will be very easy to break.
+# Note that because the password is really a shared secret between the client
+# and the server, and should not be memorized by any human, the password
+# can be easily a long string from /dev/urandom or whatever, so by using a
+# long and unguessable password no brute force attack will be possible.

+# Redis ACL users are defined in the following format:
+#
+# user <username> ... acl rules ...
+#
+# For example:
+#
+# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
+#
+# The special username "default" is used for new connections. If this user
+# has the "nopass" rule, then new connections will be immediately authenticated
+# as the "default" user without the need of any password provided via the
+# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
+# the connections will start in not authenticated state, and will require
+# AUTH (or the HELLO command AUTH option) in order to be authenticated and
+# start to work.
+#
+# The ACL rules that describe what a user can do are the following:
+#
+# on            Enable the user: it is possible to authenticate as this user.
+# off           Disable the user: it's no longer possible to authenticate
+#               with this user, however the already authenticated connections
+#               will still work.
+# +<command>    Allow the execution of that command
+# -<command>    Disallow the execution of that command
+# +@<category>  Allow the execution of all the commands in such category,
+#               where valid categories are like @admin, @set, @sortedset, ...
+#               and so forth, see the full list in the server.c file where
+#               the Redis command table is described and defined.
+#               The special category @all means all the commands, both
+#               currently present in the server and those that will be loaded
+#               in the future via modules.
+# +<command>|subcommand  Allow a specific subcommand of an otherwise
+#               disabled command. Note that this form is not
+#               allowed as negative like -DEBUG|SEGFAULT, but
+#               only additive starting with "+".
+# allcommands   Alias for +@all. Note that it implies the ability to execute
+#               all the future commands loaded via the modules system.
+# nocommands    Alias for -@all.
+# ~<pattern>    Add a pattern of keys that can be mentioned as part of
+#               commands. For instance ~* allows all the keys. The pattern
+#               is a glob-style pattern like the one of KEYS.
+#               It is possible to specify multiple patterns.
+# allkeys       Alias for ~*
+# resetkeys     Flush the list of allowed keys patterns.
+# ><password>   Add this password to the list of valid passwords for the user.
+#               For example >mypass will add "mypass" to the list.
+#               This directive clears the "nopass" flag (see later).
+# <<password>   Remove this password from the list of valid passwords.
+# nopass        All the set passwords of the user are removed, and the user
+#               is flagged as requiring no password: it means that every
+#               password will work against this user. If this directive is
+#               used for the default user, every new connection will be
+#               immediately authenticated with the default user without
+#               any explicit AUTH command required. Note that the "resetpass"
+#               directive will clear this condition.
+# resetpass     Flush the list of allowed passwords. Moreover removes the
+#               "nopass" status. After "resetpass" the user has no associated
+#               passwords and there is no way to authenticate without adding
+#               some password (or setting it as "nopass" later).
+# reset         Performs the following actions: resetpass, resetkeys, off,
+#               -@all. The user returns to the same state it has immediately
+#               after its creation.
+#
+# ACL rules can be specified in any order: for instance you can start with
+# passwords, then flags, or key patterns. However note that the additive
+# and subtractive rules will CHANGE MEANING depending on the ordering.
+# For instance see the following example:
+#
+# user alice on +@all -DEBUG ~* >somepassword
+#
+# This will allow "alice" to use all the commands with the exception of the
+# DEBUG command, since +@all added all the commands to the set of the commands
+# alice can use, and later DEBUG was removed. However if we invert the order
+# of two ACL rules the result will be different:
+#
+# user alice on -DEBUG +@all ~* >somepassword
+#
+# Now DEBUG was removed when alice had yet no commands in the set of allowed
+# commands, later all the commands are added, so the user will be able to
+# execute everything.
+#
+# Basically ACL rules are processed left-to-right.
+#
+# For more information about ACL configuration please refer to
+# the Redis web site at https://redis.io/topics/acl
+
+# ACL LOG
+#
+# The ACL Log tracks failed commands and authentication events associated
+# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
+# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
+# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
+acllog-max-len 128
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside redis.conf to describe users.
+#
+# aclfile /etc/redis/users.acl
+
+# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
+# layer on top of the new ACL system. The option effect will be just setting
+# the password for the default user. Clients will still authenticate using
+# AUTH as usual, or more explicitly with AUTH default <password>
+# if they follow the new protocol: both will work.
#
# requirepass foobared

-# Command renaming.
+# Command renaming (DEPRECATED).
+#
+# ------------------------------------------------------------------------
+# WARNING: avoid using this option if possible. Instead use ACLs to remove
+# commands from the default user, and put them only in some admin user you
+# create for administrative purposes.
+# ------------------------------------------------------------------------
#
# It is possible to change the name of dangerous commands in a shared
# environment.
For instance the CONFIG command may be renamed into something @@ -516,7 +812,7 @@ slave-priority 100 # rename-command CONFIG "" # # Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. +# AOF file or transmitted to replicas may cause problems. ################################### CLIENTS #################################### @@ -529,6 +825,11 @@ slave-priority 100 # Once the limit is reached Redis will close all the new connections sending # an error 'max number of clients reached'. # +# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# shared with the cluster bus: every node in the cluster will use two +# connections, one incoming and another outgoing. It is important to size the +# limit accordingly in case of very large clusters. +# # maxclients 10000 ############################## MEMORY MANAGEMENT ################################ @@ -545,27 +846,27 @@ slave-priority 100 # This option is usually useful when using Redis as an LRU or LFU cache, or to # set a hard memory limit for an instance (using the 'noeviction' policy). # -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted # from the used memory count, so that network problems / resyncs will # not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion +# buffer of replicas is full with DELs of keys evicted triggering the deletion # of more keys, and so forth until the database is completely emptied. # -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica # output buffers (but this is not needed if the policy is 'noeviction'). # -maxmemory 817m +# maxmemory # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select among five behaviors: +# is reached. You can select one from the following behaviors: # -# volatile-lru -> Evict using approximated LRU among the keys with an expire set. +# volatile-lru -> Evict using approximated LRU, only keys with an expire set. # allkeys-lru -> Evict any key using approximated LRU. -# volatile-lfu -> Evict using approximated LFU among the keys with an expire set. +# volatile-lfu -> Evict using approximated LFU, only keys with an expire set. # allkeys-lfu -> Evict any key using approximated LFU. -# volatile-random -> Remove a random key among the ones with an expire set. +# volatile-random -> Remove a random key having an expire set. # allkeys-random -> Remove a random key, any key. # volatile-ttl -> Remove the key with the nearest expire time (minor TTL) # noeviction -> Don't evict anything, just return an error on write operations. 
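#
# (Illustrative example, values are assumptions and not part of this patch:
# a dedicated cache instance would typically pair a hard limit with an LRU
# policy, e.g. "maxmemory 2gb" together with "maxmemory-policy allkeys-lru".)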
@@ -587,7 +888,7 @@
#
# The default is:
#
-maxmemory-policy noeviction
+# maxmemory-policy noeviction

# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can tune it for speed or
@@ -600,6 +901,43 @@
#
# maxmemory-samples 5

+# Starting from Redis 5, by default a replica will ignore its maxmemory setting
+# (unless it is promoted to master after a failover or manually). It means
+# that the eviction of keys will be just handled by the master, sending the
+# DEL commands to the replica as keys evict in the master side.
+#
+# This behavior ensures that masters and replicas stay consistent, and is usually
+# what you want, however if your replica is writable, or you want the replica
+# to have a different memory setting, and you are sure all the writes performed
+# to the replica are idempotent, then you may change this default (but be sure
+# to understand what you are doing).
+#
+# Note that since the replica by default does not evict, it may end using more
+# memory than the one set via maxmemory (there are certain buffers that may
+# be larger on the replica, or data structures may sometimes take more memory
+# and so forth). So make sure you monitor your replicas and make sure they
+# have enough memory to never hit a real out-of-memory condition before the
+# master hits the configured maxmemory setting.
+#
+# replica-ignore-maxmemory yes
+
+# Redis reclaims expired keys in two ways: upon access when those keys are
+# found to be expired, and also in background, in what is called the
+# "active expire key". The key space is slowly and interactively scanned
+# looking for expired keys to reclaim, so that it is possible to free memory
+# of keys that are expired and will never be accessed again in a short time.
+#
+# The default effort of the expire cycle will try to avoid having more than
+# ten percent of expired keys still in memory, and will try to avoid consuming
+# more than 25% of total memory and to add latency to the system. However
+# it is possible to increase the expire "effort" that is normally set to
+# "1", to a greater value, up to the value "10". At its maximum value the
+# system will use more CPU, longer cycles (and technically may introduce
+# more latency), and will tolerate fewer already expired keys still present
+# in the system. It's a tradeoff between memory, CPU and latency.
+#
+# active-expire-effort 1
+
############################# LAZY FREEING ####################################

# Redis has two primitives to delete keys. One is called DEL and is a blocking
@@ -635,19 +973,72 @@
# or SORT with STORE option may delete existing keys. The SET command
# itself removes any old content of the specified key in order to replace
# it with the specified string.
-# 4) During replication, when a slave performs a full resynchronization with
+# 4) During replication, when a replica performs a full resynchronization with
# its master, the content of the whole database is removed in order to
-# load the RDB file just transfered.
+# load the RDB file just transferred.
#
# In all the above cases the default is to delete objects in a blocking way,
# like if DEL was called. However you can configure each case specifically
# in order to instead release memory in a non-blocking way like if UNLINK
-# was called, using the following configuration directives:
+# was called, using the following configuration directives.
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
-slave-lazy-flush no
+replica-lazy-flush no
+
+# It is also possible, for the case when replacing the user code DEL calls
+# with UNLINK calls is not easy, to modify the default behavior of the DEL
+# command to act exactly like UNLINK, using the following configuration
+# directive:
+
+lazyfree-lazy-user-del no
+
+################################ THREADED I/O #################################
+
+# Redis is mostly single threaded, however there are certain threaded
+# operations such as UNLINK, slow I/O accesses and other things that are
+# performed on side threads.
+#
+# Now it is also possible to handle Redis clients socket reads and writes
+# in different I/O threads. Since especially writing is so slow, normally
+# Redis users use pipelining in order to speedup the Redis performances per
+# core, and spawn multiple instances in order to scale more. Using I/O
+# threads it is possible to easily speedup two times Redis without resorting
+# to pipelining nor sharding of the instance.
+#
+# By default threading is disabled, we suggest enabling it only in machines
+# that have at least 4 or more cores, leaving at least one spare core.
+# Using more than 8 threads is unlikely to help much. We also recommend using
+# threaded I/O only if you actually have performance problems, with Redis
+# instances being able to use a quite big percentage of CPU time, otherwise
+# there is no point in using this feature.
+#
+# So for instance if you have a four core box, try to use 2 or 3 I/O
+# threads, if you have an 8 core box, try to use 6 threads. In order to
+# enable I/O threads use the following configuration directive:
+#
+# io-threads 4
+#
+# Setting io-threads to 1 will just use the main thread as usual.
+# When I/O threads are enabled, we only use threads for writes, that is
+# to thread the write(2) syscall and transfer the client buffers to the
+# socket. However it is also possible to enable threading of reads and
+# protocol parsing using the following configuration directive, by setting
+# it to yes:
+#
+# io-threads-do-reads no
+#
+# Usually threading reads doesn't help much.
+#
+# NOTE 1: This configuration directive cannot be changed at runtime via
+# CONFIG SET. Also this feature currently does not work when SSL is
+# enabled.
+#
+# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
+# sure you also run the benchmark itself in threaded mode, using the
+# --threads option to match the number of Redis threads, otherwise you'll not
+# be able to notice the improvements.

############################## APPEND ONLY MODE ###############################

@@ -776,10 +1167,7 @@ aof-load-truncated yes
# When loading Redis recognizes that the AOF file starts with the "REDIS"
# string and loads the prefixed RDB file, and continues loading the AOF
# tail.
-#
-# This is currently turned off by default in order to avoid the surprise
-# of a format change, but will at some point be used as the default.
-aof-use-rdb-preamble no +aof-use-rdb-preamble yes ################################ LUA SCRIPTING ############################### @@ -800,13 +1188,7 @@ aof-use-rdb-preamble no lua-time-limit 5000 ################################ REDIS CLUSTER ############################### -# -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however -# in order to mark it as "mature" we need to wait for a non trivial percentage -# of users to deploy it in production. -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# + # Normal Redis instances can't be part of a Redis Cluster; only nodes that are # started as cluster nodes can. In order to start a Redis instance as a # cluster node enable the cluster support uncommenting the following: @@ -827,42 +1209,42 @@ lua-time-limit 5000 # # cluster-node-timeout 15000 -# A slave of a failing master will avoid to start a failover if its data +# A replica of a failing master will avoid to start a failover if its data # looks too old. # -# There is no simple way for a slave to actually have an exact measure of +# There is no simple way for a replica to actually have an exact measure of # its "data age", so the following two checks are performed: # -# 1) If there are multiple slaves able to failover, they exchange messages -# in order to try to give an advantage to the slave with the best +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best # replication offset (more data from the master processed). -# Slaves will try to get their rank by offset, and apply to the start +# Replicas will try to get their rank by offset, and apply to the start # of the failover a delay proportional to their rank. # -# 2) Every single slave computes the time of the last interaction with +# 2) Every single replica computes the time of the last interaction with # its master. This can be the last ping or command received (if the master # is still in the "connected" state), or the time that elapsed since the # disconnection with the master (if the replication link is currently down). -# If the last interaction is too old, the slave will not try to failover +# If the last interaction is too old, the replica will not try to failover # at all. # -# The point "2" can be tuned by user. Specifically a slave will not perform +# The point "2" can be tuned by user. Specifically a replica will not perform # the failover if, since the last interaction with the master, the time # elapsed is greater than: # -# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# (node-timeout * replica-validity-factor) + repl-ping-replica-period # -# So for example if node-timeout is 30 seconds, and the slave-validity-factor -# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the -# slave will not try to failover if it was not able to talk with the master +# So for example if node-timeout is 30 seconds, and the replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master # for longer than 310 seconds. 
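+# (That is, with those example values: (30 * 10) + 10 = 310 seconds.)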
#
-# A large slave-validity-factor may allow slaves with too old data to failover
+# A large replica-validity-factor may allow replicas with too old data to failover
# a master, while a too small value may prevent the cluster from being able to
-# elect a slave at all.
+# elect a replica at all.
#
-# For maximum availability, it is possible to set the slave-validity-factor
-# to a value of 0, which means, that slaves will always try to failover the
+# For maximum availability, it is possible to set the replica-validity-factor
+# to a value of 0, which means, that replicas will always try to failover the
# master regardless of the last time they interacted with the master.
# (However they'll always try to apply a delay proportional to their
# offset rank).
#
# Zero is the only value able to guarantee that when all the partitions heal
# the cluster will always be able to continue.
#
-# cluster-slave-validity-factor 10
+# cluster-replica-validity-factor 10

-# Cluster slaves are able to migrate to orphaned masters, that are masters
-# that are left without working slaves. This improves the cluster ability
+# Cluster replicas are able to migrate to orphaned masters, that are masters
+# that are left without working replicas. This improves the cluster ability
# to resist to failures as otherwise an orphaned master can't be failed over
-# in case of failure if it has no working slaves.
+# in case of failure if it has no working replicas.
#
-# Slaves migrate to orphaned masters only if there are still at least a
-# given number of other working slaves for their old master. This number
-# is the "migration barrier". A migration barrier of 1 means that a slave
-# will migrate only if there is at least 1 other working slave for its master
-# and so forth. It usually reflects the number of slaves you want for every
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
# master in your cluster.
#
-# Default is 1 (slaves migrate only if their masters remain with at least
-# one slave). To disable migration just set it to a very large value.
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value.
# A value of 0 can be set but is useful only for debugging and dangerous
# in production.
#
@@ -904,7 +1286,7 @@
#
# cluster-require-full-coverage yes

-# This option, when set to yes, prevents slaves from trying to failover its
+# This option, when set to yes, prevents replicas from trying to failover their
# master during master failures. However the master can still perform a
# manual failover, if forced to do so.
#
@@ -912,7 +1294,23 @@
# data center operations, where we want one side to never be promoted if not
# in the case of a total DC failure.
#
-# cluster-slave-no-failover no
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while
+# the cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases. The first case is for when an application
The first case is for when an application +# doesn't require consistency of data during node failures or network partitions. +# One example of this is a cache, where as long as the node has the data it +# should be able to serve it. +# +# The second use case is for configurations that don't meet the recommended +# three shards but want to enable cluster mode and scale later. A +# master outage in a 1 or 2 shard configuration causes a read/write outage to the +# entire cluster without this option set, with it set there is only a write outage. +# Without a quorum of masters, slot ownership will not change automatically. +# +# cluster-allow-reads-when-down no # In order to setup your cluster make sure to read the documentation # available at http://redis.io web site. @@ -1020,7 +1418,11 @@ latency-monitor-threshold 0 # z Sorted set commands # x Expired events (events generated every time a key expires) # e Evicted events (events generated when a key is evicted for maxmemory) -# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# t Stream commands +# m Key-miss events (Note: It is not included in the 'A' class) +# A Alias for g$lshzxet, so that the "AKE" string means all the events +# (Except key-miss events which are excluded from 'A' due to their +# unique nature). # # The "notify-keyspace-events" takes as argument a string that is composed # of zero or multiple characters. The empty string means that notifications @@ -1041,6 +1443,61 @@ latency-monitor-threshold 0 # specify at least one of K or E, no events will be delivered. notify-keyspace-events "" +############################### GOPHER SERVER ################################# + +# Redis contains an implementation of the Gopher protocol, as specified in +# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt). +# +# The Gopher protocol was very popular in the late '90s. It is an alternative +# to the web, and the implementation both server and client side is so simple +# that the Redis server has just 100 lines of code in order to implement this +# support. +# +# What do you do with Gopher nowadays? Well Gopher never *really* died, and +# lately there is a movement in order for the Gopher more hierarchical content +# composed of just plain text documents to be resurrected. Some want a simpler +# internet, others believe that the mainstream internet became too much +# controlled, and it's cool to create an alternative space for people that +# want a bit of fresh air. +# +# Anyway for the 10nth birthday of the Redis, we gave it the Gopher protocol +# as a gift. +# +# --- HOW IT WORKS? --- +# +# The Redis Gopher support uses the inline protocol of Redis, and specifically +# two kind of inline requests that were anyway illegal: an empty request +# or any request that starts with "/" (there are no Redis commands starting +# with such a slash). Normal RESP2/RESP3 requests are completely out of the +# path of the Gopher protocol implementation and are served as usually as well. +# +# If you open a connection to Redis when Gopher is enabled and send it +# a string like "/foo", if there is a key named "/foo" it is served via the +# Gopher protocol. +# +# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher +# talking), you likely need a script like the following: +# +# https://github.com/antirez/gopher2redis +# +# --- SECURITY WARNING --- +# +# If you plan to put Redis on the internet in a publicly accessible address +# to server Gopher pages MAKE SURE TO SET A PASSWORD to the instance. 
+# Once a password is set: +# +# 1. The Gopher server (when enabled, not by default) will still serve +# content via Gopher. +# 2. However other commands cannot be called before the client will +# authenticate. +# +# So use the 'requirepass' option to protect your instance. +# +# To enable Gopher support uncomment the following line and set +# the option from no (the default) to yes. +# +# gopher-enabled no + ############################### ADVANCED CONFIG ############################### # Hashes are encoded using a memory efficient data structure when they have a @@ -1107,6 +1564,17 @@ zset-max-ziplist-value 64 # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. hll-sparse-max-bytes 3000 +# Streams macro node max size / items. The stream data structure is a radix +# tree of big nodes that encode multiple items inside. Using this configuration +# it is possible to configure how big a single node can be in bytes, and the +# maximum number of items it may contain before switching to a new node when +# appending new stream entries. If any of the following settings are set to +# zero, the limit is ignored, so for instance it is possible to set just a +# max entires limit by setting max-bytes to 0 and max-entries to the desired +# value. +stream-node-max-bytes 4096 +stream-node-max-entries 100 + # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in # order to help rehashing the main Redis hash table (the one mapping top-level # keys to values). The hash table implementation Redis uses (see dict.c) @@ -1135,7 +1603,7 @@ activerehashing yes # The limit can be set differently for the three different classes of clients: # # normal -> normal clients including MONITOR clients -# slave -> slave clients +# replica -> replica clients # pubsub -> clients subscribed to at least one pubsub channel or pattern # # The syntax of every client-output-buffer-limit directive is the following: @@ -1156,12 +1624,12 @@ activerehashing yes # asynchronous clients may create a scenario where data is requested faster # than it can read. # -# Instead there is a default limit for pubsub and slave clients, since -# subscribers and slaves receive data in a push fashion. +# Instead there is a default limit for pubsub and replica clients, since +# subscribers and replicas receive data in a push fashion. # # Both the hard or the soft limit can be disabled by setting them to zero. client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit replica 256mb 64mb 60 client-output-buffer-limit pubsub 32mb 8mb 60 # Client query buffers accumulate new commands. They are limited to a fixed @@ -1195,12 +1663,34 @@ client-output-buffer-limit pubsub 32mb 8mb 60 # 100 only in environments where very low latency is required. hz 10 +# Normally it is useful to have an HZ value which is proportional to the +# number of clients connected. This is useful in order, for instance, to +# avoid too many clients are processed for each background task invocation +# in order to avoid latency spikes. +# +# Since the default HZ value by default is conservatively set to 10, Redis +# offers, and enables by default, the ability to use an adaptive HZ value +# which will temporary raise when there are many connected clients. +# +# When dynamic HZ is enabled, the actual configured HZ will be used +# as a baseline, but multiples of the configured HZ value will be actually +# used as needed once more clients are connected. 
In this way an idle +# instance will use very little CPU time while a busy instance will be +# more responsive. +dynamic-hz yes + # When a child rewrites the AOF file, if the following option is enabled # the file will be fsync-ed every 32 MB of data generated. This is useful # in order to commit the file to the disk more incrementally and avoid # big latency spikes. aof-rewrite-incremental-fsync yes +# When redis saves RDB file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +rdb-save-incremental-fsync yes + # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good # idea to start with the default settings and only change them after investigating # how to improve the performances and how the keys LFU change over time, which @@ -1255,10 +1745,6 @@ aof-rewrite-incremental-fsync yes ########################### ACTIVE DEFRAGMENTATION ####################### # -# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested -# even in production and manually tested by multiple engineers for some -# time. -# # What is active defragmentation? # ------------------------------- # @@ -1298,7 +1784,7 @@ aof-rewrite-incremental-fsync yes # a good idea to leave the defaults untouched. # Enabled active defragmentation -# activedefrag yes +# activedefrag no # Minimum amount of fragmentation waste to start active defrag # active-defrag-ignore-bytes 100mb @@ -1309,8 +1795,42 @@ aof-rewrite-incremental-fsync yes # Maximum percentage of fragmentation at which we use maximum effort # active-defrag-threshold-upper 100 -# Minimal effort for defrag in CPU percentage -# active-defrag-cycle-min 25 +# Minimal effort for defrag in CPU percentage, to be used when the lower +# threshold is reached +# active-defrag-cycle-min 1 -# Maximal effort for defrag in CPU percentage -# active-defrag-cycle-max 75 +# Maximal effort for defrag in CPU percentage, to be used when the upper +# threshold is reached +# active-defrag-cycle-max 25 + +# Maximum number of set/hash/zset/list fields that will be processed from +# the main dictionary scan +# active-defrag-max-scan-fields 1000 + +# Jemalloc background thread for purging will be enabled by default +jemalloc-bg-thread yes + +# It is possible to pin different threads and processes of Redis to specific +# CPUs in your system, in order to maximize the performances of the server. +# This is useful both in order to pin different Redis threads in different +# CPUs, but also in order to make sure that multiple Redis instances running +# in the same host will be pinned to different CPUs. +# +# Normally you can do this using the "taskset" command, however it is also +# possible to this via Redis configuration directly, both in Linux and FreeBSD. +# +# You can pin the server/IO threads, bio threads, aof rewrite child process, and +# the bgsave child process. 
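+#
+# As a rough OS-level sketch (taskset is Linux-only; the CPU numbers and the
+# config path are arbitrary examples), an entire Redis process can also be
+# pinned at start time:
+#
+#   taskset -c 0,2,4,6 redis-server /path/to/redis.conf
+#
+# The directives below offer finer, per-thread control from within Redis itself.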
The syntax to specify the cpu list is the same as +# the taskset command: +# +# Set redis server/io threads to cpu affinity 0,2,4,6: +# server_cpulist 0-7:2 +# +# Set bio threads to cpu affinity 1,3: +# bio_cpulist 1,3 +# +# Set aof rewrite child process to cpu affinity 8,9,10,11: +# aof_rewrite_cpulist 8-11 +# +# Set bgsave child process to cpu affinity 1,10,11 +# bgsave_cpulist 1,10-11 \ No newline at end of file diff --git a/salt/redis/etc/redis.conf.5 b/salt/redis/etc/redis.conf.5 new file mode 100644 index 000000000..d5f39da99 --- /dev/null +++ b/salt/redis/etc/redis.conf.5 @@ -0,0 +1,1316 @@ +# Redis configuration file example. +# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## MODULES ##################################### + +# Load modules at startup. If the server is not able to load modules +# it will abort. It is possible to use multiple loadmodule directives. +# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all the network interfaces available on the server. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 ::1 +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only into +# the IPv4 lookback interface address (this means Redis will be able to +# accept connections only from clients running into the same computer it +# is running). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST COMMENT THE FOLLOWING LINE. 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +bind 0.0.0.0 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. +# 2) No password is configured. +# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. +protected-mode no + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous liveness pings back to your supervisor. +supervised no + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". 
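+#
+# For instance, an init script can use the pid file for a crude liveness
+# check (a sketch; the path must match the pidfile directive below):
+#
+#   kill -0 "$(cat /var/run/redis_6379.pid)" && echo "redis is running"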
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+pidfile /var/run/redis_6379.pid
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (lots of rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile "/var/log/redis/redis-server.log"
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+# By default Redis shows an ASCII art logo only when started to log to the
+# standard output and if the standard output is a TTY. Basically this means
+# that normally a logo is displayed only in interactive sessions.
+#
+# However it is possible to force the pre-4.0 behavior and always show an
+# ASCII art logo in startup logs by setting the following option to yes.
+always-show-logo yes
+
+################################ SNAPSHOTTING ################################
+#
+# Save the DB on disk:
+#
+#   save <seconds> <changes>
+#
+#   Will save the DB if both the given number of seconds and the given
+#   number of write operations against the DB occurred.
+#
+#   In the example below the behaviour will be to save:
+#   after 900 sec (15 min) if at least 1 key changed
+#   after 300 sec (5 min) if at least 10 keys changed
+#   after 60 sec if at least 10000 keys changed
+#
+#   Note: you can disable saving completely by commenting out all "save" lines.
+#
+#   It is also possible to remove all the previously configured save
+#   points by adding a save directive with a single empty string argument
+#   like in the following example:
+#
+#   save ""
+
+save 900 1
+save 300 10
+save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process starts working again Redis will
+# automatically allow writes again.
+#
+# However if you have set up proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dumping .rdb databases?
+# By default that's set to 'yes' as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
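+
+# As a quick illustration (assuming redis-cli can reach this instance), a
+# snapshot can be forced and verified by comparing the last-save timestamps:
+#
+#   redis-cli LASTSAVE
+#   redis-cli BGSAVE
+#   redis-cli LASTSAVE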
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performance.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# The filename where to dump the DB
+dbfilename dump.rdb
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir /redis
+
+################################# REPLICATION #################################
+
+# Master-Slave replication. Use slaveof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis replication.
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+#    stop accepting writes if it appears to be not connected with at least
+#    a given number of slaves.
+# 2) Redis slaves are able to perform a partial resynchronization with the
+#    master if the replication link is lost for a relatively small amount of
+#    time. You may want to configure the replication backlog size (see the next
+#    sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+#    network partition slaves automatically try to reconnect to masters
+#    and resynchronize with them.
+#
+# slaveof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the slave to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the slave request.
+#
+# masterauth <master-password>
+
+# When a slave loses its connection with the master, or when the replication
+# is still in progress, the slave can act in two different ways:
+#
+# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+#    still reply to client requests, possibly with out of date data, or the
+#    data set may just be empty if this is the first synchronization.
+#
+# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+#    an error "SYNC with master in progress" to all kinds of commands
+#    but INFO and SLAVEOF.
+#
+slave-serve-stale-data yes
+
+# You can configure a slave instance to accept writes or not. Writing against
+# a slave instance may be useful to store some ephemeral data (because data
+# written on a slave will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
+# Replication SYNC strategy: disk or socket.
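+#
+# As an observability aside (illustrative), full vs. partial resyncs can be
+# tracked on the master through the INFO counters:
+#
+#   redis-cli INFO stats | grep -E 'sync_full|sync_partial'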
+# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New slaves and reconnecting slaves that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the slaves. +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the slaves incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to slave sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more slaves +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new slaves arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple slaves +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the slaves. +# +# This is important since once the transfer starts, it is not possible to serve +# new slaves arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more slaves arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. 
The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# Note that slaves never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with the slaves: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the slave as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N slaves connected, having a lag less or equal than M seconds. +# +# The N slaves need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the slave, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough slaves +# are available, to the specified number of seconds. +# +# For example to require at least 3 slaves with a lag <= 10 seconds use: +# +# min-slaves-to-write 3 +# min-slaves-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-slaves-to-write is set to 0 (feature disabled) and +# min-slaves-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# slaves in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover slave instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP and address normally reported by a slave is obtained +# in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the slave to connect with the master. +# +# Port: The port is communicated by the slave during the replication +# handshake, and is normally the port that the slave is using to +# list for connections. 
+# +# However when port forwarding or Network Address Translation (NAT) is +# used, the slave may be actually reachable via different IP and port +# pairs. The following two options can be used by a slave in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# slave-announce-ip 5.5.5.5 +# slave-announce-port 1234 + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). 
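+#
+# A runtime sketch (the values are arbitrary examples, not recommendations):
+#
+#   redis-cli CONFIG SET maxmemory 100mb
+#   redis-cli CONFIG SET maxmemory-policy allkeys-lru
+#   redis-cli INFO memory | grep used_memory_human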
+# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). +# +maxmemory 817m + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> Evict using approximated LRU among the keys with an expire set. +# allkeys-lru -> Evict any key using approximated LRU. +# volatile-lfu -> Evict using approximated LFU among the keys with an expire set. +# allkeys-lfu -> Evict any key using approximated LFU. +# volatile-random -> Remove a random key among the ones with an expire set. +# allkeys-random -> Remove a random key, any key. +# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) +# noeviction -> Don't evict anything, just return an error on write operations. +# +# LRU means Least Recently Used +# LFU means Least Frequently Used +# +# Both LRU, LFU and volatile-ttl are implemented using approximated +# randomized algorithms. +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. +# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +maxmemory-policy noeviction + +# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. For default Redis will check five keys and pick the one that was +# used less recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs more CPU. 3 is faster but not very accurate. +# +# maxmemory-samples 5 + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. 
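+#
+# For example (the key name is illustrative), a huge key can be reclaimed
+# without stalling the server:
+#
+#   redis-cli UNLINK some:very:large:key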
Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. +# 4) During replication, when a slave performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transfered. +# +# In all the above cases the default is to delete objects in a blocking way, +# like if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way like if UNLINK +# was called, using the following configuration directives: + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +slave-lazy-flush no + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. 
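+#
+# As a quick check (illustrative), the active fsync policy and AOF status can
+# be inspected and changed at runtime:
+#
+#   redis-cli CONFIG GET appendfsync
+#   redis-cli CONFIG SET appendonly yes
+#   redis-cli INFO persistence | grep aof_enabled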
It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. 
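+#
+# For example (the path is illustrative and should match your 'dir' and
+# 'appendfilename' settings):
+#
+#   redis-check-aof --fix /redis/appendonly.aof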
+# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +# When rewriting the AOF file, Redis is able to use an RDB preamble in the +# AOF file for faster rewrites and recoveries. When this option is turned +# on the rewritten AOF file is composed of two different stanzas: +# +# [RDB file][AOF tail] +# +# When loading Redis recognizes that the AOF file starts with the "REDIS" +# string and loads the prefixed RDB file, and continues loading the AOF +# tail. +# +# This is currently turned off by default in order to avoid the surprise +# of a format change, but will at some point be used as the default. +aof-use-rdb-preamble no + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a slave to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). 
+# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# This option, when set to yes, prevents slaves from trying to failover its +# master during master failures. However the master can still perform a +# manual failover, if forced to do so. 
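+#
+# For instance, such a forced, manual failover can be requested from a
+# replica of the master in question (a sketch; the address is a placeholder,
+# and FORCE skips the handshake with an unreachable master):
+#
+#   redis-cli -h <replica-ip> -p 6379 CLUSTER FAILOVER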
+# +# This is useful in different scenarios, especially in the case of multiple +# data center operations, where we want one side to never be promoted if not +# in the case of a total DC failure. +# +# cluster-slave-no-failover no + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +########################## CLUSTER DOCKER/NAT support ######################## + +# In certain deployments, Redis Cluster nodes address discovery fails, because +# addresses are NAT-ted or because ports are forwarded (the typical case is +# Docker and other containers). +# +# In order to make Redis Cluster working in such environments, a static +# configuration where each node knows its public address is needed. The +# following two options are used for this scope, and are: +# +# * cluster-announce-ip +# * cluster-announce-port +# * cluster-announce-bus-port +# +# Each instruct the node about its address, client port, and cluster message +# bus port. The information is then published in the header of the bus packets +# so that other nodes will be able to correctly map the address of the node +# publishing the information. +# +# If the above options are not used, the normal Redis Cluster auto-detection +# will be used instead. +# +# Note that when remapped, the bus port may not be at the fixed offset of +# clients port + 10000, so you can specify any port and bus-port depending +# on how they get remapped. If the bus-port is not set, a fixed offset of +# 10000 will be used as usually. +# +# Example: +# +# cluster-announce-ip 10.1.1.5 +# cluster-announce-port 6379 +# cluster-announce-bus-port 6380 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. 
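+#
+# A minimal runtime sketch (threshold in milliseconds; the value is arbitrary,
+# and DEBUG SLEEP is only used here to generate a measurable latency event):
+#
+#   redis-cli CONFIG SET latency-monitor-threshold 100
+#   redis-cli DEBUG SLEEP 0.2
+#   redis-cli LATENCY LATEST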
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact that, while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+#  K     Keyspace events, published with __keyspace@<db>__ prefix.
+#  E     Keyevent events, published with __keyevent@<db>__ prefix.
+#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+#  $     String commands
+#  l     List commands
+#  s     Set commands
+#  h     Hash commands
+#  z     Sorted set commands
+#  x     Expired events (events generated every time a key expires)
+#  e     Evicted events (events generated when a key is evicted for maxmemory)
+#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
+#
+# The "notify-keyspace-events" takes as argument a string that is composed
+# of zero or multiple characters. The empty string means that notifications
+# are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+#  notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
+#
+#  notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb  <-- not recommended for normal workloads
+# -4: max size: 32 Kb  <-- not recommended
+# -3: max size: 16 Kb  <-- probably not recommended
+# -2: max size: 8 Kb   <-- good
+# -1: max size: 4 Kb   <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression.
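+#
+# (Returning to the event notification feature above, a quick end-to-end
+# sketch, assuming two shells and a reachable instance on database 0:)
+#
+#   redis-cli CONFIG SET notify-keyspace-events Ex
+#   redis-cli PSUBSCRIBE '__keyevent@0__:expired'   # shell 1: listen
+#   redis-cli SET foo bar EX 1                      # shell 2: expires in 1 sec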
The head and tail of the list +# are always uncompressed for fast push/pop operations. Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). 
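+#
+# As an inspection aside (illustrative), per-client output buffer usage is
+# visible in the obl/oll/omem fields of:
+#
+#   redis-cli CLIENT LIST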
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# slave -> slave clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to avoid that a protocol desynchronization (for
+# instance due to a bug in the client) will lead to unbound memory usage in
+# the query buffer. However you can configure it here if you have very special
+# needs, such as huge multi/exec requests or alike.
+#
+# client-query-buffer-limit 1gb
+
+# In the Redis protocol, bulk requests, that are, elements representing single
+# strings, are normally limited to 512 mb. However you can change this limit
+# here.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
+# idea to start with the default settings and only change them after investigating
+# how to improve the performances and how the keys LFU change over time, which
+# is possible to inspect via the OBJECT FREQ command. 
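That inspection is straightforward to try by hand; a minimal sketch, assuming a disposable local instance where the eviction policy can be switched at runtime:

```
redis-cli CONFIG SET maxmemory-policy allkeys-lfu   # OBJECT FREQ requires an LFU policy
redis-cli SET freqdemo 1
redis-cli -r 1000 GET freqdemo > /dev/null          # -r repeats the command 1000 times
redis-cli OBJECT FREQ freqdemo                      # logarithmic counter, not a raw hit count
```
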
+# +# There are two tunable parameters in the Redis LFU implementation: the +# counter logarithm factor and the counter decay time. It is important to +# understand what the two parameters mean before changing them. +# +# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis +# uses a probabilistic increment with logarithmic behavior. Given the value +# of the old counter, when a key is accessed, the counter is incremented in +# this way: +# +# 1. A random number R between 0 and 1 is extracted. +# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). +# 3. The counter is incremented only if R < P. +# +# The default lfu-log-factor is 10. This is a table of how the frequency +# counter changes with a different number of accesses with different +# logarithmic factors: +# +# +--------+------------+------------+------------+------------+------------+ +# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | +# +--------+------------+------------+------------+------------+------------+ +# | 0 | 104 | 255 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 1 | 18 | 49 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 10 | 10 | 18 | 142 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 100 | 8 | 11 | 49 | 143 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# +# NOTE: The above table was obtained by running the following commands: +# +# redis-benchmark -n 1000000 incr foo +# redis-cli object freq foo +# +# NOTE 2: The counter initial value is 5 in order to give new objects a chance +# to accumulate hits. +# +# The counter decay time is the time, in minutes, that must elapse in order +# for the key counter to be divided by two (or decremented if it has a value +# less <= 10). +# +# The default value for the lfu-decay-time is 1. A Special value of 0 means to +# decay the counter every time it happens to be scanned. +# +# lfu-log-factor 10 +# lfu-decay-time 1 + +########################### ACTIVE DEFRAGMENTATION ####################### +# +# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested +# even in production and manually tested by multiple engineers for some +# time. +# +# What is active defragmentation? +# ------------------------------- +# +# Active (online) defragmentation allows a Redis server to compact the +# spaces left between small allocations and deallocations of data in memory, +# thus allowing to reclaim back memory. +# +# Fragmentation is a natural process that happens with every allocator (but +# less so with Jemalloc, fortunately) and certain workloads. Normally a server +# restart is needed in order to lower the fragmentation, or at least to flush +# away all the data and create it again. However thanks to this feature +# implemented by Oran Agra for Redis 4.0 this process can happen at runtime +# in an "hot" way, while the server is running. +# +# Basically when the fragmentation is over a certain level (see the +# configuration options below) Redis will start to create new copies of the +# values in contiguous memory regions by exploiting certain specific Jemalloc +# features (in order to understand if an allocation is causing fragmentation +# and to allocate it in a better place), and at the same time, will release the +# old copies of the data. 
This process, repeated incrementally for all the keys +# will cause the fragmentation to drop back to normal values. +# +# Important things to understand: +# +# 1. This feature is disabled by default, and only works if you compiled Redis +# to use the copy of Jemalloc we ship with the source code of Redis. +# This is the default with Linux builds. +# +# 2. You never need to enable this feature if you don't have fragmentation +# issues. +# +# 3. Once you experience fragmentation, you can enable this feature when +# needed with the command "CONFIG SET activedefrag yes". +# +# The configuration parameters are able to fine tune the behavior of the +# defragmentation process. If you are not sure about what they mean it is +# a good idea to leave the defaults untouched. + +# Enabled active defragmentation +# activedefrag yes + +# Minimum amount of fragmentation waste to start active defrag +# active-defrag-ignore-bytes 100mb + +# Minimum percentage of fragmentation to start active defrag +# active-defrag-threshold-lower 10 + +# Maximum percentage of fragmentation at which we use maximum effort +# active-defrag-threshold-upper 100 + +# Minimal effort for defrag in CPU percentage +# active-defrag-cycle-min 25 + +# Maximal effort for defrag in CPU percentage +# active-defrag-cycle-max 75 From 2705cbbf450a3ac99db6e347314a723f2daa6137 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 23:33:02 -0400 Subject: [PATCH 162/376] jruby ssl fun --- salt/elasticsearch/files/scripts/so-catrust | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/files/scripts/so-catrust b/salt/elasticsearch/files/scripts/so-catrust index 1a6144aca..68930777f 100644 --- a/salt/elasticsearch/files/scripts/so-catrust +++ b/salt/elasticsearch/files/scripts/so-catrust @@ -22,11 +22,11 @@ if [ ! -f /opt/so/saltstack/local/salt/common/cacerts ]; then docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-elasticsearchca --user root --entrypoint keytool {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logstash:{{ VERSION }} -keystore /etc/pki/ca-trust/extracted/java/cacerts -alias SOSCA -import -file /etc/pki/ca.crt -storepass changeit -noprompt docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/java/cacerts /opt/so/saltstack/local/salt/common/cacerts - docker cp so-elasticsearchca:/etc/pki/tls/certs/ca-bundle.crt /opt/so/saltstack/local/salt/common/ca-bundle.crt + docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem docker rm so-elasticsearchca echo "" >> /opt/so/saltstack/local/salt/common/ca-bundle.crt echo "sosca" >> /opt/so/saltstack/local/salt/common/ca-bundle.crt - echo /etc/pki/ca.crt >> /opt/so/saltstack/local/salt/common/ca-bundle.crt + echo $(cat /etc/pki/ca.crt) >> /opt/so/saltstack/local/salt/common/ca-bundle.crt else exit 0 fi \ No newline at end of file From 64af6f99e9e8651b989492eaadeb886339d5f171 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 23:34:55 -0400 Subject: [PATCH 163/376] jruby ssl fun --- salt/elasticsearch/files/scripts/so-catrust | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/scripts/so-catrust b/salt/elasticsearch/files/scripts/so-catrust index 68930777f..82f89bcf4 100644 --- a/salt/elasticsearch/files/scripts/so-catrust +++ b/salt/elasticsearch/files/scripts/so-catrust @@ -26,7 +26,7 @@ if [ ! 
-f /opt/so/saltstack/local/salt/common/cacerts ]; then docker rm so-elasticsearchca echo "" >> /opt/so/saltstack/local/salt/common/ca-bundle.crt echo "sosca" >> /opt/so/saltstack/local/salt/common/ca-bundle.crt - echo $(cat /etc/pki/ca.crt) >> /opt/so/saltstack/local/salt/common/ca-bundle.crt + cat /etc/pki/ca.crt >> /opt/so/saltstack/local/salt/common/ca-bundle.crt else exit 0 fi \ No newline at end of file From d171adb9c94e8dea8ab389fdc3da5eae53de58ff Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 23:39:13 -0400 Subject: [PATCH 164/376] jruby ssl fun --- salt/elasticsearch/files/scripts/so-catrust | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/elasticsearch/files/scripts/so-catrust b/salt/elasticsearch/files/scripts/so-catrust index 82f89bcf4..02ea12726 100644 --- a/salt/elasticsearch/files/scripts/so-catrust +++ b/salt/elasticsearch/files/scripts/so-catrust @@ -24,9 +24,9 @@ if [ ! -f /opt/so/saltstack/local/salt/common/cacerts ]; then docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/java/cacerts /opt/so/saltstack/local/salt/common/cacerts docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem docker rm so-elasticsearchca - echo "" >> /opt/so/saltstack/local/salt/common/ca-bundle.crt - echo "sosca" >> /opt/so/saltstack/local/salt/common/ca-bundle.crt - cat /etc/pki/ca.crt >> /opt/so/saltstack/local/salt/common/ca-bundle.crt + echo "" >> /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem + echo "sosca" >> /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem + cat /etc/pki/ca.crt >> /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem else exit 0 fi \ No newline at end of file From 5e3d21c43c85dd8a3a07833bdb856f7c9642ea03 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 7 Aug 2020 23:50:14 -0400 Subject: [PATCH 165/376] Wrap minio keys with quotes to ensure YAML parsing --- setup/so-functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index aee2039af..7253856ba 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1121,8 +1121,8 @@ manager_global() { " close: 365"\ " delete: 45"\ "minio:"\ - " access_key: $ACCESS_KEY"\ - " access_secret: $ACCESS_SECRET"\ + " access_key: '$ACCESS_KEY'"\ + " access_secret: '$ACCESS_SECRET'"\ "s3_settings:"\ " size_file: 2048"\ " time_file: 1"\ From ec1065462c623633e442ad0c8d74b728b822d5a2 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 23:50:26 -0400 Subject: [PATCH 166/376] jruby ssl fun --- salt/elasticsearch/init.sls | 7 +++++++ salt/logstash/init.sls | 1 + 2 files changed, 8 insertions(+) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index e0a8b0a94..b9f4894e9 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -67,6 +67,13 @@ cacertz: - user: 939 - group: 939 +capemz: + file.managed: + - name: /opt/so/conf/ca/tls-ca-bundle.pem + - source: salt://common/tls-ca-bundle.pem + - user: 939 + - group: 939 + # Add ES Group elasticsearchgroup: group.present: diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 6cdecbc47..0cd50b1ab 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -167,6 +167,7 @@ so-logstash: - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro + - 
/opt/so/conf/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem {%- if grains['role'] == 'so-eval' %} - /nsm/zeek:/nsm/zeek:ro - /nsm/suricata:/suricata:ro From 20dba6eaacd92aaf90e0b732f2c9fd5d674d4de9 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Aug 2020 23:56:09 -0400 Subject: [PATCH 167/376] jruby ssl fun --- salt/logstash/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 0cd50b1ab..9f9a5c51b 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -167,7 +167,7 @@ so-logstash: - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - - /opt/so/conf/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem {%- if grains['role'] == 'so-eval' %} - /nsm/zeek:/nsm/zeek:ro - /nsm/suricata:/suricata:ro From d1c4e3d021d29350152e26a8513cd568ed2a84a4 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sat, 8 Aug 2020 00:15:36 -0400 Subject: [PATCH 168/376] generate redis key --- salt/ssl/init.sls | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index d7c84675e..93af08048 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -216,6 +216,41 @@ miniokeyperms: - mode: 640 - group: 939 +/etc/pki/redis.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/redis.key') -%} + - prereq: + - x509: /etc/pki/redis.crt + {%- endif %} + +# Create a cert for the docker registry +/etc/pki/redis.crt: + x509.certificate_managed: + - ca_server: {{ ca_server }} + - signing_policy: registry + - public_key: /etc/pki/redis.key + - CN: {{ manager }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/redis.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + +miniokeyperms: + file.managed: + - replace: False + - name: /etc/pki/redis.key + - mode: 640 + - group: 939 + /etc/pki/managerssl.key: x509.private_key_managed: - CN: {{ manager }} From dc12cacee062e29f46e8e00e5bf6f26ac5904b0a Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sat, 8 Aug 2020 00:16:38 -0400 Subject: [PATCH 169/376] generate redis key --- salt/ssl/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 93af08048..9691c861f 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -244,7 +244,7 @@ miniokeyperms: # Will trigger 5 days (432000 sec) from cert expiration - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/redis.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' -miniokeyperms: +rediskeyperms: file.managed: - replace: False - name: /etc/pki/redis.key From 8a50768e1654b3d0c0b1b4c04f189161dd8960c6 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sat, 8 Aug 2020 00:19:55 -0400 Subject: [PATCH 170/376] redis binds --- salt/redis/etc/redis.conf | 2 +- salt/redis/init.sls | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff 
--git a/salt/redis/etc/redis.conf b/salt/redis/etc/redis.conf index aa8d69eb6..857656b87 100644 --- a/salt/redis/etc/redis.conf +++ b/salt/redis/etc/redis.conf @@ -87,7 +87,7 @@ bind 0.0.0.0 # are explicitly listed using the "bind" directive. protected-mode no tls-cert-file /certs/redis.crt -tls-key-file /certs/to/redis.key +tls-key-file /certs/redis.key tls-ca-cert-file /certs/ca.crt tls-port 6380 diff --git a/salt/redis/init.sls b/salt/redis/init.sls index 4864fc8a2..02a7db4e3 100644 --- a/salt/redis/init.sls +++ b/salt/redis/init.sls @@ -57,6 +57,9 @@ so-redis: - /opt/so/log/redis:/var/log/redis:rw - /opt/so/conf/redis/etc/redis.conf:/usr/local/etc/redis/redis.conf:ro - /opt/so/conf/redis/working:/redis:rw + - /etc/pki/redis.crt:/certs/redis.crt:ro + - /etc/pki/redis.key:/certs/redis.key:ro + - /etc/pki/ca.crt:/certs/ca.crt:ro - entrypoint: "redis-server /usr/local/etc/redis/redis.conf" - watch: - file: /opt/so/conf/redis/etc From 26a095a89cd161100cdabb1759886f9470d3d4de Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sat, 8 Aug 2020 00:20:46 -0400 Subject: [PATCH 171/376] redis binds --- salt/redis/init.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/redis/init.sls b/salt/redis/init.sls index 02a7db4e3..6969883dd 100644 --- a/salt/redis/init.sls +++ b/salt/redis/init.sls @@ -53,6 +53,7 @@ so-redis: - user: socore - port_bindings: - 0.0.0.0:6379:6379 + - 0.0.0.0:6380:6380 - binds: - /opt/so/log/redis:/var/log/redis:rw - /opt/so/conf/redis/etc/redis.conf:/usr/local/etc/redis/redis.conf:ro From f840c85a4617d8288596d83fed401aba999c3ac5 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sat, 8 Aug 2020 17:31:59 -0400 Subject: [PATCH 172/376] make script run --- salt/elasticsearch/init.sls | 5 + salt/redis/etc/redis.conf | 2 +- salt/redis/etc/redis.conf.5 | 1316 ----------------------------------- 3 files changed, 6 insertions(+), 1317 deletions(-) delete mode 100644 salt/redis/etc/redis.conf.5 diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index b9f4894e9..5f87a430c 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -50,6 +50,11 @@ cascriptsync: - mode: 750 - template: jinja +# Run the CA magic +cascriptfun: + cmd.run: + - name: /usr/sbin/so-catrust + {% endif %} # Move our new CA over so Elastic and Logstash can use SSL with the internal CA diff --git a/salt/redis/etc/redis.conf b/salt/redis/etc/redis.conf index 857656b87..6ee29b440 100644 --- a/salt/redis/etc/redis.conf +++ b/salt/redis/etc/redis.conf @@ -258,7 +258,7 @@ loglevel notice # Specify the log file name. Also the empty string can be used to force # Redis to log on the standard output. Note that if you use standard # output for logging but daemonize, logs will be sent to /dev/null -logfile "" +logfile "/var/log/redis/redis-server.log" # To enable logging to the system logger, just set 'syslog-enabled' to yes, # and optionally update the other syslog parameters to suit your needs. diff --git a/salt/redis/etc/redis.conf.5 b/salt/redis/etc/redis.conf.5 deleted file mode 100644 index d5f39da99..000000000 --- a/salt/redis/etc/redis.conf.5 +++ /dev/null @@ -1,1316 +0,0 @@ -# Redis configuration file example. 
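Before the rest of the superseded redis.conf.5 listing, a quick hedged check that the TLS listener wired up in the preceding patches actually answers; the port and certificate paths come from the states and config above, while localhost stands in for wherever so-redis is published:

```
# Present the Redis client cert and dump the certificate the listener returns.
openssl s_client -connect localhost:6380 \
    -CAfile /etc/pki/ca.crt -cert /etc/pki/redis.crt -key /etc/pki/redis.key \
    </dev/null | openssl x509 -noout -subject -enddate
```
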
-# -# Note that in order to read the configuration file, Redis must be -# started with the file path as first argument: -# -# ./redis-server /path/to/redis.conf - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis servers but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or Redis Sentinel. Since Redis always uses the last processed -# line as value of a configuration directive, you'd better put includes -# at the beginning of this file to avoid overwriting config change at runtime. -# -# If instead you are interested in using includes to override configuration -# options, it is better to use include as the last line. -# -# include /path/to/local.conf -# include /path/to/other.conf - -################################## MODULES ##################################### - -# Load modules at startup. If the server is not able to load modules -# it will abort. It is possible to use multiple loadmodule directives. -# -# loadmodule /path/to/my_module.so -# loadmodule /path/to/other_module.so - -################################## NETWORK ##################################### - -# By default, if no "bind" configuration directive is specified, Redis listens -# for connections from all the network interfaces available on the server. -# It is possible to listen to just one or multiple selected interfaces using -# the "bind" configuration directive, followed by one or more IP addresses. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -# bind 127.0.0.1 ::1 -# -# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the -# internet, binding to all the interfaces is dangerous and will expose the -# instance to everybody on the internet. So by default we uncomment the -# following bind directive, that will force Redis to listen only into -# the IPv4 lookback interface address (this means Redis will be able to -# accept connections only from clients running into the same computer it -# is running). -# -# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES -# JUST COMMENT THE FOLLOWING LINE. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -bind 0.0.0.0 - -# Protected mode is a layer of security protection, in order to avoid that -# Redis instances left open on the internet are accessed and exploited. -# -# When protected mode is on and if: -# -# 1) The server is not binding explicitly to a set of addresses using the -# "bind" directive. -# 2) No password is configured. -# -# The server only accepts connections from clients connecting from the -# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain -# sockets. -# -# By default protected mode is enabled. You should disable it only if -# you are sure you want clients from other hosts to connect to Redis -# even if no authentication is configured, nor a specific set of interfaces -# are explicitly listed using the "bind" directive. 
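Given bind 0.0.0.0 together with the protected-mode setting that immediately follows, the port is reachable from any host that can route to the box; a hedged reachability check from a second machine (the address is a placeholder):

```
redis-cli -h 192.0.2.10 -p 6379 PING   # -> PONG if exposed; connection refused or timeout otherwise
```
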
-protected-mode no - -# Accept connections on the specified port, default is 6379 (IANA #815344). -# If port 0 is specified Redis will not listen on a TCP socket. -port 6379 - -# TCP listen() backlog. -# -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog 511 - -# Unix socket. -# -# Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -# unixsocket /tmp/redis.sock -# unixsocketperm 700 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. -# 2) Take the connection alive from the point of view of network -# equipment in the middle. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 300 seconds, which is the new -# Redis default starting with Redis 3.2.1. -tcp-keepalive 300 - -################################# GENERAL ##################################### - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize no - -# If you run Redis from upstart or systemd, Redis can interact with your -# supervision tree. Options: -# supervised no - no supervision interaction -# supervised upstart - signal upstart by putting Redis into SIGSTOP mode -# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# supervised auto - detect upstart or systemd method based on -# UPSTART_JOB or NOTIFY_SOCKET environment variables -# Note: these supervision methods only signal "process is ready." -# They do not enable continuous liveness pings back to your supervisor. -supervised no - -# If a pid file is specified, Redis writes it where specified at startup -# and removes it at exit. -# -# When the server runs non daemonized, no pid file is created if none is -# specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/redis.pid". -# -# Creating a pid file is best effort: if Redis is not able to create it -# nothing bad happens, the server will start and run normally. -pidfile /var/run/redis_6379.pid - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel notice - -# Specify the log file name. Also the empty string can be used to force -# Redis to log on the standard output. 
Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile "/var/log/redis/redis-server.log" - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -# syslog-enabled no - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -# syslog-facility local0 - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -# By default Redis shows an ASCII art logo only when started to log to the -# standard output and if the standard output is a TTY. Basically this means -# that normally a logo is displayed only in interactive sessions. -# -# However it is possible to force the pre-4.0 behavior and always show a -# ASCII art logo in startup logs by setting the following option to yes. -always-show-logo yes - -################################ SNAPSHOTTING ################################ -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving completely by commenting out all "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -save 900 1 -save 300 10 -save 60 10000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# disaster will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. -# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. 
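A hedged sketch of exercising the snapshotting behavior described above against a live instance (the directory comes from the dir directive a few lines down):

```
redis-cli BGSAVE           # force a background RDB dump immediately
redis-cli LASTSAVE         # unix timestamp of the last successful save
redis-cli CONFIG GET dir   # directory that will hold dump.rdb (/redis in this config)
```
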
-# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir /redis - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. A few things to understand ASAP about Redis replication. -# -# 1) Redis replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of slaves. -# 2) Redis slaves are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of -# time. You may want to configure the replication backlog size (see the next -# sections of this file) with a sensible value depending on your needs. -# 3) Replication is automatic and does not need user intervention. After a -# network partition slaves automatically try to reconnect to masters -# and resynchronize with them. -# -# slaveof - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -slave-serve-stale-data yes - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes - -# Replication SYNC strategy: disk or socket. -# -# ------------------------------------------------------- -# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY -# ------------------------------------------------------- -# -# New slaves and reconnecting slaves that are not able to continue the replication -# process just receiving differences, need to do what is called a "full -# synchronization". An RDB file is transmitted from the master to the slaves. -# The transmission can happen in two different ways: -# -# 1) Disk-backed: The Redis master creates a new process that writes the RDB -# file on disk. Later the file is transferred by the parent -# process to the slaves incrementally. 
-# 2) Diskless: The Redis master creates a new process that directly writes the -# RDB file to slave sockets, without touching the disk at all. -# -# With disk-backed replication, while the RDB file is generated, more slaves -# can be queued and served with the RDB file as soon as the current child producing -# the RDB file finishes its work. With diskless replication instead once -# the transfer starts, new slaves arriving will be queued and a new transfer -# will start when the current one terminates. -# -# When diskless replication is used, the master waits a configurable amount of -# time (in seconds) before starting the transfer in the hope that multiple slaves -# will arrive and the transfer can be parallelized. -# -# With slow disks and fast (large bandwidth) networks, diskless replication -# works better. -repl-diskless-sync no - -# When diskless replication is enabled, it is possible to configure the delay -# the server waits in order to spawn the child that transfers the RDB via socket -# to the slaves. -# -# This is important since once the transfer starts, it is not possible to serve -# new slaves arriving, that will be queued for the next RDB transfer, so the server -# waits a delay in order to let more slaves arrive. -# -# The delay is specified in seconds, and by default is 5 seconds. To disable -# it entirely just set it to 0 seconds and the transfer will start ASAP. -repl-diskless-sync-delay 5 - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. -# -# repl-ping-slave-period 10 - -# The following option sets the replication timeout for: -# -# 1) Bulk transfer I/O during SYNC, from the point of view of slave. -# 2) Master timeout from the point of view of slaves (data, pings). -# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# Disable TCP_NODELAY on the slave socket after SYNC? -# -# If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the slave side will -# be reduced but more bandwidth will be used for replication. -# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may -# be a good idea. -repl-disable-tcp-nodelay no - -# Set the replication backlog size. The backlog is a buffer that accumulates -# slave data when slaves are disconnected for some time, so that when a slave -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the slave missed while -# disconnected. -# -# The bigger the replication backlog, the longer the time the slave can be -# disconnected and later be able to perform a partial resynchronization. -# -# The backlog is only allocated once there is at least a slave connected. -# -# repl-backlog-size 1mb - -# After a master has no longer connected slaves for some time, the backlog -# will be freed. 
The following option configures the amount of seconds that -# need to elapse, starting from the time the last slave disconnected, for -# the backlog buffer to be freed. -# -# Note that slaves never free the backlog for timeout, since they may be -# promoted to masters later, and should be able to correctly "partially -# resynchronize" with the slaves: hence they should always accumulate backlog. -# -# A value of 0 means to never release the backlog. -# -# repl-backlog-ttl 3600 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 - -# It is possible for a master to stop accepting writes if there are less than -# N slaves connected, having a lag less or equal than M seconds. -# -# The N slaves need to be in "online" state. -# -# The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the slave, that is usually sent every second. -# -# This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough slaves -# are available, to the specified number of seconds. -# -# For example to require at least 3 slaves with a lag <= 10 seconds use: -# -# min-slaves-to-write 3 -# min-slaves-max-lag 10 -# -# Setting one or the other to 0 disables the feature. -# -# By default min-slaves-to-write is set to 0 (feature disabled) and -# min-slaves-max-lag is set to 10. - -# A Redis master is able to list the address and port of the attached -# slaves in different ways. For example the "INFO replication" section -# offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover slave instances. -# Another place where this info is available is in the output of the -# "ROLE" command of a master. -# -# The listed IP and address normally reported by a slave is obtained -# in the following way: -# -# IP: The address is auto detected by checking the peer address -# of the socket used by the slave to connect with the master. -# -# Port: The port is communicated by the slave during the replication -# handshake, and is normally the port that the slave is using to -# list for connections. -# -# However when port forwarding or Network Address Translation (NAT) is -# used, the slave may be actually reachable via different IP and port -# pairs. The following two options can be used by a slave in order to -# report to its master a specific set of IP and port, so that both INFO -# and ROLE will report those values. -# -# There is no need to use both the options if you need to override just -# the port or the IP address. -# -# slave-announce-ip 5.5.5.5 -# slave-announce-port 1234 - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. 
This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# -# requirepass foobared - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### CLIENTS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -############################## MEMORY MANAGEMENT ################################ - -# Set a memory usage limit to the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# according to the eviction policy selected (see maxmemory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU or LFU cache, or to -# set a hard memory limit for an instance (using the 'noeviction' policy). -# -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave -# output buffers (but this is not needed if the policy is 'noeviction'). -# -maxmemory 817m - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select among five behaviors: -# -# volatile-lru -> Evict using approximated LRU among the keys with an expire set. -# allkeys-lru -> Evict any key using approximated LRU. -# volatile-lfu -> Evict using approximated LFU among the keys with an expire set. 
-# allkeys-lfu -> Evict any key using approximated LFU. -# volatile-random -> Remove a random key among the ones with an expire set. -# allkeys-random -> Remove a random key, any key. -# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) -# noeviction -> Don't evict anything, just return an error on write operations. -# -# LRU means Least Recently Used -# LFU means Least Frequently Used -# -# Both LRU, LFU and volatile-ttl are implemented using approximated -# randomized algorithms. -# -# Note: with any of the above policies, Redis will return an error on write -# operations, when there are no suitable keys for eviction. -# -# At the date of writing these commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -maxmemory-policy noeviction - -# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can tune it for speed or -# accuracy. For default Redis will check five keys and pick the one that was -# used less recently, you can change the sample size using the following -# configuration directive. -# -# The default of 5 produces good enough results. 10 Approximates very closely -# true LRU but costs more CPU. 3 is faster but not very accurate. -# -# maxmemory-samples 5 - -############################# LAZY FREEING #################################### - -# Redis has two primitives to delete keys. One is called DEL and is a blocking -# deletion of the object. It means that the server stops processing new commands -# in order to reclaim all the memory associated with an object in a synchronous -# way. If the key deleted is associated with a small object, the time needed -# in order to execute the DEL command is very small and comparable to most other -# O(1) or O(log_N) commands in Redis. However if the key is associated with an -# aggregated value containing millions of elements, the server can block for -# a long time (even seconds) in order to complete the operation. -# -# For the above reasons Redis also offers non blocking deletion primitives -# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and -# FLUSHDB commands, in order to reclaim memory in background. Those commands -# are executed in constant time. Another thread will incrementally free the -# object in the background as fast as possible. -# -# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. -# It's up to the design of the application to understand when it is a good -# idea to use one or the other. However the Redis server sometimes has to -# delete keys or flush the whole database as a side effect of other operations. -# Specifically Redis deletes objects independently of a user call in the -# following scenarios: -# -# 1) On eviction, because of the maxmemory and maxmemory policy configurations, -# in order to make room for new data, without going over the specified -# memory limit. -# 2) Because of expire: when a key with an associated time to live (see the -# EXPIRE command) must be deleted from memory. -# 3) Because of a side effect of a command that stores data on a key that may -# already exist. For example the RENAME command may delete the old key -# content when it is replaced with another one. 
Similarly SUNIONSTORE -# or SORT with STORE option may delete existing keys. The SET command -# itself removes any old content of the specified key in order to replace -# it with the specified string. -# 4) During replication, when a slave performs a full resynchronization with -# its master, the content of the whole database is removed in order to -# load the RDB file just transfered. -# -# In all the above cases the default is to delete objects in a blocking way, -# like if DEL was called. However you can configure each case specifically -# in order to instead release memory in a non-blocking way like if UNLINK -# was called, using the following configuration directives: - -lazyfree-lazy-eviction no -lazyfree-lazy-expire no -lazyfree-lazy-server-del no -slave-lazy-flush no - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or -# a power outage may result into a few minutes of writes lost (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability. For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check http://redis.io/topics/persistence for more information. - -appendonly no - -# The name of the append only file (default: "appendonly.aof") - -appendfilename "appendonly.aof" - -# The fsync() call tells the Operating System to actually write data on disk -# instead of waiting for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log. Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -# appendfsync always -appendfsync everysec -# appendfsync no - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. 
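Before the mitigation option below, a hedged sketch of turning AOF on at runtime and watching its state, assuming a disposable local instance:

```
redis-cli CONFIG SET appendonly yes   # enables AOF and kicks off an initial rewrite
redis-cli INFO persistence | grep -E 'aof_enabled|aof_rewrite_in_progress'
redis-cli BGREWRITEAOF                # compact the log on demand later
```
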
-# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of Redis is -# the same as "appendfsync none". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. - -no-appendfsync-on-rewrite no - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb - -# An AOF file may be found to be truncated at the end during the Redis -# startup process, when the AOF data gets loaded back into memory. -# This may happen when the system where Redis is running -# crashes, especially when an ext4 filesystem is mounted without the -# data=ordered option (however this can't happen when Redis itself -# crashes or aborts but the operating system still works correctly). -# -# Redis can either exit with an error when this happens, or load as much -# data as possible (the default now) and start if the AOF file is found -# to be truncated at the end. The following option controls this behavior. -# -# If aof-load-truncated is set to yes, a truncated AOF file is loaded and -# the Redis server starts emitting a log to inform the user of the event. -# Otherwise if the option is set to no, the server aborts with an error -# and refuses to start. When the option is set to no, the user requires -# to fix the AOF file using the "redis-check-aof" utility before to restart -# the server. -# -# Note that if the AOF file will be found to be corrupted in the middle -# the server will still exit with an error. This option only applies when -# Redis will try to read more data from the AOF file but not enough bytes -# will be found. -aof-load-truncated yes - -# When rewriting the AOF file, Redis is able to use an RDB preamble in the -# AOF file for faster rewrites and recoveries. When this option is turned -# on the rewritten AOF file is composed of two different stanzas: -# -# [RDB file][AOF tail] -# -# When loading Redis recognizes that the AOF file starts with the "REDIS" -# string and loads the prefixed RDB file, and continues loading the AOF -# tail. -# -# This is currently turned off by default in order to avoid the surprise -# of a format change, but will at some point be used as the default. -aof-use-rdb-preamble no - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. 
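As the rest of this section explains, a script that overruns the limit leaves the server answering BUSY until SCRIPT KILL intervenes. A rough sketch, with a loop count that is only a guess and may need tuning to run past the 5-second default:

```
redis-cli EVAL "local i = 0 while i < 2^31 do i = i + 1 end return i" 0 &
sleep 6
redis-cli PING          # -> BUSY Redis is busy running a script...
redis-cli SCRIPT KILL   # permitted here because the script performed no writes
```
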
-# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceeds the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write command was -# already issued by the script but the user doesn't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. -lua-time-limit 5000 - -################################ REDIS CLUSTER ############################### -# -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however -# in order to mark it as "mature" we need to wait for a non trivial percentage -# of users to deploy it in production. -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# Normal Redis instances can't be part of a Redis Cluster; only nodes that are -# started as cluster nodes can. In order to start a Redis instance as a -# cluster node enable the cluster support uncommenting the following: -# -# cluster-enabled yes - -# Every cluster node has a cluster configuration file. This file is not -# intended to be edited by hand. It is created and updated by Redis nodes. -# Every Redis Cluster node requires a different cluster configuration file. -# Make sure that instances running in the same system do not have -# overlapping cluster configuration file names. -# -# cluster-config-file nodes-6379.conf - -# Cluster node timeout is the amount of milliseconds a node must be unreachable -# for it to be considered in failure state. -# Most other internal time limits are multiple of the node timeout. -# -# cluster-node-timeout 15000 - -# A slave of a failing master will avoid to start a failover if its data -# looks too old. -# -# There is no simple way for a slave to actually have an exact measure of -# its "data age", so the following two checks are performed: -# -# 1) If there are multiple slaves able to failover, they exchange messages -# in order to try to give an advantage to the slave with the best -# replication offset (more data from the master processed). -# Slaves will try to get their rank by offset, and apply to the start -# of the failover a delay proportional to their rank. -# -# 2) Every single slave computes the time of the last interaction with -# its master. This can be the last ping or command received (if the master -# is still in the "connected" state), or the time that elapsed since the -# disconnection with the master (if the replication link is currently down). -# If the last interaction is too old, the slave will not try to failover -# at all. -# -# The point "2" can be tuned by user. Specifically a slave will not perform -# the failover if, since the last interaction with the master, the time -# elapsed is greater than: -# -# (node-timeout * slave-validity-factor) + repl-ping-slave-period -# -# So for example if node-timeout is 30 seconds, and the slave-validity-factor -# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the -# slave will not try to failover if it was not able to talk with the master -# for longer than 310 seconds. 
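The 310-second window in that example is just the formula evaluated with those inputs, which is easy to sanity-check with shell arithmetic:

```
node_timeout=30; slave_validity_factor=10; repl_ping_slave_period=10
echo $(( node_timeout * slave_validity_factor + repl_ping_slave_period ))   # -> 310
```
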
-# -# A large slave-validity-factor may allow slaves with too old data to failover -# a master, while a too small value may prevent the cluster from being able to -# elect a slave at all. -# -# For maximum availability, it is possible to set the slave-validity-factor -# to a value of 0, which means, that slaves will always try to failover the -# master regardless of the last time they interacted with the master. -# (However they'll always try to apply a delay proportional to their -# offset rank). -# -# Zero is the only value able to guarantee that when all the partitions heal -# the cluster will always be able to continue. -# -# cluster-slave-validity-factor 10 - -# Cluster slaves are able to migrate to orphaned masters, that are masters -# that are left without working slaves. This improves the cluster ability -# to resist to failures as otherwise an orphaned master can't be failed over -# in case of failure if it has no working slaves. -# -# Slaves migrate to orphaned masters only if there are still at least a -# given number of other working slaves for their old master. This number -# is the "migration barrier". A migration barrier of 1 means that a slave -# will migrate only if there is at least 1 other working slave for its master -# and so forth. It usually reflects the number of slaves you want for every -# master in your cluster. -# -# Default is 1 (slaves migrate only if their masters remain with at least -# one slave). To disable migration just set it to a very large value. -# A value of 0 can be set but is useful only for debugging and dangerous -# in production. -# -# cluster-migration-barrier 1 - -# By default Redis Cluster nodes stop accepting queries if they detect there -# is at least an hash slot uncovered (no available node is serving it). -# This way if the cluster is partially down (for example a range of hash slots -# are no longer covered) all the cluster becomes, eventually, unavailable. -# It automatically returns available as soon as all the slots are covered again. -# -# However sometimes you want the subset of the cluster which is working, -# to continue to accept queries for the part of the key space that is still -# covered. In order to do so, just set the cluster-require-full-coverage -# option to no. -# -# cluster-require-full-coverage yes - -# This option, when set to yes, prevents slaves from trying to failover its -# master during master failures. However the master can still perform a -# manual failover, if forced to do so. -# -# This is useful in different scenarios, especially in the case of multiple -# data center operations, where we want one side to never be promoted if not -# in the case of a total DC failure. -# -# cluster-slave-no-failover no - -# In order to setup your cluster make sure to read the documentation -# available at http://redis.io web site. - -########################## CLUSTER DOCKER/NAT support ######################## - -# In certain deployments, Redis Cluster nodes address discovery fails, because -# addresses are NAT-ted or because ports are forwarded (the typical case is -# Docker and other containers). -# -# In order to make Redis Cluster working in such environments, a static -# configuration where each node knows its public address is needed. The -# following two options are used for this scope, and are: -# -# * cluster-announce-ip -# * cluster-announce-port -# * cluster-announce-bus-port -# -# Each instruct the node about its address, client port, and cluster message -# bus port. 
The information is then published in the header of the bus packets -# so that other nodes will be able to correctly map the address of the node -# publishing the information. -# -# If the above options are not used, the normal Redis Cluster auto-detection -# will be used instead. -# -# Note that when remapped, the bus port may not be at the fixed offset of -# clients port + 10000, so you can specify any port and bus-port depending -# on how they get remapped. If the bus-port is not set, a fixed offset of -# 10000 will be used as usually. -# -# Example: -# -# cluster-announce-ip 10.1.1.5 -# cluster-announce-port 6379 -# cluster-announce-bus-port 6380 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -################################ LATENCY MONITOR ############################## - -# The Redis latency monitoring subsystem samples different operations -# at runtime in order to collect data related to possible sources of -# latency of a Redis instance. -# -# Via the LATENCY command this information is available to the user that can -# print graphs and obtain reports. -# -# The system only logs operations that were performed in a time equal or -# greater than the amount of milliseconds specified via the -# latency-monitor-threshold configuration directive. When its value is set -# to zero, the latency monitor is turned off. -# -# By default latency monitoring is disabled since it is mostly not needed -# if you don't have latency issues, and collecting data has a performance -# impact, that while very small, can be measured under big load. Latency -# monitoring can easily be enabled at runtime using the command -# "CONFIG SET latency-monitor-threshold " if needed. -latency-monitor-threshold 0 - -############################# EVENT NOTIFICATION ############################## - -# Redis can notify Pub/Sub clients about events happening in the key space. -# This feature is documented at http://redis.io/topics/notifications -# -# For instance if keyspace events notification is enabled, and a client -# performs a DEL operation on key "foo" stored in the Database 0, two -# messages will be published via Pub/Sub: -# -# PUBLISH __keyspace@0__:foo del -# PUBLISH __keyevent@0__:del foo -# -# It is possible to select the events that Redis will notify among a set -# of classes. 
Every class is identified by a single character: -# -# K Keyspace events, published with __keyspace@__ prefix. -# E Keyevent events, published with __keyevent@__ prefix. -# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... -# $ String commands -# l List commands -# s Set commands -# h Hash commands -# z Sorted set commands -# x Expired events (events generated every time a key expires) -# e Evicted events (events generated when a key is evicted for maxmemory) -# A Alias for g$lshzxe, so that the "AKE" string means all the events. -# -# The "notify-keyspace-events" takes as argument a string that is composed -# of zero or multiple characters. The empty string means that notifications -# are disabled. -# -# Example: to enable list and generic events, from the point of view of the -# event name, use: -# -# notify-keyspace-events Elg -# -# Example 2: to get the stream of the expired keys subscribing to channel -# name __keyevent@0__:expired use: -# -# notify-keyspace-events Ex -# -# By default all notifications are disabled because most users don't need -# this feature and the feature has some overhead. Note that if you don't -# specify at least one of K or E, no events will be delivered. -notify-keyspace-events "" - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives. -hash-max-ziplist-entries 512 -hash-max-ziplist-value 64 - -# Lists are also encoded in a special way to save a lot of space. -# The number of entries allowed per internal list node can be specified -# as a fixed maximum size or a maximum number of elements. -# For a fixed maximum size, use -5 through -1, meaning: -# -5: max size: 64 Kb <-- not recommended for normal workloads -# -4: max size: 32 Kb <-- not recommended -# -3: max size: 16 Kb <-- probably not recommended -# -2: max size: 8 Kb <-- good -# -1: max size: 4 Kb <-- good -# Positive numbers mean store up to _exactly_ that number of elements -# per list node. -# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), -# but if your use case is unique, adjust the settings as necessary. -list-max-ziplist-size -2 - -# Lists may also be compressed. -# Compress depth is the number of quicklist ziplist nodes from *each* side of -# the list to *exclude* from compression. The head and tail of the list -# are always uncompressed for fast push/pop operations. Settings are: -# 0: disable all list compression -# 1: depth 1 means "don't start compressing until after 1 node into the list, -# going from either the head or tail" -# So: [head]->node->node->...->node->[tail] -# [head], [tail] will always be uncompressed; inner nodes will compress. -# 2: [head]->[next]->node->node->...->node->[prev]->[tail] -# 2 here means: don't compress head or head->next or tail->prev or tail, -# but compress all nodes between them. -# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] -# etc. -list-compress-depth 0 - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happen to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. 
-set-max-intset-entries 512 - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-ziplist-entries 128 -zset-max-ziplist-value 64 - -# HyperLogLog sparse representation bytes limit. The limit includes the -# 16 bytes header. When an HyperLogLog using the sparse representation crosses -# this limit, it is converted into the dense representation. -# -# A value greater than 16000 is totally useless, since at that point the -# dense representation is more memory efficient. -# -# The suggested value is ~ 3000 in order to have the benefits of -# the space efficient encoding without slowing down too much PFADD, -# which is O(N) with the sparse encoding. The value can be raised to -# ~ 10000 when CPU is not a concern, but space is, and the data set is -# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. -hll-sparse-max-bytes 3000 - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation Redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into a hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. -# -# The default is to use this millisecond 10 times every second in order to -# actively rehash the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply from time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing yes - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). -# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients including MONITOR clients -# slave -> slave clients -# pubsub -> clients subscribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). -# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can read. -# -# Instead there is a default limit for pubsub and slave clients, since -# subscribers and slaves receive data in a push fashion. 
-# -# Both the hard or the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit slave 256mb 64mb 60 -client-output-buffer-limit pubsub 32mb 8mb 60 - -# Client query buffers accumulate new commands. They are limited to a fixed -# amount by default in order to avoid that a protocol desynchronization (for -# instance due to a bug in the client) will lead to unbound memory usage in -# the query buffer. However you can configure it here if you have very special -# needs, such us huge multi/exec requests or alike. -# -# client-query-buffer-limit 1gb - -# In the Redis protocol, bulk requests, that are, elements representing single -# strings, are normally limited ot 512 mb. However you can change this limit -# here. -# -# proto-max-bulk-len 512mb - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform according to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -aof-rewrite-incremental-fsync yes - -# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good -# idea to start with the default settings and only change them after investigating -# how to improve the performances and how the keys LFU change over time, which -# is possible to inspect via the OBJECT FREQ command. -# -# There are two tunable parameters in the Redis LFU implementation: the -# counter logarithm factor and the counter decay time. It is important to -# understand what the two parameters mean before changing them. -# -# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis -# uses a probabilistic increment with logarithmic behavior. Given the value -# of the old counter, when a key is accessed, the counter is incremented in -# this way: -# -# 1. A random number R between 0 and 1 is extracted. -# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). -# 3. The counter is incremented only if R < P. -# -# The default lfu-log-factor is 10. 
This is a table of how the frequency -# counter changes with a different number of accesses with different -# logarithmic factors: -# -# +--------+------------+------------+------------+------------+------------+ -# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | -# +--------+------------+------------+------------+------------+------------+ -# | 0 | 104 | 255 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 1 | 18 | 49 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 10 | 10 | 18 | 142 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 100 | 8 | 11 | 49 | 143 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# -# NOTE: The above table was obtained by running the following commands: -# -# redis-benchmark -n 1000000 incr foo -# redis-cli object freq foo -# -# NOTE 2: The counter initial value is 5 in order to give new objects a chance -# to accumulate hits. -# -# The counter decay time is the time, in minutes, that must elapse in order -# for the key counter to be divided by two (or decremented if it has a value -# less <= 10). -# -# The default value for the lfu-decay-time is 1. A Special value of 0 means to -# decay the counter every time it happens to be scanned. -# -# lfu-log-factor 10 -# lfu-decay-time 1 - -########################### ACTIVE DEFRAGMENTATION ####################### -# -# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested -# even in production and manually tested by multiple engineers for some -# time. -# -# What is active defragmentation? -# ------------------------------- -# -# Active (online) defragmentation allows a Redis server to compact the -# spaces left between small allocations and deallocations of data in memory, -# thus allowing to reclaim back memory. -# -# Fragmentation is a natural process that happens with every allocator (but -# less so with Jemalloc, fortunately) and certain workloads. Normally a server -# restart is needed in order to lower the fragmentation, or at least to flush -# away all the data and create it again. However thanks to this feature -# implemented by Oran Agra for Redis 4.0 this process can happen at runtime -# in an "hot" way, while the server is running. -# -# Basically when the fragmentation is over a certain level (see the -# configuration options below) Redis will start to create new copies of the -# values in contiguous memory regions by exploiting certain specific Jemalloc -# features (in order to understand if an allocation is causing fragmentation -# and to allocate it in a better place), and at the same time, will release the -# old copies of the data. This process, repeated incrementally for all the keys -# will cause the fragmentation to drop back to normal values. -# -# Important things to understand: -# -# 1. This feature is disabled by default, and only works if you compiled Redis -# to use the copy of Jemalloc we ship with the source code of Redis. -# This is the default with Linux builds. -# -# 2. You never need to enable this feature if you don't have fragmentation -# issues. -# -# 3. Once you experience fragmentation, you can enable this feature when -# needed with the command "CONFIG SET activedefrag yes". -# -# The configuration parameters are able to fine tune the behavior of the -# defragmentation process. 
If you are not sure about what they mean it is
-# a good idea to leave the defaults untouched.
-
-# Enabled active defragmentation
-# activedefrag yes
-
-# Minimum amount of fragmentation waste to start active defrag
-# active-defrag-ignore-bytes 100mb
-
-# Minimum percentage of fragmentation to start active defrag
-# active-defrag-threshold-lower 10
-
-# Maximum percentage of fragmentation at which we use maximum effort
-# active-defrag-threshold-upper 100
-
-# Minimal effort for defrag in CPU percentage
-# active-defrag-cycle-min 25
-
-# Maximal effort for defrag in CPU percentage
-# active-defrag-cycle-max 75

From 9708b02387d1be44c52ffa825d7921c646365277 Mon Sep 17 00:00:00 2001
From: Mike Reeves
Date: Sat, 8 Aug 2020 18:32:36 -0400
Subject: [PATCH 173/376] update pipeline

---
 pillar/logstash/manager.sls | 7 ++-----
 pillar/logstash/search.sls  | 4 ----
 setup/so-functions          | 2 +-
 3 files changed, 3 insertions(+), 10 deletions(-)

diff --git a/pillar/logstash/manager.sls b/pillar/logstash/manager.sls
index dcf222ae4..6f3ba495b 100644
--- a/pillar/logstash/manager.sls
+++ b/pillar/logstash/manager.sls
@@ -1,12 +1,9 @@
-{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'minio') %}
+{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'redis') %}
 logstash:
   pipelines:
     manager:
       config:
       - so/0009_input_beats.conf
       - so/0010_input_hhbeats.conf
-      {%- if PIPELINE == "minio"%}
-      - so/9998_output_minio.conf.jinja
-      {%- else %}
       - so/9999_output_redis.conf.jinja
-      {%- endif %}
\ No newline at end of file
+      
\ No newline at end of file
diff --git a/pillar/logstash/search.sls b/pillar/logstash/search.sls
index 22f73c5d4..7a5aeec39 100644
--- a/pillar/logstash/search.sls
+++ b/pillar/logstash/search.sls
@@ -3,11 +3,7 @@ logstash:
   pipelines:
     search:
      config:
-      {%- if PIPELINE == "minio"%}
-      - so/0899_input_minio.conf.jinja
-      {%- else %}
       - so/0900_input_redis.conf.jinja
-      {%- endif %}
       - so/9000_output_zeek.conf.jinja
       - so/9002_output_import.conf.jinja
       - so/9034_output_syslog.conf.jinja
diff --git a/setup/so-functions b/setup/so-functions
index 7253856ba..00d9b7e1e 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -1049,7 +1049,7 @@ manager_global() {
 	"  wazuh: $WAZUH"\
 	"  managerupdate: $MANAGERUPDATES"\
 	"  imagerepo: $IMAGEREPO"\
-	"  pipeline: minio"\
+	"  pipeline: redis"\
 	"pcap:"\
 	"  sensor_checkin_interval_ms: $SENSOR_CHECKIN_INTERVAL_MS"\
 	"strelka:"\

From f154d2fa78c49b2fd52e8478dbb4613c3ff83d4c Mon Sep 17 00:00:00 2001
From: Mike Reeves
Date: Sat, 8 Aug 2020 20:04:19 -0400
Subject: [PATCH 174/376] Update SSL

---
 .../pipelines/config/so/0900_input_redis.conf.jinja  | 8 +++-----
 .../pipelines/config/so/9999_output_redis.conf.jinja | 2 +-
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
index 6e736f22f..b2b4dc864 100644
--- a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
+++ b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
@@ -1,13 +1,11 @@
-{%- if grains.role == 'so-heavynode' %}
-{%- set MANAGER = salt['pillar.get']('elasticsearch:mainip', '') %}
-{%- else %}
-{%- set MANAGER = salt['pillar.get']('global:managerip', '') %}
-{% endif -%}
+{%- set MANAGER = salt['grains.get']('master') %}
 {%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %}

 input {
   redis {
     host => '{{ MANAGER }}'
+    port => 6380
+    ssl => true
     data_type => 'list'
     key => 'logstash:unparsed'
     type => 'redis-input'
diff --git a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
index 25620e501..4b38a684d 100644
--- a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
@@ -1,4 +1,4 @@
-{% set MANAGER = salt['pillar.get']('global:managerip', '') %}
+{%- set MANAGER = salt['grains.get']('master') %}
 {% set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) %}
 output {
   redis {

From 112dba454911d84bb5f4c06b1469dac3422c546c Mon Sep 17 00:00:00 2001
From: Mike Reeves
Date: Sat, 8 Aug 2020 20:12:17 -0400
Subject: [PATCH 175/376] Update SSL

---
 salt/redis/etc/redis.conf | 1 +
 1 file changed, 1 insertion(+)

diff --git a/salt/redis/etc/redis.conf b/salt/redis/etc/redis.conf
index 6ee29b440..46f0c46e8 100644
--- a/salt/redis/etc/redis.conf
+++ b/salt/redis/etc/redis.conf
@@ -90,6 +90,7 @@ tls-cert-file /certs/redis.crt
 tls-key-file /certs/redis.key
 tls-ca-cert-file /certs/ca.crt
 tls-port 6380
+tls-auth-clients no

 # Accept connections on the specified port, default is 6379 (IANA #815344).
 # If port 0 is specified Redis will not listen on a TCP socket.

From 9248896a205b62632bb7c4aa2f1d6914791738d3 Mon Sep 17 00:00:00 2001
From: Mike Reeves
Date: Sat, 8 Aug 2020 20:24:30 -0400
Subject: [PATCH 176/376] fix redis ports

---
 salt/firewall/portgroups.yaml                                  | 2 +-
 salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja  | 2 +-
 salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja | 2 +-
 salt/redis/etc/redis.conf                                      | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/salt/firewall/portgroups.yaml b/salt/firewall/portgroups.yaml
index db7450364..8771df8ef 100644
--- a/salt/firewall/portgroups.yaml
+++ b/salt/firewall/portgroups.yaml
@@ -64,7 +64,7 @@ firewall:
     redis:
       tcp:
         - 6379
-        - 6380
+        - 9696
     salt_manager:
       tcp:
        - 4505
diff --git a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
index b2b4dc864..c98a2a388 100644
--- a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
+++ b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
@@ -4,7 +4,7 @@
 input {
   redis {
     host => '{{ MANAGER }}'
-    port => 6380
+    port => 9696
     ssl => true
     data_type => 'list'
     key => 'logstash:unparsed'
diff --git a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
index 4b38a684d..5505ca636 100644
--- a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
@@ -3,7 +3,7 @@
 output {
   redis {
     host => '{{ MANAGER }}'
-    port => 6380
+    port => 9696
     data_type => 'list'
     key => 'logstash:unparsed'
diff --git a/salt/redis/etc/redis.conf b/salt/redis/etc/redis.conf
index 46f0c46e8..cf43bc04c 100644
--- a/salt/redis/etc/redis.conf
+++ b/salt/redis/etc/redis.conf
@@ -89,7 +89,7 @@ protected-mode no
 tls-cert-file /certs/redis.crt
 tls-key-file /certs/redis.key
 tls-ca-cert-file /certs/ca.crt
-tls-port 6380
+tls-port 9696
 tls-auth-clients no

 # Accept connections on the specified port, default is 6379 (IANA #815344).
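The two patches above move Redis client traffic to a dedicated TLS listener (first 6380, then 9696) while leaving plain TCP on 6379, and `tls-auth-clients no` means clients verify the server but present no certificate of their own. Once the port bindings and firewall rules catch up in the patches that follow, a quick smoke test from the manager might look like this; it assumes a TLS-capable redis-cli (Redis 6+) and the manager CA at /etc/pki/ca.crt, and `manager.example.com` is a placeholder hostname:

```sh
# Plain listener, still published on 6379
redis-cli -h manager.example.com -p 6379 ping

# TLS listener on 9696; only the CA is needed since tls-auth-clients is "no"
redis-cli -h manager.example.com -p 9696 --tls --cacert /etc/pki/ca.crt ping

# Inspect the certificate Redis serves on the TLS port
openssl s_client -connect manager.example.com:9696 -CAfile /etc/pki/ca.crt </dev/null
```

Both redis-cli calls should return PONG if the listeners and certificates line up.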
From bc09a89a0112ed3ce90bad20c92208c05f938139 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sat, 8 Aug 2020 20:36:28 -0400 Subject: [PATCH 177/376] output plugin to normal port --- salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja index 5505ca636..626ed62c3 100644 --- a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja +++ b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja @@ -3,13 +3,12 @@ output { redis { host => '{{ MANAGER }}' - port => 9696 + port => 6379 data_type => 'list' key => 'logstash:unparsed' congestion_interval => 1 congestion_threshold => 50000000 batch => true batch_events => {{ BATCH }} - ssl => true } } From 63031a965a622c4d5c36897c17a94602f6a704c7 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sat, 8 Aug 2020 20:48:46 -0400 Subject: [PATCH 178/376] fix ports --- pillar/firewall/ports.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pillar/firewall/ports.sls b/pillar/firewall/ports.sls index 4f7c06bec..1e0be460b 100644 --- a/pillar/firewall/ports.sls +++ b/pillar/firewall/ports.sls @@ -33,6 +33,8 @@ firewall: - 9300 - 9400 - 9500 + - 9595 + - 9696 udp: - 1514 minions: From 32fe3ed961f79e5ad97da750849864b05685019a Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sat, 8 Aug 2020 20:59:13 -0400 Subject: [PATCH 179/376] fix ports --- salt/redis/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/redis/init.sls b/salt/redis/init.sls index 6969883dd..3f24ba079 100644 --- a/salt/redis/init.sls +++ b/salt/redis/init.sls @@ -53,7 +53,7 @@ so-redis: - user: socore - port_bindings: - 0.0.0.0:6379:6379 - - 0.0.0.0:6380:6380 + - 0.0.0.0:9696:9696 - binds: - /opt/so/log/redis:/var/log/redis:rw - /opt/so/conf/redis/etc/redis.conf:/usr/local/etc/redis/redis.conf:ro From ab7014d70a8add747767ebe907bbb8cde181920c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 10 Aug 2020 10:19:25 -0400 Subject: [PATCH 180/376] upgrading to salt 3001.1 --- salt/salt/master.defaults.yaml | 2 +- salt/salt/minion.defaults.yaml | 2 +- setup/so-functions | 20 ++++++++++---------- setup/yum_repos/saltstack.repo | 6 +++--- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml index c366ae6ce..8694ffbc7 100644 --- a/salt/salt/master.defaults.yaml +++ b/salt/salt/master.defaults.yaml @@ -2,4 +2,4 @@ # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions salt: master: - version: 3001 \ No newline at end of file + version: 3001.1 \ No newline at end of file diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml index cd061237b..31c313df6 100644 --- a/salt/salt/minion.defaults.yaml +++ b/salt/salt/minion.defaults.yaml @@ -2,4 +2,4 @@ # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions salt: minion: - version: 3001 \ No newline at end of file + version: 3001.1 \ No newline at end of file diff --git a/setup/so-functions b/setup/so-functions index 00d9b7e1e..088aac7ad 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1269,7 +1269,7 @@ saltify() { if [ $OS = 'centos' ]; then set_progress_str 5 'Installing Salt repo' { - sudo rpm --import 
https://repo.saltstack.com/py3/redhat/7/x86_64/3001/SALTSTACK-GPG-KEY.pub; + sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001.1/SALTSTACK-GPG-KEY.pub; cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo; } >> "$setup_log" 2>&1 set_progress_str 6 'Installing various dependencies' @@ -1281,12 +1281,12 @@ saltify() { yum -y install sqlite argon2 curl mariadb-devel >> "$setup_log" 2>&1 # Download Ubuntu Keys in case manager updates = 1 mkdir -p /opt/so/gpg >> "$setup_log" 2>&1 - wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3001/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 + wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1 wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1 cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo >> "$setup_log" 2>&1 set_progress_str 7 'Installing salt-master' - yum -y install salt-master-3001 >> "$setup_log" 2>&1 + yum -y install salt-master-3001.1 >> "$setup_log" 2>&1 systemctl enable salt-master >> "$setup_log" 2>&1 ;; *) @@ -1349,8 +1349,8 @@ saltify() { 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE') # TODO: should this also be HELIXSENSOR? # Add saltstack repo(s) - wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3001/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1 - echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" + wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1 + echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001.1 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" # Add Docker repo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1 @@ -1358,7 +1358,7 @@ saltify() { # Get gpg keys mkdir -p /opt/so/gpg >> "$setup_log" 2>&1 - wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3001/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 + wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1 wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1 @@ -1371,7 +1371,7 @@ saltify() { set_progress_str 6 'Installing various dependencies' apt-get -y install sqlite3 argon2 libssl-dev >> "$setup_log" 2>&1 set_progress_str 7 'Installing salt-master' - apt-get -y install salt-master=3001+ds-1 >> "$setup_log" 2>&1 + apt-get -y install salt-master=3001.1+ds-1 >> "$setup_log" 2>&1 apt-mark hold salt-master >> "$setup_log" 2>&1 ;; *) @@ -1382,14 +1382,14 @@ saltify() { echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1 apt-key add 
"$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1 - echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" + echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001.1/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log" ;; esac apt-get update >> "$setup_log" 2>&1 set_progress_str 8 'Installing salt-minion & python modules' - apt-get -y install salt-minion=3001+ds-1\ - salt-common=3001+ds-1 >> "$setup_log" 2>&1 + apt-get -y install salt-minion=3001.1+ds-1\ + salt-common=3001.1+ds-1 >> "$setup_log" 2>&1 apt-mark hold salt-minion salt-common >> "$setup_log" 2>&1 if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-dateutil python3-m2crypto python3-mysqldb >> "$setup_log" 2>&1 diff --git a/setup/yum_repos/saltstack.repo b/setup/yum_repos/saltstack.repo index f04f02be0..2e1b425fb 100644 --- a/setup/yum_repos/saltstack.repo +++ b/setup/yum_repos/saltstack.repo @@ -1,6 +1,6 @@ -[saltstack-repo] +[saltstack] name=SaltStack repo for RHEL/CentOS $releasever PY3 -baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001/ +baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001.1/ enabled=1 gpgcheck=1 -gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001/SALTSTACK-GPG-KEY.pub +gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001.1/SALTSTACK-GPG-KEY.pub \ No newline at end of file From 8146930b802d9f9ee51bf70203d7110a7a7872dc Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 10 Aug 2020 12:22:42 -0400 Subject: [PATCH 181/376] fix --exclude, add salt-minion-3001.1 where missed --- setup/so-functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 088aac7ad..db8e3d6f1 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1306,7 +1306,7 @@ saltify() { set_progress_str 8 'Installing salt-minion & python modules' { yum -y install epel-release - yum -y install salt-minion-3001\ + yum -y install salt-minion-3001.1\ python3\ python36-docker\ python36-dateutil\ @@ -1317,7 +1317,7 @@ saltify() { lvm2\ openssl\ jq; - yum -y update exclude=salt*; + yum -y update --exclude=salt*; systemctl enable salt-minion; } >> "$setup_log" 2>&1 yum versionlock salt* From 1f3ceb50dacf44b077a32b71637f27b88361ea54 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 10 Aug 2020 13:04:19 -0400 Subject: [PATCH 182/376] add replace: False to get rid of warning, eventhough it doesntt. bug report submitted on saltstack gh. 
From 1f3ceb50dacf44b077a32b71637f27b88361ea54 Mon Sep 17 00:00:00 2001
From: m0duspwnens
Date: Mon, 10 Aug 2020 13:04:19 -0400
Subject: [PATCH 182/376] add replace: False to get rid of warning, even
 though it doesn't. Bug report submitted on SaltStack GH.

---
 salt/ca/init.sls | 1 +
 1 file changed, 1 insertion(+)

diff --git a/salt/ca/init.sls b/salt/ca/init.sls
index dcec40d9a..62b89d351 100644
--- a/salt/ca/init.sls
+++ b/salt/ca/init.sls
@@ -36,6 +36,7 @@ pki_private_key:
     - days_valid: 3650
     - days_remaining: 0
     - backup: True
+    - replace: False
     - require:
       - file: /etc/pki

From e659af346623132a9b9ad96fea0415c558c7f316 Mon Sep 17 00:00:00 2001
From: Mike Reeves
Date: Mon, 10 Aug 2020 14:26:56 -0400
Subject: [PATCH 183/376] ES basic SSL

---
 salt/kibana/etc/kibana.yml                        |  5 ++
 salt/logstash/init.sls                            |  3 +-
 .../config/so/9000_output_zeek.conf.jinja         |  6 ++
 .../config/so/9002_output_import.conf.jinja       |  6 ++
 .../config/so/9004_output_flow.conf.jinja         |  6 ++
 .../config/so/9033_output_snort.conf.jinja        |  6 ++
 .../config/so/9034_output_syslog.conf.jinja       |  6 ++
 .../config/so/9100_output_osquery.conf.jinja      |  6 ++
 .../config/so/9200_output_firewall.conf.jinja     |  6 ++
 .../config/so/9400_output_suricata.conf.jinja     |  6 ++
 .../config/so/9500_output_beats.conf.jinja        |  6 ++
 .../config/so/9600_output_ossec.conf.jinja        |  6 ++
 .../config/so/9700_output_strelka.conf.jinja      |  6 ++
 salt/soc/files/soc/soc.json                       |  5 ++
 salt/ssl/init.sls                                 | 76 ++++++++++++++++++-
 15 files changed, 152 insertions(+), 3 deletions(-)

diff --git a/salt/kibana/etc/kibana.yml b/salt/kibana/etc/kibana.yml
index 4d19b251b..89e568df9 100644
--- a/salt/kibana/etc/kibana.yml
+++ b/salt/kibana/etc/kibana.yml
@@ -1,10 +1,15 @@
 ---
 # Default Kibana configuration from kibana-docker.
 {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
+{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 server.name: kibana
 server.host: "0"
 server.basePath: /kibana
+{% if FEATURES %}
+elasticsearch.hosts: [ "https://{{ ES }}:9200" ]
+{%- else %}
 elasticsearch.hosts: [ "http://{{ ES }}:9200" ]
+{%- endif %}
 #kibana.index: ".kibana"
 #elasticsearch.username: elastic
 #elasticsearch.password: changeme
diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls
index 9f9a5c51b..07af6bbeb 100644
--- a/salt/logstash/init.sls
+++ b/salt/logstash/init.sls
@@ -167,7 +167,8 @@ so-logstash:
       - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
       - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
       - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
-      - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
+      - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
+      - /etc/pki/ca.cer:/ca/ca.crt:ro
 {%- if grains['role'] == 'so-eval' %}
       - /nsm/zeek:/nsm/zeek:ro
       - /nsm/suricata:/suricata:ro
diff --git a/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja
index f86bf946c..f9dbcccfa 100644
--- a/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja
@@ -3,11 +3,17 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- endif %}
+{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 output {
   if [module] =~ "zeek" and "import" not in [tags] {
     elasticsearch {
       pipeline => "%{module}.%{dataset}"
+      {%- if FEATURES %}
+      hosts => "https://{{ ES }}"
+      cacert => '/ca/ca.crt'
+      {%- else %}
       hosts => "{{ ES }}"
+      {%- endif %}
       index => "so-zeek-%{+YYYY.MM.dd}"
       template_name => "so-zeek"
       template => "/templates/so-zeek-template.json"
diff --git a/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja b/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja
index 52c9f034a..5be2c2640 100644 --- a/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja +++ b/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja @@ -3,11 +3,17 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if "import" in [tags] { elasticsearch { pipeline => "%{module}.%{dataset}" + {%- if FEATURES %} + hosts => "https://{{ ES }}" + cacert => '/ca/ca.crt' + {%- else %} hosts => "{{ ES }}" + {%- endif %} index => "so-import-%{+YYYY.MM.dd}" template_name => "so-import" template => "/templates/so-import-template.json" diff --git a/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja b/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja index 740676367..f71cf5d52 100644 --- a/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja +++ b/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja @@ -3,10 +3,16 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [event_type] == "sflow" { elasticsearch { + {%- if FEATURES %} + hosts => "https://{{ ES }}" + cacert => '/ca/ca.crt' + {%- else %} hosts => "{{ ES }}" + {%- endif %} index => "so-flow-%{+YYYY.MM.dd}" template_name => "so-flow" template => "/templates/so-flow-template.json" diff --git a/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja b/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja index fed1ffdf5..f7a29415a 100644 --- a/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja +++ b/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja @@ -3,10 +3,16 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [event_type] == "ids" and "import" not in [tags] { elasticsearch { + {%- if FEATURES %} + hosts => "https://{{ ES }}" + cacert => '/ca/ca.crt' + {%- else %} hosts => "{{ ES }}" + {%- endif %} index => "so-ids-%{+YYYY.MM.dd}" template_name => "so-ids" template => "/templates/so-ids-template.json" diff --git a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja index 5087f41da..403ba1f2e 100644 --- a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja +++ b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja @@ -3,11 +3,17 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [module] =~ "syslog" { elasticsearch { pipeline => "%{module}" + {%- if FEATURES %} + hosts => "https://{{ ES }}" + cacert => '/ca/ca.crt' + {%- else %} hosts => "{{ ES }}" + {%- endif %} index => "so-syslog-%{+YYYY.MM.dd}" template_name => "so-syslog" template => "/templates/so-syslog-template.json" diff --git a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja index 01436cf5f..a8c8910d9 100644 --- a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja +++ b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja @@ -3,11 +3,17 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if 
[module] =~ "osquery" { elasticsearch { pipeline => "%{module}.%{dataset}" + {%- if FEATURES %} + hosts => "https://{{ ES }}" + cacert => '/ca/ca.crt' + {%- else %} hosts => "{{ ES }}" + {%- endif %} index => "so-osquery-%{+YYYY.MM.dd}" template_name => "so-osquery" template => "/templates/so-osquery-template.json" diff --git a/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja b/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja index a295b5f7a..8f006c90e 100644 --- a/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja +++ b/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja @@ -3,10 +3,16 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if "firewall" in [tags] { elasticsearch { + {%- if FEATURES %} + hosts => "https://{{ ES }}" + cacert => '/ca/ca.crt' + {%- else %} hosts => "{{ ES }}" + {%- endif %} index => "so-firewall-%{+YYYY.MM.dd}" template_name => "so-firewall" template => "/templates/so-firewall-template.json" diff --git a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja index ace7cccf1..35f9f35b4 100644 --- a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja +++ b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja @@ -3,11 +3,17 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [module] =~ "suricata" and "import" not in [tags] { elasticsearch { pipeline => "%{module}.%{dataset}" + {%- if FEATURES %} + hosts => "https://{{ ES }}" + cacert => '/ca/ca.crt' + {%- else %} hosts => "{{ ES }}" + {%- endif %} index => "so-ids-%{+YYYY.MM.dd}" template_name => "so-ids" template => "/templates/so-ids-template.json" diff --git a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja index ed513f597..e923e5044 100644 --- a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja +++ b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja @@ -3,11 +3,17 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if "beat-ext" in [tags] and "import" not in [tags] { elasticsearch { pipeline => "beats.common" + {%- if FEATURES %} + hosts => "https://{{ ES }}" + cacert => '/ca/ca.crt' + {%- else %} hosts => "{{ ES }}" + {%- endif %} index => "so-beats-%{+YYYY.MM.dd}" template_name => "so-beats" template => "/templates/so-beats-template.json" diff --git a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja index 14a9bc1d1..080c8e4e1 100644 --- a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja +++ b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja @@ -3,11 +3,17 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [module] =~ "ossec" { elasticsearch { pipeline => "%{module}.%{dataset}" + {%- if FEATURES %} + hosts => "https://{{ ES }}" + cacert => '/ca/ca.crt' + {%- else %} hosts => "{{ ES }}" + {%- endif %} index => "so-ossec-%{+YYYY.MM.dd}" template_name => "so-ossec" 
template => "/templates/so-ossec-template.json" diff --git a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja index 0e6977e29..8e5230af6 100644 --- a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja +++ b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja @@ -3,11 +3,17 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [module] =~ "strelka" { elasticsearch { pipeline => "%{module}.%{dataset}" + {%- if FEATURES %} + hosts => "https://{{ ES }}" + cacert => '/ca/ca.crt' + {%- else %} hosts => "{{ ES }}" + {%- endif %} index => "so-strelka-%{+YYYY.MM.dd}" template_name => "so-strelka" template => "/templates/so-strelka-template.json" diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json index 999819356..86bad6cf4 100644 --- a/salt/soc/files/soc/soc.json +++ b/salt/soc/files/soc/soc.json @@ -1,5 +1,6 @@ {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') -%} {%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') -%} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} { "logFilename": "/opt/sensoroni/logs/sensoroni-server.log", "server": { @@ -15,7 +16,11 @@ "hostUrl": "http://{{ MANAGERIP }}:4434/" }, "elastic": { + {%- if FEATURES %} + "hostUrl": "https://{{ MANAGERIP }}:9200", + {%- else %} "hostUrl": "http://{{ MANAGERIP }}:9200", + {%- endif %} "username": "", "password": "", "verifyCert": false diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 9691c861f..595910b1b 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -194,7 +194,7 @@ regkeyperms: - x509: /etc/pki/minio.crt {%- endif %} -# Create a cert for the docker registry +# Create a cert for minio /etc/pki/minio.crt: x509.certificate_managed: - ca_server: {{ ca_server }} @@ -229,6 +229,41 @@ miniokeyperms: - x509: /etc/pki/redis.crt {%- endif %} +# Create a cert for elasticsearch +/etc/pki/elasticsearch.crt: + x509.certificate_managed: + - ca_server: {{ ca_server }} + - signing_policy: registry + - public_key: /etc/pki/ealsticsearch.key + - CN: {{ manager }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + +miniokeyperms: + file.managed: + - replace: False + - name: /etc/pki/elasticsearch.key + - mode: 640 + - group: 939 + +/etc/pki/elasticsearch.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%} + - prereq: + - x509: /etc/pki/elasticsearch.crt + {%- endif %} + # Create a cert for the docker registry /etc/pki/redis.crt: x509.certificate_managed: @@ -457,4 +492,41 @@ fleetkeyperms: - mode: 640 - group: 939 -{% endif %} \ No newline at end of file +{% endif %} + +{% if grains['role'] in ['so-search', 'so-heavynode'] %} +# Create a cert for elasticsearch +/etc/pki/elasticsearch.crt: + x509.certificate_managed: + - ca_server: {{ ca_server }} + - signing_policy: registry + - public_key: /etc/pki/ealsticsearch.key + - CN: {{ manager }} + - days_remaining: 0 + - 
days_valid: 820 + - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + +miniokeyperms: + file.managed: + - replace: False + - name: /etc/pki/elasticsearch.key + - mode: 640 + - group: 939 + +/etc/pki/elasticsearch.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%} + - prereq: + - x509: /etc/pki/elasticsearch.crt + {%- endif %} +{%- endif %} \ No newline at end of file From 523e42bec83ed9eed5dd1a376526f4373c4864bd Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 14:40:11 -0400 Subject: [PATCH 184/376] Fix ssl state --- salt/ssl/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 595910b1b..700083be6 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -244,7 +244,7 @@ miniokeyperms: # Will trigger 5 days (432000 sec) from cert expiration - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' -miniokeyperms: +ealstickeyperms: file.managed: - replace: False - name: /etc/pki/elasticsearch.key From 788864310c380c03d66a1aae379437eb70d820a7 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 14:52:20 -0400 Subject: [PATCH 185/376] Fix ssl state --- salt/ssl/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 700083be6..9677bdda2 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -234,7 +234,7 @@ miniokeyperms: x509.certificate_managed: - ca_server: {{ ca_server }} - signing_policy: registry - - public_key: /etc/pki/ealsticsearch.key + - public_key: /etc/pki/elasticsearch.key - CN: {{ manager }} - days_remaining: 0 - days_valid: 820 From 28806513d9e4788b0a16720966ce948c6be19c12 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 20:53:56 -0400 Subject: [PATCH 186/376] Logstash logic fix --- salt/logstash/init.sls | 2 +- salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja | 2 +- salt/logstash/pipelines/config/so/9002_output_import.conf.jinja | 2 +- salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja | 2 +- salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja | 2 +- salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja | 2 +- .../logstash/pipelines/config/so/9100_output_osquery.conf.jinja | 2 +- .../pipelines/config/so/9200_output_firewall.conf.jinja | 2 +- .../pipelines/config/so/9400_output_suricata.conf.jinja | 2 +- salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja | 2 +- salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja | 2 +- .../logstash/pipelines/config/so/9700_output_strelka.conf.jinja | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 07af6bbeb..1a85a081d 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -17,7 +17,7 @@ {% set MANAGER = salt['grains.get']('master') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} -{% if FEATURES %} +{%- if FEATURES is sameas true %} {% 
set FEATURES = "-features" %} {% else %} {% set FEATURES = '' %} diff --git a/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja index f9dbcccfa..e075918f6 100644 --- a/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja +++ b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja @@ -8,7 +8,7 @@ output { if [module] =~ "zeek" and "import" not in [tags] { elasticsearch { pipeline => "%{module}.%{dataset}" - {%- if FEATURES %} + {%- if FEATURES is sameas true %} hosts => "https://{{ ES }}" cacert => '/ca/ca.crt' {%- else %} diff --git a/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja b/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja index 5be2c2640..ae0a619fe 100644 --- a/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja +++ b/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja @@ -8,7 +8,7 @@ output { if "import" in [tags] { elasticsearch { pipeline => "%{module}.%{dataset}" - {%- if FEATURES %} + {%- if FEATURES is sameas true %} hosts => "https://{{ ES }}" cacert => '/ca/ca.crt' {%- else %} diff --git a/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja b/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja index f71cf5d52..c888a9752 100644 --- a/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja +++ b/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja @@ -7,7 +7,7 @@ output { if [event_type] == "sflow" { elasticsearch { - {%- if FEATURES %} + {%- if FEATURES is sameas true %} hosts => "https://{{ ES }}" cacert => '/ca/ca.crt' {%- else %} diff --git a/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja b/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja index f7a29415a..daddd4b0a 100644 --- a/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja +++ b/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja @@ -7,7 +7,7 @@ output { if [event_type] == "ids" and "import" not in [tags] { elasticsearch { - {%- if FEATURES %} + {%- if FEATURES is sameas true %} hosts => "https://{{ ES }}" cacert => '/ca/ca.crt' {%- else %} diff --git a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja index 403ba1f2e..d554adf16 100644 --- a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja +++ b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja @@ -8,7 +8,7 @@ output { if [module] =~ "syslog" { elasticsearch { pipeline => "%{module}" - {%- if FEATURES %} + {%- if FEATURES is sameas true %} hosts => "https://{{ ES }}" cacert => '/ca/ca.crt' {%- else %} diff --git a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja index a8c8910d9..c1e6ae59f 100644 --- a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja +++ b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja @@ -8,7 +8,7 @@ output { if [module] =~ "osquery" { elasticsearch { pipeline => "%{module}.%{dataset}" - {%- if FEATURES %} + {%- if FEATURES is sameas true %} hosts => "https://{{ ES }}" cacert => '/ca/ca.crt' {%- else %} diff --git a/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja b/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja index 8f006c90e..14e741b9d 100644 --- a/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja +++ 
b/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja @@ -7,7 +7,7 @@ output { if "firewall" in [tags] { elasticsearch { - {%- if FEATURES %} + {%- if FEATURES is sameas true %} hosts => "https://{{ ES }}" cacert => '/ca/ca.crt' {%- else %} diff --git a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja index 35f9f35b4..a684e2412 100644 --- a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja +++ b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja @@ -8,7 +8,7 @@ output { if [module] =~ "suricata" and "import" not in [tags] { elasticsearch { pipeline => "%{module}.%{dataset}" - {%- if FEATURES %} + {%- if FEATURES is sameas true %} hosts => "https://{{ ES }}" cacert => '/ca/ca.crt' {%- else %} diff --git a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja index e923e5044..321566bac 100644 --- a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja +++ b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja @@ -8,7 +8,7 @@ output { if "beat-ext" in [tags] and "import" not in [tags] { elasticsearch { pipeline => "beats.common" - {%- if FEATURES %} + {%- if FEATURES is sameas true %} hosts => "https://{{ ES }}" cacert => '/ca/ca.crt' {%- else %} diff --git a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja index 080c8e4e1..4af0839c4 100644 --- a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja +++ b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja @@ -8,7 +8,7 @@ output { if [module] =~ "ossec" { elasticsearch { pipeline => "%{module}.%{dataset}" - {%- if FEATURES %} + {%- if FEATURES is sameas true %} hosts => "https://{{ ES }}" cacert => '/ca/ca.crt' {%- else %} diff --git a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja index 8e5230af6..a0e9950de 100644 --- a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja +++ b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja @@ -8,7 +8,7 @@ output { if [module] =~ "strelka" { elasticsearch { pipeline => "%{module}.%{dataset}" - {%- if FEATURES %} + {%- if FEATURES is sameas true %} hosts => "https://{{ ES }}" cacert => '/ca/ca.crt' {%- else %} From 92cc176b6d8ae0a7302486ac1a42cbda586ec05b Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 20:59:41 -0400 Subject: [PATCH 187/376] Fix features logic in all states that use it --- salt/elasticsearch/init.sls | 2 +- salt/filebeat/init.sls | 2 +- salt/kibana/init.sls | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 5f87a430c..2a675cc45 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -17,7 +17,7 @@ {% set MANAGER = salt['grains.get']('master') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} -{% if FEATURES %} +{%- if FEATURES is sameas true %} {% set FEATURES = "-features" %} {% else %} {% set FEATURES = '' %} diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls index a4fa36b14..ee7c5ae10 100644 --- a/salt/filebeat/init.sls +++ b/salt/filebeat/init.sls @@ -16,7 +16,7 @@ {% set MANAGER = salt['grains.get']('master') %} {% set MANAGERIP = salt['pillar.get']('global:managerip', '') %} {% set FEATURES = 
salt['pillar.get']('elastic:features', False) %} -{% if FEATURES %} +{%- if FEATURES is sameas true %} {% set FEATURES = "-features" %} {% else %} {% set FEATURES = '' %} diff --git a/salt/kibana/init.sls b/salt/kibana/init.sls index a1dccd137..8711d47d1 100644 --- a/salt/kibana/init.sls +++ b/salt/kibana/init.sls @@ -2,7 +2,7 @@ {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} -{% if FEATURES %} +{%- if FEATURES is sameas true %} {% set FEATURES = "-features" %} {% else %} {% set FEATURES = '' %} From e7cd527d4934cdfcf6b0c4312573a47c8cf2a281 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 21:18:03 -0400 Subject: [PATCH 188/376] Enable SSL in elastic --- salt/elasticsearch/files/elasticsearch.yml | 12 ++++++++++++ salt/elasticsearch/init.sls | 5 +++++ 2 files changed, 17 insertions(+) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index 4d5d5b2e4..0f5e9e59f 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -5,6 +5,7 @@ {%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:esclustername', '') %} {%- endif %} {%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} cluster.name: "{{ ESCLUSTERNAME }}" network.host: 0.0.0.0 @@ -22,6 +23,17 @@ cluster.routing.allocation.disk.threshold_enabled: true cluster.routing.allocation.disk.watermark.low: 95% cluster.routing.allocation.disk.watermark.high: 98% cluster.routing.allocation.disk.watermark.flood_stage: 98% +{%- if FEATURES is sameas true %} +xpack.security.enabled: true +xpack.security.http.ssl.enabled: true +xpack.security.transport.ssl.enabled: true +xpack.security.http.ssl.key: /ca/elasticsearch.key +xpack.security.http.ssl.certificate: /ca/elasticsearch.crt +xpack.security.http.ssl.certificate_authorities: /ca/ca.crt +xpack.security.transport.ssl.key: /ca/elasticsearch.key +xpack.security.transport.ssl.certificate: /ca/elasticsearch.crt +xpack.security.transport.ssl.certificate_authorities: /ca/ca.crt +{%- endif %} node.attr.box_type: {{ NODE_ROUTE_TYPE }} node.name: {{ ESCLUSTERNAME }} script.max_compilations_rate: 1000/1m diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 2a675cc45..d343f19c1 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -192,6 +192,11 @@ so-elasticsearch: - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro + {%- if FEATURES is sameas true %} + - /etc/pki/ca.crt:/ca/ca.cert:ro + - /etc/pki/elasticsearch.key:/ca/elasticsearch.key:ro + - /etc/pki/elasticsearch.crt:/ca/elasticsearch.crt:ro + {%- endif %} - watch: - file: cacertz From e28619604cda6a437fd2cc5c1101ea5c5a377341 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 21:26:00 -0400 Subject: [PATCH 189/376] Change certs path on elastic --- salt/elasticsearch/init.sls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index d343f19c1..6819f4796 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -193,9 +193,9 @@ so-elasticsearch: - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - 
/opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro {%- if FEATURES is sameas true %} - - /etc/pki/ca.crt:/ca/ca.cert:ro - - /etc/pki/elasticsearch.key:/ca/elasticsearch.key:ro - - /etc/pki/elasticsearch.crt:/ca/elasticsearch.crt:ro + - /etc/pki/ca.crt:/usr/share/elasticsearch/ca/ca.cert:ro + - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/ca/elasticsearch.key:ro + - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/ca/elasticsearch.crt:ro {%- endif %} - watch: - file: cacertz From cf5c29d01c00089c099ea224a4500a1d9e338809 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 21:30:53 -0400 Subject: [PATCH 190/376] Change certs path on elastic --- salt/elasticsearch/files/elasticsearch.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index 0f5e9e59f..cbfede50e 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -27,12 +27,12 @@ cluster.routing.allocation.disk.watermark.flood_stage: 98% xpack.security.enabled: true xpack.security.http.ssl.enabled: true xpack.security.transport.ssl.enabled: true -xpack.security.http.ssl.key: /ca/elasticsearch.key -xpack.security.http.ssl.certificate: /ca/elasticsearch.crt -xpack.security.http.ssl.certificate_authorities: /ca/ca.crt -xpack.security.transport.ssl.key: /ca/elasticsearch.key -xpack.security.transport.ssl.certificate: /ca/elasticsearch.crt -xpack.security.transport.ssl.certificate_authorities: /ca/ca.crt +xpack.security.http.ssl.key: /usr/share/elasticsearch/ca/elasticsearch.key +xpack.security.http.ssl.certificate: /usr/share/elasticsearch/ca/elasticsearch.crt +xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/ca/ca.crt +xpack.security.transport.ssl.key: /usr/share/elasticsearch/ca/elasticsearch.key +xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/ca/elasticsearch.crt +xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/ca/ca.crt {%- endif %} node.attr.box_type: {{ NODE_ROUTE_TYPE }} node.name: {{ ESCLUSTERNAME }} From 08d544e527ef60e41cf3846d43cf603457edb528 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 21:44:45 -0400 Subject: [PATCH 191/376] Fix SSL perms --- salt/ssl/init.sls | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 9677bdda2..71daecfc6 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -216,7 +216,8 @@ miniokeyperms: - mode: 640 - group: 939 -/etc/pki/redis.key: +# Create a cert for elasticsearch +/etc/pki/elasticsearch.key: x509.private_key_managed: - CN: {{ manager }} - bits: 4096 @@ -224,12 +225,11 @@ miniokeyperms: - days_valid: 820 - backup: True - new: True - {% if salt['file.file_exists']('/etc/pki/redis.key') -%} + {% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%} - prereq: - - x509: /etc/pki/redis.crt + - x509: /etc/pki/elasticsearch.crt {%- endif %} -# Create a cert for elasticsearch /etc/pki/elasticsearch.crt: x509.certificate_managed: - ca_server: {{ ca_server }} @@ -249,9 +249,10 @@ ealstickeyperms: - replace: False - name: /etc/pki/elasticsearch.key - mode: 640 - - group: 939 + - group: 930 -/etc/pki/elasticsearch.key: +# Create a cert for Redis encryption +/etc/pki/redis.key: x509.private_key_managed: - CN: {{ manager }} - bits: 4096 @@ -259,12 +260,11 @@ ealstickeyperms: - days_valid: 820 - backup: True - new: True - {% 
if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%} + {% if salt['file.file_exists']('/etc/pki/redis.key') -%} - prereq: - - x509: /etc/pki/elasticsearch.crt + - x509: /etc/pki/redis.crt {%- endif %} -# Create a cert for the docker registry /etc/pki/redis.crt: x509.certificate_managed: - ca_server: {{ ca_server }} From 811da5732a8beaf4206b032327ecedea34bb8c89 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 21:51:29 -0400 Subject: [PATCH 192/376] Elastic logic fix --- salt/elasticsearch/init.sls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 6819f4796..a983b809b 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -18,9 +18,9 @@ {% set FEATURES = salt['pillar.get']('elastic:features', False) %} {%- if FEATURES is sameas true %} - {% set FEATURES = "-features" %} + {% set FEATUREZ = "-features" %} {% else %} - {% set FEATURES = '' %} + {% set FEATUREZ = '' %} {% endif %} {% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %} @@ -168,7 +168,7 @@ eslogdir: so-elasticsearch: docker_container.running: - - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}{{ FEATURES }} + - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}{{ FEATUREZ }} - hostname: elasticsearch - name: so-elasticsearch - user: elasticsearch From cdda46ce587fc112635f3b4d9d4d6fdaa31b8e8c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 21:54:36 -0400 Subject: [PATCH 193/376] ca typo --- salt/elasticsearch/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index a983b809b..ef846ec5e 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -193,7 +193,7 @@ so-elasticsearch: - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro {%- if FEATURES is sameas true %} - - /etc/pki/ca.crt:/usr/share/elasticsearch/ca/ca.cert:ro + - /etc/pki/ca.crt:/usr/share/elasticsearch/ca/ca.crt:ro - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/ca/elasticsearch.key:ro - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/ca/elasticsearch.crt:ro {%- endif %} - watch: - file: cacertz From 6d2be9af7e82b34add972f87d686b84572340b17 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 21:58:44 -0400 Subject: [PATCH 194/376] Things like this are why I hate Java --- salt/elasticsearch/init.sls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index ef846ec5e..b3f570c21 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -193,9 +193,9 @@ so-elasticsearch: - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro {%- if FEATURES is sameas true %} - - /etc/pki/ca.crt:/usr/share/elasticsearch/ca/ca.crt:ro - - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/ca/elasticsearch.key:ro - - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/ca/elasticsearch.crt:ro + - /etc/pki/ca.crt:/etc/elasticsearch/ca/ca.crt:ro + - /etc/pki/elasticsearch.key:/etc/elasticsearch/ca/elasticsearch.key:ro + - /etc/pki/elasticsearch.crt:/etc/elasticsearch/ca/elasticsearch.crt:ro {%- endif %} - watch: - file: cacertz From 31ab1e8ed8d6ba2f7fb53388e03ffe7aa6d02587 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 
2020 22:03:24 -0400 Subject: [PATCH 195/376] Things like this are why I hate Java --- salt/elasticsearch/init.sls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index b3f570c21..3d407f3fd 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -193,9 +193,9 @@ so-elasticsearch: - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro {%- if FEATURES is sameas true %} - - /etc/pki/ca.crt:/etc/elasticsearch/ca/ca.crt:ro - - /etc/pki/elasticsearch.key:/etc/elasticsearch/ca/elasticsearch.key:ro - - /etc/pki/elasticsearch.crt:/etc/elasticsearch/ca/elasticsearch.crt:ro + - /etc/pki/ca.crt:/etc/elasticsearch/ca.crt:ro + - /etc/pki/elasticsearch.key:/etc/elasticsearch/elasticsearch.key:ro + - /etc/pki/elasticsearch.crt:/etc/elasticsearch/elasticsearch.crt:ro {%- endif %} - watch: - file: cacertz From d00231af066d9f4b8f4506995237288cde919dab Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 22:05:46 -0400 Subject: [PATCH 196/376] Things like this are why I hate Java --- salt/elasticsearch/files/elasticsearch.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index cbfede50e..f54195467 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -27,12 +27,12 @@ cluster.routing.allocation.disk.watermark.flood_stage: 98% xpack.security.enabled: true xpack.security.http.ssl.enabled: true xpack.security.transport.ssl.enabled: true -xpack.security.http.ssl.key: /usr/share/elasticsearch/ca/elasticsearch.key -xpack.security.http.ssl.certificate: /usr/share/elasticsearch/ca/elasticsearch.crt -xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/ca/ca.crt -xpack.security.transport.ssl.key: /usr/share/elasticsearch/ca/elasticsearch.key -xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/ca/elasticsearch.crt -xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/ca/ca.crt +xpack.security.http.ssl.key: /etc/elasticsearch/elasticsearch.key +xpack.security.http.ssl.certificate: /etc/elasticsearch/elasticsearch.crt +xpack.security.http.ssl.certificate_authorities: /etc/elasticsearch/ca.crt +xpack.security.transport.ssl.key: /etc/elasticsearch/elasticsearch.key +xpack.security.transport.ssl.certificate: /etc/elasticsearch/elasticsearch.crt +xpack.security.transport.ssl.certificate_authorities: /etc/elasticsearch/ca.crt {%- endif %} node.attr.box_type: {{ NODE_ROUTE_TYPE }} node.name: {{ ESCLUSTERNAME }} From 6007a6c4d8373a239ccf1955a88391fc267e6785 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 22:10:03 -0400 Subject: [PATCH 197/376] Things like this are why I hate Java --- salt/elasticsearch/files/elasticsearch.yml | 12 ++++++------ salt/elasticsearch/init.sls | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index f54195467..cb1526eba 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -27,12 +27,12 @@ cluster.routing.allocation.disk.watermark.flood_stage: 98% xpack.security.enabled: true xpack.security.http.ssl.enabled: true xpack.security.transport.ssl.enabled: true -xpack.security.http.ssl.key: 
/etc/elasticsearch/elasticsearch.key -xpack.security.http.ssl.certificate: /etc/elasticsearch/elasticsearch.crt -xpack.security.http.ssl.certificate_authorities: /etc/elasticsearch/ca.crt -xpack.security.transport.ssl.key: /etc/elasticsearch/elasticsearch.key -xpack.security.transport.ssl.certificate: /etc/elasticsearch/elasticsearch.crt -xpack.security.transport.ssl.certificate_authorities: /etc/elasticsearch/ca.crt +xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key +xpack.security.http.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt +xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt +xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key +xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt +xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt {%- endif %} node.attr.box_type: {{ NODE_ROUTE_TYPE }} node.name: {{ ESCLUSTERNAME }} diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 3d407f3fd..802957bd2 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -193,9 +193,9 @@ so-elasticsearch: - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro {%- if FEATURES is sameas true %} - - /etc/pki/ca.crt:/etc/elasticsearch/ca.crt:ro - - /etc/pki/elasticsearch.key:/etc/elasticsearch/elasticsearch.key:ro - - /etc/pki/elasticsearch.crt:/etc/elasticsearch/elasticsearch.crt:ro + - /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro + - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro + - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro {%- endif %} - watch: - file: cacertz From c3d8c599cc19b09ccbebd1f63f48aa7259c6145d Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 22:13:17 -0400 Subject: [PATCH 198/376] Turn off user auth --- salt/elasticsearch/files/elasticsearch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index cb1526eba..f3b6bf1f5 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -24,7 +24,7 @@ cluster.routing.allocation.disk.watermark.low: 95% cluster.routing.allocation.disk.watermark.high: 98% cluster.routing.allocation.disk.watermark.flood_stage: 98% {%- if FEATURES is sameas true %} -xpack.security.enabled: true +xpack.security.enabled: false xpack.security.http.ssl.enabled: true xpack.security.transport.ssl.enabled: true xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key From 52cc56bebbfdcfebc29c185dbd2c4efba9423d9a Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 22:56:15 -0400 Subject: [PATCH 199/376] Add transport hostname --- salt/elasticsearch/files/elasticsearch.yml | 4 ++-- salt/elasticsearch/init.sls | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index f3b6bf1f5..f8e62c701 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -16,7 +16,7 @@ discovery.zen.minimum_master_nodes: 1 # This is a test -- if this is here, then the volume is mounted correctly. 
path.logs: /var/log/elasticsearch action.destructive_requires_name: true -transport.bind_host: 0.0.0.0 +transport.bind_host: {{ grains.host }} transport.publish_host: {{ NODEIP }} transport.publish_port: 9300 cluster.routing.allocation.disk.threshold_enabled: true @@ -25,7 +25,7 @@ cluster.routing.allocation.disk.watermark.high: 98% cluster.routing.allocation.disk.watermark.flood_stage: 98% {%- if FEATURES is sameas true %} xpack.security.enabled: false -xpack.security.http.ssl.enabled: true +xpack.security.http.ssl.enabled: false xpack.security.transport.ssl.enabled: true xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key xpack.security.http.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 802957bd2..6686054ef 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -172,6 +172,8 @@ so-elasticsearch: - hostname: elasticsearch - name: so-elasticsearch - user: elasticsearch + - extra_hosts: + - {{ grains.host }} - environment: - discovery.type=single-node #- bootstrap.memory_lock=true From 730e389aae9e3887c9b310077cb3a858da47c5a4 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 22:57:49 -0400 Subject: [PATCH 200/376] Add transport hostname --- salt/elasticsearch/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 6686054ef..4d92291ae 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -173,7 +173,7 @@ so-elasticsearch: - name: so-elasticsearch - user: elasticsearch - extra_hosts: - - {{ grains.host }} + - {{ grains.host }}:127.0.0.1 - environment: - discovery.type=single-node #- bootstrap.memory_lock=true From ac3f490299b5ab4974bf948bba11506bbb3daa7d Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 10 Aug 2020 23:02:03 -0400 Subject: [PATCH 201/376] Add transport hostname --- salt/elasticsearch/files/elasticsearch.yml | 2 +- salt/elasticsearch/init.sls | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index f8e62c701..b26e759a5 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -17,7 +17,7 @@ discovery.zen.minimum_master_nodes: 1 path.logs: /var/log/elasticsearch action.destructive_requires_name: true transport.bind_host: {{ grains.host }} -transport.publish_host: {{ NODEIP }} +transport.publish_host: {{ grains.host }} transport.publish_port: 9300 cluster.routing.allocation.disk.threshold_enabled: true cluster.routing.allocation.disk.watermark.low: 95% diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 4d92291ae..738f7928b 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -16,6 +16,8 @@ {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} +{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%} + {%- if FEATURES is sameas true %} {% set FEATUREZ = "-features" %} @@ -173,7 +175,7 @@ so-elasticsearch: - name: so-elasticsearch - user: elasticsearch - extra_hosts: - - {{ grains.host }}:127.0.0.1 + - {{ grains.host }}:{{ NODEIP }} - environment: - discovery.type=single-node #- bootstrap.memory_lock=true From 59292425c0999c821258d5e718f961fd13844669 Mon Sep 17 00:00:00 2001 
From: Mike Reeves Date: Mon, 10 Aug 2020 23:03:54 -0400 Subject: [PATCH 202/376] Add transport hostname --- salt/elasticsearch/files/elasticsearch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index b26e759a5..625d8c8d9 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -16,7 +16,7 @@ discovery.zen.minimum_master_nodes: 1 # This is a test -- if this is here, then the volume is mounted correctly. path.logs: /var/log/elasticsearch action.destructive_requires_name: true -transport.bind_host: {{ grains.host }} +transport.bind_host: 0.0.0.0 transport.publish_host: {{ grains.host }} transport.publish_port: 9300 cluster.routing.allocation.disk.threshold_enabled: true From 854cc487f7a9e781f02eb663614531c6c252dd66 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 11 Aug 2020 09:21:06 -0400 Subject: [PATCH 203/376] Always disable screen blanking, to simplify logic --- setup/so-setup | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index 7f127fc57..1e49b325f 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -118,9 +118,7 @@ if [ "$OS" == ubuntu ]; then update-alternatives --set newt-palette /etc/newt/palette.original >> $setup_log 2>&1 fi -if [ $automated == no ]; then - setterm -blank 0 >> $setup_log 2>&1 -fi +setterm -blank 0 >> $setup_log 2>&1 if [ "$setup_type" == 'iso' ] || (whiptail_you_sure); then true From 32f8ea3158d4ff0fdca567da4f46784e8a18b14d Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 10:02:00 -0400 Subject: [PATCH 204/376] Removes https from rest port --- salt/kibana/etc/kibana.yml | 6 +----- salt/soc/files/soc/soc.json | 4 ---- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/salt/kibana/etc/kibana.yml b/salt/kibana/etc/kibana.yml index 89e568df9..4bcc22016 100644 --- a/salt/kibana/etc/kibana.yml +++ b/salt/kibana/etc/kibana.yml @@ -1,15 +1,11 @@ --- # Default Kibana configuration from kibana-docker. 
{%- set ES = salt['pillar.get']('manager:mainip', '') -%} -{% set FEATURES = salt['pillar.get']('elastic:features', False) %} +{%- set FEATURES = salt['pillar.get']('elastic:features', False) %} server.name: kibana server.host: "0" server.basePath: /kibana -{% if FEATURES %} -elasticsearch.hosts: [ "https://{{ ES }}:9200" ] -{%- else %} elasticsearch.hosts: [ "http://{{ ES }}:9200" ] -{%- endif %} #kibana.index: ".kibana" #elasticsearch.username: elastic #elasticsearch.password: changeme diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json index 86bad6cf4..b44733cb1 100644 --- a/salt/soc/files/soc/soc.json +++ b/salt/soc/files/soc/soc.json @@ -16,11 +16,7 @@ "hostUrl": "http://{{ MANAGERIP }}:4434/" }, "elastic": { - {%- if FEATURES %} - "hostUrl": "https://{{ MANAGERIP }}:9200", - {%- else %} "hostUrl": "http://{{ MANAGERIP }}:9200", - {%- endif %} "username": "", "password": "", "verifyCert": false From d94120947963b3ca3b0dde7139142a0040a0eb44 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 10:17:28 -0400 Subject: [PATCH 205/376] Walk nodes tab --- salt/elasticsearch/init.sls | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 738f7928b..846bb63f9 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -176,6 +176,12 @@ so-elasticsearch: - user: elasticsearch - extra_hosts: - {{ grains.host }}:{{ NODEIP }} + {%- if ismanager %} + {%- if salt['pillar.get']('nodestab', {}) %} + {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} + - {{ SN }}:{{ SNDATA.ip }} + {%- endif %} + {%- endif %} - environment: - discovery.type=single-node #- bootstrap.memory_lock=true From b84d7d818f430677ca69ae06a3300e5ce84766fa Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 10:20:02 -0400 Subject: [PATCH 206/376] Fix for loop --- salt/elasticsearch/init.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 846bb63f9..1df063ae6 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -180,6 +180,7 @@ so-elasticsearch: {%- if salt['pillar.get']('nodestab', {}) %} {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} - {{ SN }}:{{ SNDATA.ip }} + {%- endfor %} {%- endif %} {%- endif %} - environment: From 7e0249c3772571609491fb94a80ccdc7e101904f Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 10:28:21 -0400 Subject: [PATCH 207/376] ES cleanup --- salt/elasticsearch/init.sls | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 1df063ae6..c93b6a900 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -185,12 +185,8 @@ so-elasticsearch: {%- endif %} - environment: - discovery.type=single-node - #- bootstrap.memory_lock=true - #- cluster.name={{ esclustername }} - ES_JAVA_OPTS=-Xms{{ esheap }} -Xmx{{ esheap }} - #- http.host=0.0.0.0 - #- transport.host=127.0.0.1 - - ulimits: + ulimits: - memlock=-1:-1 - nofile=65536:65536 - nproc=4096 From a5131da5c9e52cdb42834207b60c42059d8dacf2 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 11:07:34 -0400 Subject: [PATCH 208/376] fix ssl certs for SN --- salt/ssl/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 71daecfc6..0336ee84b 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -494,7 +494,7 @@ fleetkeyperms: 
{% endif %} -{% if grains['role'] in ['so-search', 'so-heavynode'] %} +{% if grains['role'] in ['so-node', 'so-heavynode'] %} # Create a cert for elasticsearch /etc/pki/elasticsearch.crt: x509.certificate_managed: From 32c407231ffdab96c012b92af98b2acbd86a711c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 11:08:49 -0400 Subject: [PATCH 209/376] fix ssl certs for SN --- salt/ssl/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 0336ee84b..0fabe832d 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -500,7 +500,7 @@ fleetkeyperms: x509.certificate_managed: - ca_server: {{ ca_server }} - signing_policy: registry - - public_key: /etc/pki/ealsticsearch.key + - public_key: /etc/pki/elasticsearch.key - CN: {{ manager }} - days_remaining: 0 - days_valid: 820 From cbba473c2d687638d7e96610ed64916bd72639e2 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 11:10:27 -0400 Subject: [PATCH 210/376] fix ssl certs for SN --- salt/ssl/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 0fabe832d..6751c4b15 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -515,7 +515,7 @@ miniokeyperms: - replace: False - name: /etc/pki/elasticsearch.key - mode: 640 - - group: 939 + - group: 930 /etc/pki/elasticsearch.key: x509.private_key_managed: From 05a05b5e9b391317acd280b3b1275f0d500daf0e Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 11:15:57 -0400 Subject: [PATCH 211/376] use hostname for cross cluster --- salt/utility/bin/crossthestreams | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/utility/bin/crossthestreams b/salt/utility/bin/crossthestreams index d21e3c1a4..6301a4f71 100644 --- a/salt/utility/bin/crossthestreams +++ b/salt/utility/bin/crossthestreams @@ -1,6 +1,8 @@ #!/bin/bash {% set ES = salt['pillar.get']('manager:mainip', '') %} {%- set MANAGER = salt['grains.get']('master') %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} + # Wait for ElasticSearch to come up, so that we can query for version infromation echo -n "Waiting for ElasticSearch..." @@ -35,6 +37,10 @@ echo "Applying cross cluster search config..." 
{%- if salt['pillar.get']('nodestab', {}) %} {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} + {%- if FEATURES is sameas true %} +curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN }}:9300"]}}}}}' + {%- else %} curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNDATA.ip }}:9300"]}}}}}' + {%- endif %} {%- endfor %} {%- endif %} From 348f7f39cc4ea84cec77ba4f925bef774d59f910 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 11:37:53 -0400 Subject: [PATCH 212/376] strip node suffix --- salt/elasticsearch/init.sls | 2 +- salt/utility/bin/crossthestreams | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index c93b6a900..28db606f1 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -179,7 +179,7 @@ so-elasticsearch: {%- if ismanager %} {%- if salt['pillar.get']('nodestab', {}) %} {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} - - {{ SN }}:{{ SNDATA.ip }} + - {{ SN.split('_')|first }}:{{ SNDATA.ip }} {%- endfor %} {%- endif %} {%- endif %} diff --git a/salt/utility/bin/crossthestreams b/salt/utility/bin/crossthestreams index 6301a4f71..9c398ae6d 100644 --- a/salt/utility/bin/crossthestreams +++ b/salt/utility/bin/crossthestreams @@ -38,7 +38,7 @@ echo "Applying cross cluster search config..." {%- if salt['pillar.get']('nodestab', {}) %} {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} {%- if FEATURES is sameas true %} -curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN }}:9300"]}}}}}' +curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN.split('_')|first }}": {"skip_unavailable": "true", "seeds": ["{{ SN }}:9300"]}}}}}' {%- else %} curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNDATA.ip }}:9300"]}}}}}' {%- endif %} From 95367f8d236102e7c0bcd5738158022231dcaf3e Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 12:00:58 -0400 Subject: [PATCH 213/376] Fix cross cluster --- salt/utility/bin/crossthestreams | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utility/bin/crossthestreams b/salt/utility/bin/crossthestreams index 9c398ae6d..a057e261b 100644 --- a/salt/utility/bin/crossthestreams +++ b/salt/utility/bin/crossthestreams @@ -38,7 +38,7 @@ echo "Applying cross cluster search config..." 
{%- if salt['pillar.get']('nodestab', {}) %} {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} {%- if FEATURES is sameas true %} -curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN.split('_')|first }}": {"skip_unavailable": "true", "seeds": ["{{ SN }}:9300"]}}}}}' +curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}' {%- else %} curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNDATA.ip }}:9300"]}}}}}' {%- endif %} From f6a85ac852cf69e39405a7208bb2f983046fa5f6 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 11 Aug 2020 12:27:21 -0400 Subject: [PATCH 214/376] top and seed registry for importpcap node --- salt/top.sls | 18 ++++++++++++++++++ setup/so-functions | 17 ++++++++++++++++- setup/so-setup | 8 ++++---- 3 files changed, 38 insertions(+), 5 deletions(-) diff --git a/salt/top.sls b/salt/top.sls index 34b825355..316523f08 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -386,3 +386,21 @@ base: - fleet - fleet.install_package - filebeat + + '*_importpcap and G@saltversion:{{saltversion}}': + - match: compound + - ca + - ssl + - registry + - manager + - common + - nginx + - soc + - firewall + - suricata.manager + - elasticsearch + - kibana + - suricata + - filebeat + - utility + - schedule diff --git a/setup/so-functions b/setup/so-functions index 5ae4b7716..9bc2bacfd 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -718,6 +718,20 @@ docker_seed_registry() { local VERSION="$SOVERSION" if ! [ -f /nsm/docker-registry/docker/registry.tar ]; then + if [ "$install_type" == 'IMPORTPCAP' ]; then + local TRUSTED_CONTAINERS=(\ + "so-nginx:$VERSION" \ + "so-filebeat:$VERSION" \ + "so-suricata:$VERSION" \ + "so-soc:$VERSION" \ + "so-elasticsearch:$VERSION" \ + "so-kibana:$VERSION" \ + "so-kratos:$VERSION" \ + "so-suricata:$VERSION" \ + "so-registry:$VERSION" \ + "so-zeek:$VERSION" + ) + else local TRUSTED_CONTAINERS=(\ "so-nginx:$VERSION" \ "so-filebeat:$VERSION" \ @@ -729,7 +743,8 @@ docker_seed_registry() { "so-telegraf:$VERSION" \ "so-zeek:$VERSION" ) - if [ "$install_type" != 'HELIXSENSOR' ]; then + fi + if [ "$install_type" != 'HELIXSENSOR' ] && [ "$install_type" != 'IMPORTPCAP' ]; then TRUSTED_CONTAINERS=("${TRUSTED_CONTAINERS[@]}" \ "so-acng:$VERSION" \ "so-thehive-cortex:$VERSION" \ diff --git a/setup/so-setup b/setup/so-setup index cae5d1029..dd9e73b32 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -529,10 +529,10 @@ fi set_progress_str 26 'Downloading containers from the internet' fi - if [[ ! 
$is_importpcap ]]; then - salt-call state.apply -l info registry >> $setup_log 2>&1 - docker_seed_registry 2>> "$setup_log" # ~ 60% when finished - fi + + salt-call state.apply -l info registry >> $setup_log 2>&1 + docker_seed_registry 2>> "$setup_log" # ~ 60% when finished + set_progress_str 60 "$(print_salt_state_apply 'manager')" salt-call state.apply -l info manager >> $setup_log 2>&1 From ec62668eb74c5e9809b9a4b2ca812d4d2d837a66 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 11 Aug 2020 12:31:37 -0400 Subject: [PATCH 215/376] firewall rules for importpcap node --- salt/firewall/assigned_hostgroups.map.yaml | 3 +++ setup/so-functions | 2 +- setup/so-setup | 2 -- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/salt/firewall/assigned_hostgroups.map.yaml b/salt/firewall/assigned_hostgroups.map.yaml index 5cee13b10..fe7e12135 100644 --- a/salt/firewall/assigned_hostgroups.map.yaml +++ b/salt/firewall/assigned_hostgroups.map.yaml @@ -499,6 +499,9 @@ role: - {{ portgroups.influxdb }} - {{ portgroups.elasticsearch_rest }} - {{ portgroups.elasticsearch_node }} + minion: + portgroups: + - {{ portgroups.docker_registry }} sensor: portgroups: - {{ portgroups.beats_5044 }} diff --git a/setup/so-functions b/setup/so-functions index 9bc2bacfd..fd2e88516 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1568,7 +1568,7 @@ set_initial_firewall_policy() { $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost minion "$MAINIP" $default_salt_dir/pillar/data/addtotab.sh managertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" ;; - 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE') + 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORTPCAP') $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP" $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP" $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP" diff --git a/setup/so-setup b/setup/so-setup index dd9e73b32..0994bad06 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -529,10 +529,8 @@ fi set_progress_str 26 'Downloading containers from the internet' fi - salt-call state.apply -l info registry >> $setup_log 2>&1 docker_seed_registry 2>> "$setup_log" # ~ 60% when finished - set_progress_str 60 "$(print_salt_state_apply 'manager')" salt-call state.apply -l info manager >> $setup_log 2>&1 From b95f8a9314a68e34fa2ec320fc0e1a31e77ff1a4 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 11 Aug 2020 12:57:57 -0400 Subject: [PATCH 216/376] Update Redis maxmemory settings --- salt/redis/etc/redis.conf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/redis/etc/redis.conf b/salt/redis/etc/redis.conf index cf43bc04c..7679a789e 100644 --- a/salt/redis/etc/redis.conf +++ b/salt/redis/etc/redis.conf @@ -858,7 +858,7 @@ acllog-max-len 128 # limit for maxmemory so that there is some free RAM on the system for replica # output buffers (but this is not needed if the policy is 'noeviction'). # -# maxmemory +maxmemory 817m # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory # is reached. 
You can select one from the following behaviors: @@ -889,7 +889,7 @@ acllog-max-len 128 # # The default is: # -# maxmemory-policy noeviction +maxmemory-policy noeviction # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated # algorithms (in order to save memory), so you can tune it for speed or @@ -1834,4 +1834,4 @@ jemalloc-bg-thread yes # aof_rewrite_cpulist 8-11 # # Set bgsave child process to cpu affinity 1,10,11 -# bgsave_cpulist 1,10-11 \ No newline at end of file +# bgsave_cpulist 1,10-11 From 362749ca85a77f21447fe42387bb57fe7063da3a Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 13:00:42 -0400 Subject: [PATCH 217/376] Make hostnames default in cross cluster --- salt/utility/bin/crossthestreams | 4 ---- 1 file changed, 4 deletions(-) diff --git a/salt/utility/bin/crossthestreams b/salt/utility/bin/crossthestreams index a057e261b..e67ce9f57 100644 --- a/salt/utility/bin/crossthestreams +++ b/salt/utility/bin/crossthestreams @@ -37,10 +37,6 @@ echo "Applying cross cluster search config..." {%- if salt['pillar.get']('nodestab', {}) %} {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} - {%- if FEATURES is sameas true %} curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}' - {%- else %} -curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNDATA.ip }}:9300"]}}}}}' - {%- endif %} {%- endfor %} {%- endif %} From ee914504243295c7c4db26e49236391d339c74ec Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 11 Aug 2020 13:30:41 -0400 Subject: [PATCH 218/376] fix patch schedule name for importpcap node --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 0994bad06..9a52cb64a 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -246,7 +246,7 @@ if [[ $is_node ]]; then fi if [[ $is_importpcap ]]; then - patch_schedule=Automatic + PATCHSCHEDULENAME=Automatic MTU=1500 RULESETUP=ETOPEN NSMSETUP=BASIC From 8daf11f085e2da3e309935bd81fa8bf50149667d Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 13:58:28 -0400 Subject: [PATCH 219/376] Fix logstash outputs --- .../logstash/pipelines/config/so/9000_output_zeek.conf.jinja | 5 ----- .../pipelines/config/so/9002_output_import.conf.jinja | 5 ----- .../logstash/pipelines/config/so/9004_output_flow.conf.jinja | 5 ----- .../pipelines/config/so/9033_output_snort.conf.jinja | 5 ----- .../pipelines/config/so/9034_output_syslog.conf.jinja | 5 ----- .../pipelines/config/so/9100_output_osquery.conf.jinja | 5 ----- .../pipelines/config/so/9200_output_firewall.conf.jinja | 5 ----- .../pipelines/config/so/9400_output_suricata.conf.jinja | 5 ----- .../pipelines/config/so/9500_output_beats.conf.jinja | 5 ----- .../pipelines/config/so/9600_output_ossec.conf.jinja | 5 ----- .../pipelines/config/so/9700_output_strelka.conf.jinja | 5 ----- 11 files changed, 55 deletions(-) diff --git a/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja index e075918f6..98a842b2d 100644 --- a/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja +++ b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja @@ -8,12 +8,7 @@ output { if [module] =~ "zeek" and "import" 
not in [tags] { elasticsearch { pipeline => "%{module}.%{dataset}" - {%- if FEATURES is sameas true %} - hosts => "https://{{ ES }}" - cacert => '/ca/ca.crt' - {%- else %} hosts => "{{ ES }}" - {%- endif %} index => "so-zeek-%{+YYYY.MM.dd}" template_name => "so-zeek" template => "/templates/so-zeek-template.json" diff --git a/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja b/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja index ae0a619fe..315c892e2 100644 --- a/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja +++ b/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja @@ -8,12 +8,7 @@ output { if "import" in [tags] { elasticsearch { pipeline => "%{module}.%{dataset}" - {%- if FEATURES is sameas true %} - hosts => "https://{{ ES }}" - cacert => '/ca/ca.crt' - {%- else %} hosts => "{{ ES }}" - {%- endif %} index => "so-import-%{+YYYY.MM.dd}" template_name => "so-import" template => "/templates/so-import-template.json" diff --git a/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja b/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja index c888a9752..889a3567f 100644 --- a/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja +++ b/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja @@ -7,12 +7,7 @@ output { if [event_type] == "sflow" { elasticsearch { - {%- if FEATURES is sameas true %} - hosts => "https://{{ ES }}" - cacert => '/ca/ca.crt' - {%- else %} hosts => "{{ ES }}" - {%- endif %} index => "so-flow-%{+YYYY.MM.dd}" template_name => "so-flow" template => "/templates/so-flow-template.json" diff --git a/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja b/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja index daddd4b0a..96d2ae5ba 100644 --- a/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja +++ b/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja @@ -7,12 +7,7 @@ output { if [event_type] == "ids" and "import" not in [tags] { elasticsearch { - {%- if FEATURES is sameas true %} - hosts => "https://{{ ES }}" - cacert => '/ca/ca.crt' - {%- else %} hosts => "{{ ES }}" - {%- endif %} index => "so-ids-%{+YYYY.MM.dd}" template_name => "so-ids" template => "/templates/so-ids-template.json" diff --git a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja index d554adf16..ee5c57c5a 100644 --- a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja +++ b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja @@ -8,12 +8,7 @@ output { if [module] =~ "syslog" { elasticsearch { pipeline => "%{module}" - {%- if FEATURES is sameas true %} - hosts => "https://{{ ES }}" - cacert => '/ca/ca.crt' - {%- else %} hosts => "{{ ES }}" - {%- endif %} index => "so-syslog-%{+YYYY.MM.dd}" template_name => "so-syslog" template => "/templates/so-syslog-template.json" diff --git a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja index c1e6ae59f..a9e5ac64d 100644 --- a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja +++ b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja @@ -8,12 +8,7 @@ output { if [module] =~ "osquery" { elasticsearch { pipeline => "%{module}.%{dataset}" - {%- if FEATURES is sameas true %} - hosts => "https://{{ ES }}" - cacert => '/ca/ca.crt' - {%- else %} hosts => "{{ ES }}" - {%- endif %} index => "so-osquery-%{+YYYY.MM.dd}" template_name => 
"so-osquery" template => "/templates/so-osquery-template.json" diff --git a/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja b/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja index 14e741b9d..f8aa07b1b 100644 --- a/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja +++ b/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja @@ -7,12 +7,7 @@ output { if "firewall" in [tags] { elasticsearch { - {%- if FEATURES is sameas true %} - hosts => "https://{{ ES }}" - cacert => '/ca/ca.crt' - {%- else %} hosts => "{{ ES }}" - {%- endif %} index => "so-firewall-%{+YYYY.MM.dd}" template_name => "so-firewall" template => "/templates/so-firewall-template.json" diff --git a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja index a684e2412..e65952cca 100644 --- a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja +++ b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja @@ -8,12 +8,7 @@ output { if [module] =~ "suricata" and "import" not in [tags] { elasticsearch { pipeline => "%{module}.%{dataset}" - {%- if FEATURES is sameas true %} - hosts => "https://{{ ES }}" - cacert => '/ca/ca.crt' - {%- else %} hosts => "{{ ES }}" - {%- endif %} index => "so-ids-%{+YYYY.MM.dd}" template_name => "so-ids" template => "/templates/so-ids-template.json" diff --git a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja index 321566bac..10700733e 100644 --- a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja +++ b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja @@ -8,12 +8,7 @@ output { if "beat-ext" in [tags] and "import" not in [tags] { elasticsearch { pipeline => "beats.common" - {%- if FEATURES is sameas true %} - hosts => "https://{{ ES }}" - cacert => '/ca/ca.crt' - {%- else %} hosts => "{{ ES }}" - {%- endif %} index => "so-beats-%{+YYYY.MM.dd}" template_name => "so-beats" template => "/templates/so-beats-template.json" diff --git a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja index 4af0839c4..89d1a9466 100644 --- a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja +++ b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja @@ -8,12 +8,7 @@ output { if [module] =~ "ossec" { elasticsearch { pipeline => "%{module}.%{dataset}" - {%- if FEATURES is sameas true %} - hosts => "https://{{ ES }}" - cacert => '/ca/ca.crt' - {%- else %} hosts => "{{ ES }}" - {%- endif %} index => "so-ossec-%{+YYYY.MM.dd}" template_name => "so-ossec" template => "/templates/so-ossec-template.json" diff --git a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja index a0e9950de..cdc340b39 100644 --- a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja +++ b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja @@ -8,12 +8,7 @@ output { if [module] =~ "strelka" { elasticsearch { pipeline => "%{module}.%{dataset}" - {%- if FEATURES is sameas true %} - hosts => "https://{{ ES }}" - cacert => '/ca/ca.crt' - {%- else %} hosts => "{{ ES }}" - {%- endif %} index => "so-strelka-%{+YYYY.MM.dd}" template_name => "so-strelka" template => "/templates/so-strelka-template.json" From f553a8e27aaae8067e0d9f7f0a7d73abf802a6f1 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 
Aug 2020 14:40:34 -0400 Subject: [PATCH 220/376] anon user hack --- salt/elasticsearch/files/elasticsearch.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index 625d8c8d9..54b1d9a94 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -24,7 +24,7 @@ cluster.routing.allocation.disk.watermark.low: 95% cluster.routing.allocation.disk.watermark.high: 98% cluster.routing.allocation.disk.watermark.flood_stage: 98% {%- if FEATURES is sameas true %} -xpack.security.enabled: false +xpack.security.enabled: true xpack.security.http.ssl.enabled: false xpack.security.transport.ssl.enabled: true xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key @@ -33,6 +33,11 @@ xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt +xpack.security.authc: + anonymous: + username: anonymous_user + roles: elasticsearch + authz_exception: true {%- endif %} node.attr.box_type: {{ NODE_ROUTE_TYPE }} node.name: {{ ESCLUSTERNAME }} From 42c9653669752b4af5f9dc7707c622a5712aea55 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 14:45:55 -0400 Subject: [PATCH 221/376] anon user hack --- salt/elasticsearch/files/elasticsearch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index 54b1d9a94..c1052035a 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -36,7 +36,7 @@ xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/c xpack.security.authc: anonymous: username: anonymous_user - roles: elasticsearch + roles: superuser authz_exception: true {%- endif %} node.attr.box_type: {{ NODE_ROUTE_TYPE }} From a81d14463cf46dd789abadbc3e3e8a3416906925 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 11 Aug 2020 15:01:20 -0400 Subject: [PATCH 222/376] add logstash to registry for importpcap, change PATCHSCHEDULENAME=auto --- setup/so-functions | 1 + setup/so-setup | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index fd2e88516..d9f00d42c 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -720,6 +720,7 @@ docker_seed_registry() { if ! 
[ -f /nsm/docker-registry/docker/registry.tar ]; then if [ "$install_type" == 'IMPORTPCAP' ]; then local TRUSTED_CONTAINERS=(\ + "so-logstash:$VERSION" \ "so-nginx:$VERSION" \ "so-filebeat:$VERSION" \ "so-suricata:$VERSION" \ diff --git a/setup/so-setup b/setup/so-setup index 48153becb..3bdc82c0e 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -244,7 +244,7 @@ if [[ $is_node ]]; then fi if [[ $is_importpcap ]]; then - PATCHSCHEDULENAME=Automatic + PATCHSCHEDULENAME=auto MTU=1500 RULESETUP=ETOPEN NSMSETUP=BASIC From b724d40376ce9454e3e9315017fb6362e8157309 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Tue, 11 Aug 2020 15:07:16 -0400 Subject: [PATCH 223/376] Playbook Stability Fixes --- salt/elastalert/files/elastalert_config.yaml | 2 +- salt/soctopus/files/templates/generic.template | 4 ++-- salt/soctopus/files/templates/osquery.template | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/elastalert/files/elastalert_config.yaml b/salt/elastalert/files/elastalert_config.yaml index ba2b79448..c3e60c78e 100644 --- a/salt/elastalert/files/elastalert_config.yaml +++ b/salt/elastalert/files/elastalert_config.yaml @@ -16,7 +16,7 @@ disable_rules_on_error: false # How often ElastAlert will query Elasticsearch # The unit can be anything from weeks to seconds run_every: - minutes: 1 + minutes: 3 # ElastAlert will buffer results from the most recent # period of time, in case some log sources are not in real time diff --git a/salt/soctopus/files/templates/generic.template b/salt/soctopus/files/templates/generic.template index 1f56bc134..e93bc30f8 100644 --- a/salt/soctopus/files/templates/generic.template +++ b/salt/soctopus/files/templates/generic.template @@ -1,4 +1,4 @@ -{% set es = salt['pillar.get']('global:managerip', '') %} +{% set es = salt['pillar.get']('manager:url_base', '') %} {% set hivehost = salt['pillar.get']('global:managerip', '') %} {% set hivekey = salt['pillar.get']('global:hivekey', '') %} alert: @@ -15,7 +15,7 @@ hive_proxies: https: '' hive_alert_config: - title: '{rule[name]} - ' + title: "{rule[name]} - " type: 'playbook' source: 'SecurityOnion' description: "`Play:` https://{{es}}/playbook/issues/6000 \n\n `View Event:` \n\n `Raw Data:` {match[message]}" diff --git a/salt/soctopus/files/templates/osquery.template b/salt/soctopus/files/templates/osquery.template index 44214afa3..de1d1cf0c 100644 --- a/salt/soctopus/files/templates/osquery.template +++ b/salt/soctopus/files/templates/osquery.template @@ -1,4 +1,4 @@ -{% set es = salt['pillar.get']('global:managerip', '') %} +{% set es = salt['pillar.get']('manager:url_base', '') %} {% set hivehost = salt['pillar.get']('global:managerip', '') %} {% set hivekey = salt['pillar.get']('global:hivekey', '') %} alert: @@ -21,7 +21,7 @@ hive_observable_data_mapping: - other: '{match[osquery][hostname]}' hive_alert_config: - title: '{rule[name]} -- {match[osquery][hostname]} -- {match[osquery][name]}' + title: "{rule[name]} -- {match[osquery][hostname]} -- {match[osquery][name]}" type: 'osquery' source: 'SecurityOnion' description: "`Play:` https://{{es}}/playbook/issues/6000 \n\n `View Event:` \n\n `Hostname:` __{match[osquery][hostname]}__ `Live Query:`__[Pivot Link](https://{{es}}/fleet/queries/new?host_uuids={match[osquery][LiveQuery]})__ `Pack:` __{match[osquery][name]}__ `Data:` {match[osquery][columns]}" From 5f30c947c95fc3df00559731cace19ffd1297652 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 15:12:23 -0400 Subject: [PATCH 224/376] SSL intraca --- salt/elasticsearch/init.sls | 
4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 28db606f1..6aa1257bf 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -200,9 +200,13 @@ so-elasticsearch: - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro {%- if FEATURES is sameas true %} + {%- if grains['role'] in ['so-node','so-heavynode'] %} + - /etc/ssl/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro + {%- else %} - /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro + {%- endif %} {%- endif %} - watch: - file: cacertz From e8b61a3828a3f91e6f05bc91c00d59c80a79dc6a Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 15:14:29 -0400 Subject: [PATCH 225/376] SSL intraca --- salt/elasticsearch/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 6aa1257bf..66bd0ec21 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -204,9 +204,9 @@ so-elasticsearch: - /etc/ssl/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro {%- else %} - /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro + {%- endif %} - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro - {%- endif %} {%- endif %} - watch: - file: cacertz From a817465318bc39688997633988a0b919b9c70050 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 15:25:09 -0400 Subject: [PATCH 226/376] SSL intraca --- salt/ssl/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 6751c4b15..6d8674c92 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -501,7 +501,7 @@ fleetkeyperms: - ca_server: {{ ca_server }} - signing_policy: registry - public_key: /etc/pki/elasticsearch.key - - CN: {{ manager }} + - CN: {{ HOSTNAME }} - days_remaining: 0 - days_valid: 820 - backup: True From 5a0aae5fe7ce53f598d98c01c8c8b1f4ba0d1d6f Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 15:34:07 -0400 Subject: [PATCH 227/376] SSL intraca --- salt/ssl/init.sls | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 6d8674c92..a0cade9f6 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -496,6 +496,18 @@ fleetkeyperms: {% if grains['role'] in ['so-node', 'so-heavynode'] %} # Create a cert for elasticsearch +/etc/pki/elasticsearch.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%} + - prereq: + - x509: /etc/pki/elasticsearch.crt + /etc/pki/elasticsearch.crt: x509.certificate_managed: - ca_server: {{ ca_server }} @@ -516,17 +528,5 @@ miniokeyperms: - name: /etc/pki/elasticsearch.key - mode: 640 - group: 930 - -/etc/pki/elasticsearch.key: - x509.private_key_managed: - - CN: {{ manager }} - - bits: 4096 - - days_remaining: 0 - - days_valid: 820 - - backup: True - - new: True - {% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%} - - prereq: - - x509: /etc/pki/elasticsearch.crt {%- endif %} {%- 
endif %} \ No newline at end of file From f8621333239bc70dace93928ea80241af9325153 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 15:37:55 -0400 Subject: [PATCH 228/376] SSL intraca --- salt/elasticsearch/init.sls | 4 ---- 1 file changed, 4 deletions(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 66bd0ec21..28db606f1 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -200,11 +200,7 @@ so-elasticsearch: - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro {%- if FEATURES is sameas true %} - {%- if grains['role'] in ['so-node','so-heavynode'] %} - - /etc/ssl/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro - {%- else %} - /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro - {%- endif %} - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro {%- endif %} From 65d535d893f1ec4081c633e1811f7ca4c9532b05 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 15:45:17 -0400 Subject: [PATCH 229/376] SSL intraca --- salt/elasticsearch/files/elasticsearch.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index c1052035a..1398e03a0 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -33,6 +33,7 @@ xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt +ssl.verification_mode: none xpack.security.authc: anonymous: username: anonymous_user From 0f7074a4997b792b248dd9747ac8a6b529b09aef Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 11 Aug 2020 15:49:04 -0400 Subject: [PATCH 230/376] SSL intraca --- salt/elasticsearch/files/elasticsearch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index 1398e03a0..6f49c9584 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -33,7 +33,7 @@ xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt -ssl.verification_mode: none +xpack.security.http.ssl.client_authentication: none xpack.security.authc: anonymous: username: anonymous_user From de054032379a490f871f375e9e0f254067871516 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 11 Aug 2020 15:52:15 -0400 Subject: [PATCH 231/376] ensure nids rules dir exists --- salt/suricata/manager.sls | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/suricata/manager.sls b/salt/suricata/manager.sls index 5998a484b..e287069cb 100644 --- a/salt/suricata/manager.sls +++ b/salt/suricata/manager.sls @@ -1,3 +1,10 @@ +nidsrulesdir: + file.directory: + - name: /opt/so/rules/nids + - user: 939 + - group: 939 + - makedirs: True + surilocaldir: 
file.directory: - name: /opt/so/saltstack/local/salt/suricata From 53b4a73bb902f508d15baf3a7d1d5497b7e029a4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 11 Aug 2020 15:59:08 -0400 Subject: [PATCH 232/376] add idstools to importpcap node --- salt/suricata/manager.sls | 7 ------- salt/top.sls | 1 + setup/so-setup | 6 ++---- 3 files changed, 3 insertions(+), 11 deletions(-) diff --git a/salt/suricata/manager.sls b/salt/suricata/manager.sls index e287069cb..5998a484b 100644 --- a/salt/suricata/manager.sls +++ b/salt/suricata/manager.sls @@ -1,10 +1,3 @@ -nidsrulesdir: - file.directory: - - name: /opt/so/rules/nids - - user: 939 - - group: 939 - - makedirs: True - surilocaldir: file.directory: - name: /opt/so/saltstack/local/salt/suricata diff --git a/salt/top.sls b/salt/top.sls index 316523f08..509f6b2c2 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -397,6 +397,7 @@ base: - nginx - soc - firewall + - idstools - suricata.manager - elasticsearch - kibana diff --git a/setup/so-setup b/setup/so-setup index 3bdc82c0e..d9a13210c 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -533,10 +533,8 @@ fi set_progress_str 60 "$(print_salt_state_apply 'manager')" salt-call state.apply -l info manager >> $setup_log 2>&1 - if [[ ! $is_importpcap ]]; then - set_progress_str 61 "$(print_salt_state_apply 'idstools')" - salt-call state.apply -l info idstools >> $setup_log 2>&1 - fi + set_progress_str 61 "$(print_salt_state_apply 'idstools')" + salt-call state.apply -l info idstools >> $setup_log 2>&1 set_progress_str 61 "$(print_salt_state_apply 'suricata.manager')" salt-call state.apply -l info suricata.manager >> $setup_log 2>&1 From 6260a0aeaaba73099fdafeb0978e92a761c7393d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 11 Aug 2020 16:29:35 -0400 Subject: [PATCH 233/376] add idstools to docker registry for importpcap node --- setup/so-functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index d9f00d42c..dda15b65e 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -720,7 +720,8 @@ docker_seed_registry() { if ! 
[ -f /nsm/docker-registry/docker/registry.tar ]; then if [ "$install_type" == 'IMPORTPCAP' ]; then local TRUSTED_CONTAINERS=(\ - "so-logstash:$VERSION" \ + "so-logstash:$VERSION" \ + "so-idstools:$VERSION" \ "so-nginx:$VERSION" \ "so-filebeat:$VERSION" \ "so-suricata:$VERSION" \ From 5a0df2719311303b854fe9fe79afb5d21375422f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 Aug 2020 10:27:15 -0400 Subject: [PATCH 234/376] rename importpcap node to import --- .../assigned_hostgroups.local.map.yaml | 2 +- pillar/top.sls | 4 +- ...{importpcap.map.jinja => import.map.jinja} | 4 +- salt/elasticsearch/init.sls | 2 +- salt/firewall/assigned_hostgroups.map.yaml | 2 +- salt/{importpcap => import}/bond.sls | 0 salt/ssl/init.sls | 6 +-- salt/top.sls | 2 +- setup/so-functions | 28 +++++------ setup/so-setup | 46 +++++++++---------- setup/so-whiptail | 2 +- 11 files changed, 48 insertions(+), 50 deletions(-) rename salt/common/maps/{importpcap.map.jinja => import.map.jinja} (74%) rename salt/{importpcap => import}/bond.sls (100%) diff --git a/files/firewall/assigned_hostgroups.local.map.yaml b/files/firewall/assigned_hostgroups.local.map.yaml index b39d34ae7..50ef751a4 100644 --- a/files/firewall/assigned_hostgroups.local.map.yaml +++ b/files/firewall/assigned_hostgroups.local.map.yaml @@ -13,7 +13,7 @@ role: fleet: heavynode: helixsensor: - importpcap: + import: manager: managersearch: standalone: diff --git a/pillar/top.sls b/pillar/top.sls index 44f56edc5..73d66ef2a 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -2,7 +2,7 @@ base: '*': - patch.needs_restarting - '*_eval or *_helix or *_heavynode or *_sensor or *_standalone or *_importpcap': + '*_eval or *_helix or *_heavynode or *_sensor or *_standalone or *_import': - match: compound - zeek @@ -81,7 +81,7 @@ base: - elasticsearch.search - minions.{{ grains.id }} - '*_importpcap': + '*_import': - zeeklogs - secrets - elasticsearch.eval diff --git a/salt/common/maps/importpcap.map.jinja b/salt/common/maps/import.map.jinja similarity index 74% rename from salt/common/maps/importpcap.map.jinja rename to salt/common/maps/import.map.jinja index f412a030e..adb266809 100644 --- a/salt/common/maps/importpcap.map.jinja +++ b/salt/common/maps/import.map.jinja @@ -7,8 +7,6 @@ 'so-elasticsearch', 'so-kibana', 'so-suricata', - 'so-zeek', - 'so-soctopus', - 'so-sensoroni' + 'so-zeek' ] } %} \ No newline at end of file diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 0f92a5d9c..48c4c99aa 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -23,7 +23,7 @@ {% set FEATURES = '' %} {% endif %} -{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-importpcap'] %} +{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-import'] %} {% set esclustername = salt['pillar.get']('manager:esclustername', '') %} {% set esheap = salt['pillar.get']('manager:esheap', '') %} {% set ismanager = True %} diff --git a/salt/firewall/assigned_hostgroups.map.yaml b/salt/firewall/assigned_hostgroups.map.yaml index fe7e12135..ef9e6fe0c 100644 --- a/salt/firewall/assigned_hostgroups.map.yaml +++ b/salt/firewall/assigned_hostgroups.map.yaml @@ -488,7 +488,7 @@ role: localhost: portgroups: - {{ portgroups.all }} - importpcap: + import: chain: DOCKER-USER: hostgroups: diff --git a/salt/importpcap/bond.sls b/salt/import/bond.sls similarity index 100% rename from salt/importpcap/bond.sls rename to salt/import/bond.sls diff --git a/salt/ssl/init.sls 
b/salt/ssl/init.sls index af9495e59..c0b48dd1f 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -7,7 +7,7 @@ {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} {% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %} -{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'importpcap'] %} +{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'import'] %} {% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %} {% set ca_server = grains.id %} {% else %} @@ -72,7 +72,7 @@ influxkeyperms: - mode: 640 - group: 939 -{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-importpcap'] %} +{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import'] %} /etc/pki/filebeat.key: x509.private_key_managed: @@ -322,7 +322,7 @@ fleetkeyperms: - group: 939 {% endif %} -{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-importpcap'] %} +{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-import'] %} fbcertdir: file.directory: diff --git a/salt/top.sls b/salt/top.sls index 509f6b2c2..a51978b1e 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -387,7 +387,7 @@ base: - fleet.install_package - filebeat - '*_importpcap and G@saltversion:{{saltversion}}': + '*_import and G@saltversion:{{saltversion}}': - match: compound - ca - ssl diff --git a/setup/so-functions b/setup/so-functions index dda15b65e..26d80c98a 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -302,7 +302,7 @@ configure_minion() { 'helix') echo "master: $HOSTNAME" >> "$minion_config" ;; - 'manager' | 'eval' | 'managersearch' | 'standalone' | 'importpcap') + 'manager' | 'eval' | 'managersearch' | 'standalone' | 'import') printf '%s\n'\ "master: $HOSTNAME"\ "mysql.host: '$MAINIP'"\ @@ -358,7 +358,7 @@ check_requirements() { req_cores=4 if [[ "$node_type" == 'sensor' ]]; then req_nics=2; else req_nics=1; fi if [[ "$node_type" == 'fleet' ]]; then req_mem=4; fi - elif [[ "$standalone_or_dist" == 'importpcap' ]]; then + elif [[ "$standalone_or_dist" == 'import' ]]; then req_mem=4 req_cores=2 req_nics=1 @@ -366,7 +366,7 @@ check_requirements() { if [[ $setup_type == 'network' ]] ; then if [[ -n $nsm_mount ]]; then - if [[ "$standalone_or_dist" == 'importpcap' ]]; then + if [[ "$standalone_or_dist" == 'import' ]]; then req_storage=50 else req_storage=100 @@ -378,7 +378,7 @@ check_requirements() { whiptail_storage_requirements "/nsm" "${free_space_nsm} GB" "${req_storage} GB" fi else - if [[ "$standalone_or_dist" == 'importpcap' ]]; then + if [[ "$standalone_or_dist" == 'import' ]]; then req_storage=50 else req_storage=200 @@ -417,7 +417,7 @@ copy_salt_master_config() { copy_minion_tmp_files() { case "$install_type" in - 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORTPCAP') + 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir" cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1 if [ -d "$temp_install_dir"/salt ] ; then @@ -674,7 +674,7 @@ docker_install() { else case "$install_type" in - 'MANAGER' | 'EVAL' | 'STANDALONE' | 
'MANAGERSEARCH' | 'IMPORTPCAP') + 'MANAGER' | 'EVAL' | 'STANDALONE' | 'MANAGERSEARCH' | 'IMPORT') apt-get update >> "$setup_log" 2>&1 ;; *) @@ -718,7 +718,7 @@ docker_seed_registry() { local VERSION="$SOVERSION" if ! [ -f /nsm/docker-registry/docker/registry.tar ]; then - if [ "$install_type" == 'IMPORTPCAP' ]; then + if [ "$install_type" == 'IMPORT' ]; then local TRUSTED_CONTAINERS=(\ "so-logstash:$VERSION" \ "so-idstools:$VERSION" \ @@ -746,7 +746,7 @@ docker_seed_registry() { "so-zeek:$VERSION" ) fi - if [ "$install_type" != 'HELIXSENSOR' ] && [ "$install_type" != 'IMPORTPCAP' ]; then + if [ "$install_type" != 'HELIXSENSOR' ] && [ "$install_type" != 'IMPORT' ]; then TRUSTED_CONTAINERS=("${TRUSTED_CONTAINERS[@]}" \ "so-acng:$VERSION" \ "so-thehive-cortex:$VERSION" \ @@ -874,7 +874,7 @@ got_root() { get_minion_type() { local minion_type case "$install_type" in - 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE' | 'IMPORTPCAP') + 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE' | 'IMPORT') minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]') ;; 'HELIXSENSOR') @@ -1246,7 +1246,7 @@ saltify() { set_progress_str 6 'Installing various dependencies' yum -y install wget nmap-ncat >> "$setup_log" 2>&1 case "$install_type" in - 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORTPCAP') + 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT') reserve_group_ids >> "$setup_log" 2>&1 yum -y install epel-release >> "$setup_log" 2>&1 yum -y install sqlite argon2 curl mariadb-devel >> "$setup_log" 2>&1 @@ -1317,7 +1317,7 @@ saltify() { 'FLEET') if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-mysqldb >> "$setup_log" 2>&1; else apt-get -y install python-mysqldb >> "$setup_log" 2>&1; fi ;; - 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORTPCAP') # TODO: should this also be HELIXSENSOR? + 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') # TODO: should this also be HELIXSENSOR? # Add saltstack repo(s) wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1 @@ -1374,7 +1374,7 @@ saltify() { salt_checkin() { case "$install_type" in - 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORTPCAP') # Fix Mine usage + 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') # Fix Mine usage { echo "Building Certificate Authority"; salt-call state.apply ca; @@ -1538,7 +1538,7 @@ set_hostname() { set_hostname_iso - if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE|IMPORTPCAP)$ ]]; then + if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE|IMPORT)$ ]]; then if ! 
getent hosts "$MSRV"; then echo "$MSRVIP $MSRV" >> /etc/hosts fi @@ -1570,7 +1570,7 @@ set_initial_firewall_policy() { $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost minion "$MAINIP" $default_salt_dir/pillar/data/addtotab.sh managertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" ;; - 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORTPCAP') + 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP" $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP" $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP" diff --git a/setup/so-setup b/setup/so-setup index d9a13210c..4a8cf259e 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -163,8 +163,8 @@ elif [ "$install_type" = 'FLEET' ]; then OSQUERY=1 elif [ "$install_type" = 'HELIXSENSOR' ]; then is_helix=true -elif [ "$install_type" = 'IMPORTPCAP' ]; then - is_importpcap=true +elif [ "$install_type" = 'IMPORT' ]; then + is_import=true fi if [[ $is_manager && $is_sensor ]]; then @@ -173,13 +173,13 @@ elif [[ $is_fleet_standalone ]]; then check_requirements "dist" "fleet" elif [[ $is_sensor && ! $is_eval ]]; then check_requirements "dist" "sensor" -elif [[ $is_distmanager || $is_minion ]] && [[ ! $is_importpcap ]]; then +elif [[ $is_distmanager || $is_minion ]] && [[ ! $is_import ]]; then check_requirements "dist" -elif [[ $is_importpcap ]]; then - check_requirements "importpcap" +elif [[ $is_import ]]; then + check_requirements "import" fi -if [[ ! $is_importpcap ]]; then +if [[ ! $is_import ]]; then whiptail_patch_schedule fi @@ -243,7 +243,7 @@ if [[ $is_node ]]; then CURCLOSEDAYS=30 fi -if [[ $is_importpcap ]]; then +if [[ $is_import ]]; then PATCHSCHEDULENAME=auto MTU=1500 RULESETUP=ETOPEN @@ -271,11 +271,11 @@ if [[ $is_helix || $is_sensor ]]; then whiptail_sensor_nics fi -if [[ $is_helix || $is_sensor || $is_importpcap ]]; then +if [[ $is_helix || $is_sensor || $is_import ]]; then calculate_useable_cores fi -if [[ $is_helix || $is_manager || $is_importpcap ]]; then +if [[ $is_helix || $is_manager || $is_import ]]; then whiptail_homenet_manager fi @@ -306,7 +306,7 @@ if [[ $is_manager ]]; then fi fi -if [[ $is_manager || $is_importpcap ]]; then +if [[ $is_manager || $is_import ]]; then collect_webuser_inputs get_redirect fi @@ -368,7 +368,7 @@ else FLEETNODEPASSWD1=$WEBPASSWD1 fi -if [[ $is_manager || $is_importpcap ]]; then whiptail_so_allow; fi +if [[ $is_manager || $is_import ]]; then whiptail_so_allow; fi whiptail_make_changes @@ -392,7 +392,7 @@ fi } >> $setup_log 2>&1 -if [[ $is_manager || $is_importpcap ]]; then +if [[ $is_manager || $is_import ]]; then { generate_passwords; secrets_pillar; @@ -411,7 +411,7 @@ fi host_pillar >> $setup_log 2>&1 -if [[ $is_minion || $is_importpcap ]]; then +if [[ $is_minion || $is_import ]]; then set_updates >> $setup_log 2>&1 copy_ssh_key >> $setup_log 2>&1 fi @@ -434,7 +434,7 @@ fi configure_network_sensor >> $setup_log 2>&1 fi - if [[ $is_sensor || $is_helix || $is_importpcap ]]; then + if [[ $is_sensor || $is_helix || $is_import ]]; then set_progress_str 4 'Generating sensor pillar' sensor_pillar >> $setup_log 2>&1 fi @@ -451,7 +451,7 @@ fi set_progress_str 9 'Initializing Salt minion' configure_minion "$minion_type" >> $setup_log 2>&1 - if [[ $is_manager || $is_helix || $is_importpcap ]]; then + if [[ $is_manager || $is_helix || $is_import ]]; then set_progress_str 10 'Configuring Salt 
master' { create_local_directories; @@ -496,7 +496,7 @@ fi accept_salt_key_remote >> $setup_log 2>&1 fi - if [[ $is_manager || $is_importpcap ]]; then + if [[ $is_manager || $is_import ]]; then set_progress_str 20 'Accepting Salt key' salt-key -ya "$MINION_ID" >> $setup_log 2>&1 fi @@ -509,15 +509,15 @@ fi salt-call state.apply salt.minion -l info >> $setup_log 2>&1 fi - if [[ $is_importpcap ]]; then + if [[ $is_import ]]; then set_progress_str 22 'Configuring bond interface' - salt-call state.apply importpcap.bond -l info >> $setup_log 2>&1 + salt-call state.apply import.bond -l info >> $setup_log 2>&1 fi set_progress_str 23 'Generating CA and checking in' salt_checkin >> $setup_log 2>&1 - if [[ $is_manager || $is_helix || $is_importpcap ]]; then + if [[ $is_manager || $is_helix || $is_import ]]; then set_progress_str 25 'Configuring firewall' set_initial_firewall_policy >> $setup_log 2>&1 @@ -555,7 +555,7 @@ fi set_progress_str 64 "$(print_salt_state_apply 'nginx')" salt-call state.apply -l info nginx >> $setup_log 2>&1 - if [[ $is_manager || $is_node || $is_importpcap ]]; then + if [[ $is_manager || $is_node || $is_import ]]; then set_progress_str 64 "$(print_salt_state_apply 'elasticsearch')" salt-call state.apply -l info elasticsearch >> $setup_log 2>&1 fi @@ -565,7 +565,7 @@ fi salt-call state.apply -l info pcap >> $setup_log 2>&1 fi - if [[ $is_sensor || $is_importpcap ]]; then + if [[ $is_sensor || $is_import ]]; then set_progress_str 66 "$(print_salt_state_apply 'suricata')" salt-call state.apply -l info suricata >> $setup_log 2>&1 @@ -578,7 +578,7 @@ fi salt-call state.apply -l info curator >> $setup_log 2>&1 fi - if [[ $is_manager || $is_importpcap ]]; then + if [[ $is_manager || $is_import ]]; then set_progress_str 69 "$(print_salt_state_apply 'soc')" salt-call state.apply -l info soc >> $setup_log 2>&1 @@ -644,7 +644,7 @@ fi fi fi - if [[ $is_manager || $is_helix || $is_importpcap ]]; then + if [[ $is_manager || $is_helix || $is_import ]]; then set_progress_str 81 "$(print_salt_state_apply 'utility')" salt-call state.apply -l info utility >> $setup_log 2>&1 fi diff --git a/setup/so-whiptail b/setup/so-whiptail index 264390d30..f84090e4f 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -475,7 +475,7 @@ whiptail_install_type() { "EVAL" "Evaluation mode (not for production) " ON \ "STANDALONE" "Standalone production install " OFF \ "DISTRIBUTED" "Distributed install submenu " OFF \ - "IMPORTPCAP" "Import PCAP mode " OFF \ + "IMPORT" "Standalone to import PCAP or log files " OFF \ 3>&1 1>&2 2>&3 ) From 0f53b4d7038a591a04e8e0b4809176e7c0c42541 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 Aug 2020 10:39:31 -0400 Subject: [PATCH 235/376] set esheapsize and filebeat config for import node --- salt/filebeat/etc/filebeat.yml | 4 ++-- setup/so-setup | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml index 2b8a4118f..a4525b494 100644 --- a/salt/filebeat/etc/filebeat.yml +++ b/salt/filebeat/etc/filebeat.yml @@ -74,7 +74,7 @@ filebeat.modules: # List of prospectors to fetch data. 
filebeat.inputs: #------------------------------ Log prospector -------------------------------- -{%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" or grains['role'] == "so-heavynode" or grains['role'] == "so-standalone" %} +{%- if grains['role'] in ['so-sensor', "so-eval", "so-helix", "so-heavynode", "so-standalone", "so-import"] %} - type: udp enabled: true host: "0.0.0.0:514" @@ -253,7 +253,7 @@ output.{{ type }}: {%- endfor %} {%- else %} #----------------------------- Elasticsearch/Logstash output --------------------------------- - {%- if grains['role'] == "so-eval" %} + {%- if grains['role'] in ["so-eval", "so-import"] %} output.elasticsearch: enabled: true hosts: ["{{ MANAGER }}:9200"] diff --git a/setup/so-setup b/setup/so-setup index 4a8cf259e..9c28ab814 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -279,7 +279,7 @@ if [[ $is_helix || $is_manager || $is_import ]]; then whiptail_homenet_manager fi -if [[ $is_helix || $is_manager || $is_node ]]; then +if [[ $is_helix || $is_manager || $is_node || $is_import ]]; then set_base_heapsizes fi From dfd3a1de6aedebdd2cf77c063283bb3c8207c4aa Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 Aug 2020 10:42:07 -0400 Subject: [PATCH 236/376] set monitor interface to bond0 for import node --- setup/so-setup | 1 + 1 file changed, 1 insertion(+) diff --git a/setup/so-setup b/setup/so-setup index 9c28ab814..d64a86549 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -251,6 +251,7 @@ if [[ $is_import ]]; then HNSENSOR=inherit MANAGERUPDATES=0 MANAGERADV=BASIC + INTERFACE=bond0 ZEEKVERSION=ZEEK NIDS=Suricata RULESETUP=ETOPEN From 32083132e56587fae2f68f8acb7c8656040f8b0b Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 12 Aug 2020 11:10:36 -0400 Subject: [PATCH 237/376] Back out some ES settings --- salt/elasticsearch/files/elasticsearch.yml | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index 6f49c9584..411f5bdf5 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -24,21 +24,22 @@ cluster.routing.allocation.disk.watermark.low: 95% cluster.routing.allocation.disk.watermark.high: 98% cluster.routing.allocation.disk.watermark.flood_stage: 98% {%- if FEATURES is sameas true %} -xpack.security.enabled: true +xpack.security.enabled: false xpack.security.http.ssl.enabled: false -xpack.security.transport.ssl.enabled: true +xpack.security.transport.ssl.enabled: false xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key xpack.security.http.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt -xpack.security.http.ssl.client_authentication: none -xpack.security.authc: - anonymous: - username: anonymous_user - roles: superuser - authz_exception: true +#xpack.security.transport.ssl.verification_mode: none +#xpack.security.http.ssl.client_authentication: none +#xpack.security.authc: +# anonymous: +# username: anonymous_user +# roles: superuser +# authz_exception: true {%- endif %} node.attr.box_type: {{ NODE_ROUTE_TYPE }} 
node.name: {{ ESCLUSTERNAME }} From b5c9d44d91f352609f2eeeeda6b14776ae8532ba Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 Aug 2020 11:15:14 -0400 Subject: [PATCH 238/376] nginx config for import node --- .../etc/{nginx.conf.so-importpcap => nginx.conf.so-import} | 0 setup/so-functions | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename salt/nginx/etc/{nginx.conf.so-importpcap => nginx.conf.so-import} (100%) diff --git a/salt/nginx/etc/nginx.conf.so-importpcap b/salt/nginx/etc/nginx.conf.so-import similarity index 100% rename from salt/nginx/etc/nginx.conf.so-importpcap rename to salt/nginx/etc/nginx.conf.so-import diff --git a/setup/so-functions b/setup/so-functions index 26d80c98a..35617d492 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1775,7 +1775,7 @@ es_heapsize() { fi export ES_HEAP_SIZE - if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then + if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE|IMPORT)$ ]]; then NODE_ES_HEAP_SIZE=ES_HEAP_SIZE export NODE_ES_HEAP_SIZE fi From 41afe0ab2e1c94821ccc80ed776fc119c8b2dfa3 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 Aug 2020 11:33:10 -0400 Subject: [PATCH 239/376] remove tab --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 35617d492..d3ddc3daa 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -721,7 +721,7 @@ docker_seed_registry() { if [ "$install_type" == 'IMPORT' ]; then local TRUSTED_CONTAINERS=(\ "so-logstash:$VERSION" \ - "so-idstools:$VERSION" \ + "so-idstools:$VERSION" \ "so-nginx:$VERSION" \ "so-filebeat:$VERSION" \ "so-suricata:$VERSION" \ From c166bc84f3e945abe424ab3fe9692e34e0275f1b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 Aug 2020 11:48:22 -0400 Subject: [PATCH 240/376] add zeek to import node top --- salt/top.sls | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/top.sls b/salt/top.sls index a51978b1e..01eed5343 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -401,7 +401,8 @@ base: - suricata.manager - elasticsearch - kibana - - suricata - filebeat - utility + - suricata + - zeek - schedule From dcd5e95b38917c20ba80752965cd526b3519bef5 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 Aug 2020 11:57:13 -0400 Subject: [PATCH 241/376] add so-pcaptools to registry for import node --- setup/so-functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index d3ddc3daa..61fa60521 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -730,7 +730,8 @@ docker_seed_registry() { "so-kibana:$VERSION" \ "so-kratos:$VERSION" \ "so-suricata:$VERSION" \ - "so-registry:$VERSION" \ + "so-registry:$VERSION" \ + "so-pcaptools:$VERSION" \ "so-zeek:$VERSION" ) else From 68f5c1c3c54829392a956b0ffca65d89cd1c5ee3 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 Aug 2020 12:01:25 -0400 Subject: [PATCH 242/376] create web user during setup for import node --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index d64a86549..5b2caed27 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -662,7 +662,7 @@ fi filter_unused_nics >> $setup_log 2>&1 network_setup >> $setup_log 2>&1 - if [[ $is_manager ]]; then + if [[ $is_manager || $is_import ]]; then set_progress_str 87 'Adding user to SOC' add_web_user >> $setup_log 2>&1 fi From 69e7285e302c2800ab90a837db78fb029004fd06 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: 
Wed, 12 Aug 2020 12:44:55 -0400 Subject: [PATCH 243/376] Fix a bug where minio passwrods cause issues --- salt/elasticsearch/files/scripts/so-catrust | 2 +- salt/elasticsearch/files/sotls.yaml | 12 ++++++++++++ salt/elasticsearch/init.sls | 2 -- setup/so-functions | 4 ++-- 4 files changed, 15 insertions(+), 5 deletions(-) create mode 100644 salt/elasticsearch/files/sotls.yaml diff --git a/salt/elasticsearch/files/scripts/so-catrust b/salt/elasticsearch/files/scripts/so-catrust index 02ea12726..aee83a379 100644 --- a/salt/elasticsearch/files/scripts/so-catrust +++ b/salt/elasticsearch/files/scripts/so-catrust @@ -20,7 +20,7 @@ . /usr/sbin/so-common # Check to see if we have extracted the ca cert. if [ ! -f /opt/so/saltstack/local/salt/common/cacerts ]; then - docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-elasticsearchca --user root --entrypoint keytool {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logstash:{{ VERSION }} -keystore /etc/pki/ca-trust/extracted/java/cacerts -alias SOSCA -import -file /etc/pki/ca.crt -storepass changeit -noprompt + docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }} -keystore /etc/pki/ca-trust/extracted/java/cacerts -alias SOSCA -import -file /etc/pki/ca.crt -storepass changeit -noprompt docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/java/cacerts /opt/so/saltstack/local/salt/common/cacerts docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem docker rm so-elasticsearchca diff --git a/salt/elasticsearch/files/sotls.yaml b/salt/elasticsearch/files/sotls.yaml new file mode 100644 index 000000000..1b6353856 --- /dev/null +++ b/salt/elasticsearch/files/sotls.yaml @@ -0,0 +1,12 @@ +keystore.path: /etc/pki/ca-trust/extracted/java/sokeys +keystore.password: changeit +keystore.algorithm: SunX509 +truststore.path: /etc/pki/ca-trust/extracted/java/cacerts +truststore.password: changeit +truststore.algorithm: PKIX +protocols: +- TLSv1.2 +ciphers: +- TLS_RSA_WITH_AES_128_CBC_SHA256 +transport.encrypted: true +http.encrypted: false \ No newline at end of file diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 28db606f1..5bc9ddbb6 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -199,11 +199,9 @@ so-elasticsearch: - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - {%- if FEATURES is sameas true %} - /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro - {%- endif %} - watch: - file: cacertz diff --git a/setup/so-functions b/setup/so-functions index db8e3d6f1..e9574fa10 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1138,8 +1138,8 @@ minio_generate_keys() { local charSet="[:graph:]" - ACCESS_KEY=$(tr -cd "$charSet" < /dev/urandom | tr -d \' | tr -d \" | head -c 20) - ACCESS_SECRET=$(tr -cd "$charSet" < /dev/urandom | tr -d \' | tr -d \" | head -c 40) + ACCESS_KEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + ACCESS_SECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 40 | head -n 1) } From ddf3e6f943eff256c4115fa65b7ea9427cc2b0a7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 
Aug 2020 14:05:28 -0400 Subject: [PATCH 244/376] remove logstash from docker registry seed --- setup/so-functions | 1 - 1 file changed, 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 61fa60521..ddf771b7c 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -720,7 +720,6 @@ docker_seed_registry() { if ! [ -f /nsm/docker-registry/docker/registry.tar ]; then if [ "$install_type" == 'IMPORT' ]; then local TRUSTED_CONTAINERS=(\ - "so-logstash:$VERSION" \ "so-idstools:$VERSION" \ "so-nginx:$VERSION" \ "so-filebeat:$VERSION" \ From 683799d07734e13f3949534957acb09373fa9d19 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 12 Aug 2020 15:02:54 -0400 Subject: [PATCH 245/376] Convert ES cert to p12 --- salt/ssl/init.sls | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index a0cade9f6..9e0c1d9e8 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -243,7 +243,11 @@ miniokeyperms: # https://github.com/saltstack/salt/issues/52167 # Will trigger 5 days (432000 sec) from cert expiration - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' - + cmd.run: + - name: "/usr/bin/openssl pkcs12 -in /etc/pki/elasticsearch.key -topk12 -out /etc/pki/elasticsearch.p12 -nocrypt" + - onchanges: + - x509: /etc/pki/elasticsearch.key + ealstickeyperms: file.managed: - replace: False @@ -507,7 +511,7 @@ fleetkeyperms: {% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%} - prereq: - x509: /etc/pki/elasticsearch.crt - + /etc/pki/elasticsearch.crt: x509.certificate_managed: - ca_server: {{ ca_server }} @@ -521,6 +525,10 @@ fleetkeyperms: # https://github.com/saltstack/salt/issues/52167 # Will trigger 5 days (432000 sec) from cert expiration - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + cmd.run: + - name: "/usr/bin/openssl pkcs12 -in /etc/pki/elasticsearch.key -topk12 -out /etc/pki/elasticsearch.p12 -nocrypt" + - onchanges: + - x509: /etc/pki/elasticsearch.key miniokeyperms: file.managed: From daaffd518562f1a85bad7366c76cae79c49371ed Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 12 Aug 2020 15:05:33 -0400 Subject: [PATCH 246/376] Convert ES cert to p12 --- salt/ssl/init.sls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 9e0c1d9e8..2cb435ffc 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -244,10 +244,10 @@ miniokeyperms: # Will trigger 5 days (432000 sec) from cert expiration - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' cmd.run: - - name: "/usr/bin/openssl pkcs12 -in /etc/pki/elasticsearch.key -topk12 -out /etc/pki/elasticsearch.p12 -nocrypt" + - name: "/usr/bin/openssl pkcs12 -in /etc/pki/elasticsearch.key -export -out /etc/pki/elasticsearch.p12 -nocrypt" - onchanges: - x509: /etc/pki/elasticsearch.key - + ealstickeyperms: file.managed: - replace: False @@ -526,7 +526,7 @@ fleetkeyperms: # Will trigger 5 days (432000 sec) from cert expiration - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate 
-gt $expire_date ]' cmd.run: - - name: "/usr/bin/openssl pkcs12 -in /etc/pki/elasticsearch.key -topk12 -out /etc/pki/elasticsearch.p12 -nocrypt" + - name: "/usr/bin/openssl pkcs12 -in /etc/pki/elasticsearch.key -export -out /etc/pki/elasticsearch.p12 -nocrypt" - onchanges: - x509: /etc/pki/elasticsearch.key From 82821fbb256056843ab5d827e8683c13bc954231 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 12 Aug 2020 15:09:52 -0400 Subject: [PATCH 247/376] Convert ES cert to p12 --- salt/ssl/init.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 2cb435ffc..3dd509861 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -244,7 +244,7 @@ miniokeyperms: # Will trigger 5 days (432000 sec) from cert expiration - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' cmd.run: - - name: "/usr/bin/openssl pkcs12 -in /etc/pki/elasticsearch.key -export -out /etc/pki/elasticsearch.p12 -nocrypt" + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nocrypt" - onchanges: - x509: /etc/pki/elasticsearch.key @@ -526,7 +526,7 @@ fleetkeyperms: # Will trigger 5 days (432000 sec) from cert expiration - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' cmd.run: - - name: "/usr/bin/openssl pkcs12 -in /etc/pki/elasticsearch.key -export -out /etc/pki/elasticsearch.p12 -nocrypt" + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12" - onchanges: - x509: /etc/pki/elasticsearch.key From 7e3e4d0f54d41725b294385a5535ea0049cf6a43 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 12 Aug 2020 15:16:12 -0400 Subject: [PATCH 248/376] Convert ES cert to p12 --- salt/ssl/init.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 3dd509861..a5cae35b8 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -244,7 +244,7 @@ miniokeyperms: # Will trigger 5 days (432000 sec) from cert expiration - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' cmd.run: - - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nocrypt" + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:" - onchanges: - x509: /etc/pki/elasticsearch.key @@ -526,7 +526,7 @@ fleetkeyperms: # Will trigger 5 days (432000 sec) from cert expiration - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' cmd.run: - - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12" + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:" - onchanges: - x509: 
/etc/pki/elasticsearch.key From 9980d0284473eee7bc8d51c74c8f0fae791e6785 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 12 Aug 2020 15:38:19 -0400 Subject: [PATCH 249/376] Elastic Transport TLSgit add . --- salt/elasticsearch/files/sotls.yaml | 2 +- salt/elasticsearch/init.sls | 12 ++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/salt/elasticsearch/files/sotls.yaml b/salt/elasticsearch/files/sotls.yaml index 1b6353856..6fee1e8e2 100644 --- a/salt/elasticsearch/files/sotls.yaml +++ b/salt/elasticsearch/files/sotls.yaml @@ -1,4 +1,4 @@ -keystore.path: /etc/pki/ca-trust/extracted/java/sokeys +keystore.path: /usr/share/elasticsearch/config/sokeys keystore.password: changeit keystore.algorithm: SunX509 truststore.path: /etc/pki/ca-trust/extracted/java/cacerts diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 5bc9ddbb6..7cb887b05 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -139,6 +139,13 @@ esyml: - group: 939 - template: jinja +sotls: + file.managed: + - name: /opt/so/conf/elasticsearch/sotls.yml + - source: salt://elasticsearch/files/sotls.yml + - user: 930 + - group: 939 + #sync templates to /opt/so/conf/elasticsearch/templates {% for TEMPLATE in TEMPLATES %} es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}: @@ -200,8 +207,9 @@ so-elasticsearch: - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro - - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro - - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro + - /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro + - /opt/so/conf/elasticsearch/sotls.yml:/usr/share/elasticsearch/config/sotls.yml:ro + - watch: - file: cacertz From 5d5fcecdca8eff6ec99f97cd94d3f131213cf8c0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 Aug 2020 15:46:34 -0400 Subject: [PATCH 250/376] set the cluster for import node --- salt/utility/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utility/init.sls b/salt/utility/init.sls index 00899f69a..4779f9c1d 100644 --- a/salt/utility/init.sls +++ b/salt/utility/init.sls @@ -10,7 +10,7 @@ crossclusterson: - template: jinja {% endif %} -{% if grains['role'] == 'so-eval' %} +{% if grains['role'] in ['so-eval', 'so-import'] %} fixsearch: cmd.script: - shell: /bin/bash From f59b8683ae87b42f5278a53cbca537f3a139180f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 Aug 2020 15:48:34 -0400 Subject: [PATCH 251/376] allow soup to run on import node --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 608394530..0414ceb6e 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -27,7 +27,7 @@ exec 3>&1 1>${SOUP_LOG} 2>&1 manager_check() { # Check to see if this is a manager MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}') - if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch')$ ]]; then + if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch'|'so-import')$ ]]; then echo "This is a manager. We can proceed." else echo "Please run soup on the manager. The manager controls all updates." 
From 5640faef13bfcda794ea27b565683556a06f3349 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 12 Aug 2020 16:34:59 -0400 Subject: [PATCH 252/376] Kernel consoleblank is causing whiptail progress screen to appear to hang #1084 --- setup/so-setup | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 1e49b325f..3924e4a8e 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -118,7 +118,22 @@ if [ "$OS" == ubuntu ]; then update-alternatives --set newt-palette /etc/newt/palette.original >> $setup_log 2>&1 fi -setterm -blank 0 >> $setup_log 2>&1 +# Kernel consoleblank is causing whiptail progress screen to appear to hang #1084 +# https://github.com/Security-Onion-Solutions/securityonion/issues/1084 +if [ "$automated" == no ]; then + TTY=$(tty) + echo "Setup is running on TTY $TTY" >> $setup_log 2>&1 + if echo $TTY | grep -q "/dev/tty"; then + CONSOLEBLANK=$(cat /sys/module/kernel/parameters/consoleblank) + echo "Kernel consoleblank value before: $CONSOLEBLANK" >> $setup_log 2>&1 + if [ $CONSOLEBLANK -gt 0 ]; then + echo "Running 'setterm -blank 0' for TTY $TTY" >> $setup_log 2>&1 + TERM=linux setterm -blank 0 >$TTY <$TTY + CONSOLEBLANK=$(cat /sys/module/kernel/parameters/consoleblank) + echo "Kernel consoleblank value after: $CONSOLEBLANK" >> $setup_log 2>&1 + fi + fi +fi if [ "$setup_type" == 'iso' ] || (whiptail_you_sure); then true From a746d597bb0b3bdda772244fd49bafe5eea4d69b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 12 Aug 2020 17:42:45 -0400 Subject: [PATCH 253/376] rename to .yml --- salt/elasticsearch/files/{sotls.yaml => sotls.yml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename salt/elasticsearch/files/{sotls.yaml => sotls.yml} (100%) diff --git a/salt/elasticsearch/files/sotls.yaml b/salt/elasticsearch/files/sotls.yml similarity index 100% rename from salt/elasticsearch/files/sotls.yaml rename to salt/elasticsearch/files/sotls.yml From 59ddac57bfc4ce88faccce59b1a6a59705c57dc1 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 12 Aug 2020 17:48:37 -0400 Subject: [PATCH 254/376] Rename sotls.yaml to sotls.yml --- salt/elasticsearch/files/{sotls.yaml => sotls.yml} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename salt/elasticsearch/files/{sotls.yaml => sotls.yml} (93%) diff --git a/salt/elasticsearch/files/sotls.yaml b/salt/elasticsearch/files/sotls.yml similarity index 93% rename from salt/elasticsearch/files/sotls.yaml rename to salt/elasticsearch/files/sotls.yml index 6fee1e8e2..31dd149e9 100644 --- a/salt/elasticsearch/files/sotls.yaml +++ b/salt/elasticsearch/files/sotls.yml @@ -9,4 +9,4 @@ protocols: ciphers: - TLS_RSA_WITH_AES_128_CBC_SHA256 transport.encrypted: true -http.encrypted: false \ No newline at end of file +http.encrypted: false From 5a5319431330c2ff1cfab4f90f45fd5012882505 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 12 Aug 2020 21:12:48 -0400 Subject: [PATCH 255/376] Update sotls.yml --- salt/elasticsearch/files/sotls.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/sotls.yml b/salt/elasticsearch/files/sotls.yml index 31dd149e9..c676f4a56 100644 --- a/salt/elasticsearch/files/sotls.yml +++ b/salt/elasticsearch/files/sotls.yml @@ -1,7 +1,7 @@ keystore.path: /usr/share/elasticsearch/config/sokeys keystore.password: changeit keystore.algorithm: SunX509 -truststore.path: /etc/pki/ca-trust/extracted/java/cacerts +truststore.path: /etc/pki/java/cacerts truststore.password: changeit truststore.algorithm: PKIX 
protocols: From 9fafd5f72113c58e4eb47c33a95f10fb09b7d314 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 13 Aug 2020 08:32:51 -0400 Subject: [PATCH 256/376] update trusted containers for soup to minimize downloaded containers --- salt/common/tools/sbin/soup | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 0414ceb6e..8f0325a6c 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -93,7 +93,21 @@ pillar_changes() { update_dockers() { # List all the containers - if [ $MANAGERCHECK != 'so-helix' ]; then + if [ $MANAGERCHECK == 'so-import' ]; then + TRUSTED_CONTAINERS=( \ + "so-idstools" \ + "so-nginx" \ + "so-filebeat" \ + "so-suricata" \ + "so-soc" \ + "so-elasticsearch" \ + "so-kibana" \ + "so-kratos" \ + "so-suricata" \ + "so-registry" \ + "so-pcaptools" \ + "so-zeek" ) + elif [ $MANAGERCHECK != 'so-helix' ]; then TRUSTED_CONTAINERS=( \ "so-acng" \ "so-thehive-cortex" \ From 8ab1cd32f0558f5ce67ba07cd622e6daf81e0a1e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 13 Aug 2020 10:47:57 -0400 Subject: [PATCH 257/376] remove so-registry from docker see for import node as it doesnt even exist --- setup/so-functions | 1 - 1 file changed, 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index d50042e75..4f9d4938e 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -729,7 +729,6 @@ docker_seed_registry() { "so-kibana:$VERSION" \ "so-kratos:$VERSION" \ "so-suricata:$VERSION" \ - "so-registry:$VERSION" \ "so-pcaptools:$VERSION" \ "so-zeek:$VERSION" ) From 40b5b96e17fb446f8e01a909009537d05024b705 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 13 Aug 2020 15:00:44 -0400 Subject: [PATCH 258/376] Respond with 403 status code to unauthorized sensor requests --- salt/nginx/etc/nginx.conf.so-eval | 3 +++ salt/nginx/etc/nginx.conf.so-manager | 3 +++ salt/nginx/etc/nginx.conf.so-managersearch | 3 +++ salt/nginx/etc/nginx.conf.so-standalone | 3 +++ 4 files changed, 12 insertions(+) diff --git a/salt/nginx/etc/nginx.conf.so-eval b/salt/nginx/etc/nginx.conf.so-eval index 9c919c764..8032ed0ce 100644 --- a/salt/nginx/etc/nginx.conf.so-eval +++ b/salt/nginx/etc/nginx.conf.so-eval @@ -297,6 +297,9 @@ http { } location /sensoroniagents/ { + if ($http_authorization = "") { + return 403; + } proxy_pass http://{{ managerip }}:9822/; proxy_read_timeout 90; proxy_connect_timeout 90; diff --git a/salt/nginx/etc/nginx.conf.so-manager b/salt/nginx/etc/nginx.conf.so-manager index cf7545942..42caa7841 100644 --- a/salt/nginx/etc/nginx.conf.so-manager +++ b/salt/nginx/etc/nginx.conf.so-manager @@ -297,6 +297,9 @@ http { } location /sensoroniagents/ { + if ($http_authorization = "") { + return 403; + } proxy_pass http://{{ managerip }}:9822/; proxy_read_timeout 90; proxy_connect_timeout 90; diff --git a/salt/nginx/etc/nginx.conf.so-managersearch b/salt/nginx/etc/nginx.conf.so-managersearch index 4b9daba4e..0f0e052c8 100644 --- a/salt/nginx/etc/nginx.conf.so-managersearch +++ b/salt/nginx/etc/nginx.conf.so-managersearch @@ -296,6 +296,9 @@ http { } location /sensoroniagents/ { + if ($http_authorization = "") { + return 403; + } proxy_pass http://{{ managerip }}:9822/; proxy_read_timeout 90; proxy_connect_timeout 90; diff --git a/salt/nginx/etc/nginx.conf.so-standalone b/salt/nginx/etc/nginx.conf.so-standalone index cf7545942..42caa7841 100644 --- a/salt/nginx/etc/nginx.conf.so-standalone +++ b/salt/nginx/etc/nginx.conf.so-standalone @@ -297,6 +297,9 @@ http { } 
location /sensoroniagents/ { + if ($http_authorization = "") { + return 403; + } proxy_pass http://{{ managerip }}:9822/; proxy_read_timeout 90; proxy_connect_timeout 90; From 07ef464375de8ef3ba0ad1320ba51be9f8aac288 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 13 Aug 2020 16:01:53 -0400 Subject: [PATCH 259/376] https://github.com/Security-Onion-Solutions/securityonion/issues/1170 --- setup/so-functions | 79 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 75 insertions(+), 4 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 4f9d4938e..1ed19006f 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1370,6 +1370,34 @@ saltify() { } +check_service_status() { + + local service_name=$1 + systemctl status $service_name > /dev/null 2>&1 + local service_status=$? + if [ $service_status -gt 0 ]; then + service_status=1 + else + service_status=0 + fi + + return $service_status + +} + +check_salt_master_status() { + salt-call state.show_top >> "$setup_log" 2>&1 + local exit_code=$? + if [ $exit_code -gt 0 ]; then + exit_code=1 + else + exit_code=0 + fi + + return $exit_code + +} + salt_checkin() { case "$install_type" in @@ -1378,10 +1406,53 @@ salt_checkin() { echo "Building Certificate Authority"; salt-call state.apply ca; echo " *** Restarting Salt to fix any SSL errors. ***"; - systemctl restart salt-master; - sleep 5; - systemctl restart salt-minion; - sleep 15; + + local SALT_SERVICES=(\ + "salt-minion" \ + "salt-master" + ) + local LOOP_COUNT=0 + for service in "${SALT_SERVICES[@]}"; do + systemctl stop "$service"; + LOOP_COUNT=0 + while check_service_status "$service"; do + echo "$service still running" >> "$setup_log" 2>&1 + if [ LOOP_COUNT -gt 120 ]; then + echo "$service could not be stopped in 120 seconds" >> "$setup_log" 2>&1 + whiptail_setup_failed() + exit 1; + fi + sleep 1; + ((LOOP_COUNT+=1)) + done + + systemctl start "$service"; + LOOP_COUNT=0 + while ! check_service_status "$service"; do + echo "$service still not running" >> "$setup_log" 2>&1 + if [ LOOP_COUNT -gt 120 ]; then + echo "$service could not be started in 120 seconds" >> "$setup_log" 2>&1 + whiptail_setup_failed() + exit 1; + fi + sleep 1; + ((LOOP_COUNT+=1)) + done + + done + + LOOP_COUNT=0 + while check_salt_master_status; do + echo "salt-minion cannot talk to salt-master" >> "$setup_log" 2>&1 + if [ LOOP_COUNT -gt 20 ]; then + echo "salt-minion could not talk to salt-master after 20 attempts" >> "$setup_log" 2>&1 + whiptail_setup_failed() + exit 1; + fi + sleep 1; + ((LOOP_COUNT+=1)) + done + echo " Confirming existence of the CA certificate" cat /etc/pki/ca.crt echo " Applyng a mine hack"; From 1b4029f74b37280b504501a156caff7d13095562 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 13 Aug 2020 16:18:02 -0400 Subject: [PATCH 260/376] fix syntax errors --- setup/so-functions | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 1ed19006f..c7c296fd3 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1417,10 +1417,10 @@ salt_checkin() { LOOP_COUNT=0 while check_service_status "$service"; do echo "$service still running" >> "$setup_log" 2>&1 - if [ LOOP_COUNT -gt 120 ]; then + if [ $LOOP_COUNT -gt 120 ]; then echo "$service could not be stopped in 120 seconds" >> "$setup_log" 2>&1 - whiptail_setup_failed() - exit 1; + whiptail_setup_failed + exit 1 fi sleep 1; ((LOOP_COUNT+=1)) @@ -1430,10 +1430,10 @@ salt_checkin() { LOOP_COUNT=0 while ! 
check_service_status "$service"; do echo "$service still not running" >> "$setup_log" 2>&1 - if [ LOOP_COUNT -gt 120 ]; then + if [ $LOOP_COUNT -gt 120 ]; then echo "$service could not be started in 120 seconds" >> "$setup_log" 2>&1 - whiptail_setup_failed() - exit 1; + whiptail_setup_failed + exit 1 fi sleep 1; ((LOOP_COUNT+=1)) @@ -1444,10 +1444,10 @@ salt_checkin() { LOOP_COUNT=0 while check_salt_master_status; do echo "salt-minion cannot talk to salt-master" >> "$setup_log" 2>&1 - if [ LOOP_COUNT -gt 20 ]; then + if [ $LOOP_COUNT -gt 20 ]; then echo "salt-minion could not talk to salt-master after 20 attempts" >> "$setup_log" 2>&1 - whiptail_setup_failed() - exit 1; + whiptail_setup_failed + exit 1 fi sleep 1; ((LOOP_COUNT+=1)) From 3d20cc03412841498aa5db3729b09d290d78e8da Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 13 Aug 2020 16:34:18 -0400 Subject: [PATCH 261/376] some debugging --- setup/so-functions | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index c7c296fd3..b93e556f2 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1386,6 +1386,7 @@ check_service_status() { } check_salt_master_status() { + echo "Checking salt-master status" >> "$setup_log" 2>&1 salt-call state.show_top >> "$setup_log" 2>&1 local exit_code=$? if [ $exit_code -gt 0 ]; then @@ -1394,6 +1395,7 @@ check_salt_master_status() { exit_code=0 fi + echo "$exit_code" >> "$setup_log" 2>&1 return $exit_code } @@ -1413,26 +1415,26 @@ salt_checkin() { ) local LOOP_COUNT=0 for service in "${SALT_SERVICES[@]}"; do - systemctl stop "$service"; + systemctl stop "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 while check_service_status "$service"; do echo "$service still running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 120 ]; then echo "$service could not be stopped in 120 seconds" >> "$setup_log" 2>&1 - whiptail_setup_failed + #whiptail_setup_failed exit 1 fi sleep 1; ((LOOP_COUNT+=1)) done - systemctl start "$service"; + systemctl start "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 while ! check_service_status "$service"; do echo "$service still not running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 120 ]; then echo "$service could not be started in 120 seconds" >> "$setup_log" 2>&1 - whiptail_setup_failed + #whiptail_setup_failed exit 1 fi sleep 1; @@ -1444,9 +1446,9 @@ salt_checkin() { LOOP_COUNT=0 while check_salt_master_status; do echo "salt-minion cannot talk to salt-master" >> "$setup_log" 2>&1 - if [ $LOOP_COUNT -gt 20 ]; then - echo "salt-minion could not talk to salt-master after 20 attempts" >> "$setup_log" 2>&1 - whiptail_setup_failed + if [ $LOOP_COUNT -gt 120 ]; then + echo "salt-minion could not talk to salt-master after 120 attempts" >> "$setup_log" 2>&1 + #whiptail_setup_failed exit 1 fi sleep 1; From ed4bee0d0b2700a9997c272c2421b5c765202a01 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Thu, 13 Aug 2020 16:42:50 -0400 Subject: [PATCH 262/376] so-allow has no usage function #1133 --- salt/common/tools/sbin/so-allow | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-allow b/salt/common/tools/sbin/so-allow index f902d659c..a49a694a6 100755 --- a/salt/common/tools/sbin/so-allow +++ b/salt/common/tools/sbin/so-allow @@ -21,6 +21,30 @@ local_salt_dir=/opt/so/saltstack/local SKIP=0 +function usage { + +cat << EOF + +Usage: $0 [-abefhoprsw] [ -i IP ] + +This program allows you to add a firewall rule to allow connections from a new IP address or CIDR range. 
+ +If you run this program with no arguments, it will present a menu for you to choose your options. + +If you want to automate and skip the menu, you can pass the desired options as command line arguments. + +EXAMPLES + +To add 10.1.2.3 to the analyst role: +so-allow -a -i 10.1.2.3 + +To add 10.1.2.0/24 to the osquery role: +so-allow -o -i 10.1.2.0/24 + +EOF + +} + while getopts "ahfesprbowi:" OPTION do case $OPTION in @@ -36,7 +60,7 @@ do FULLROLE="beats_endpoint" SKIP=1 ;; - e) + e) FULLROLE="elasticsearch_rest" SKIP=1 ;; From 6cf623e133948105ff444974966fd14c9dddc822 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 13 Aug 2020 16:52:39 -0400 Subject: [PATCH 263/376] some logic changes --- setup/so-functions | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index b93e556f2..5f6be3f8e 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1374,29 +1374,31 @@ check_service_status() { local service_name=$1 systemctl status $service_name > /dev/null 2>&1 - local service_status=$? + local status=$? + #true service is running false if not if [ $service_status -gt 0 ]; then - service_status=1 + status=false else - service_status=0 + status=true fi - return $service_status + return $status } check_salt_master_status() { echo "Checking salt-master status" >> "$setup_log" 2>&1 salt-call state.show_top >> "$setup_log" 2>&1 - local exit_code=$? + local status=$? + #true if we can talk to salt master false if not if [ $exit_code -gt 0 ]; then - exit_code=1 + status=false else - exit_code=0 + status=true fi - echo "$exit_code" >> "$setup_log" 2>&1 - return $exit_code + echo "$status" >> "$setup_log" 2>&1 + return $status } @@ -1421,7 +1423,6 @@ salt_checkin() { echo "$service still running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 120 ]; then echo "$service could not be stopped in 120 seconds" >> "$setup_log" 2>&1 - #whiptail_setup_failed exit 1 fi sleep 1; @@ -1434,7 +1435,6 @@ salt_checkin() { echo "$service still not running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 120 ]; then echo "$service could not be started in 120 seconds" >> "$setup_log" 2>&1 - #whiptail_setup_failed exit 1 fi sleep 1; @@ -1444,11 +1444,10 @@ salt_checkin() { done LOOP_COUNT=0 - while check_salt_master_status; do + while ! check_salt_master_status; do echo "salt-minion cannot talk to salt-master" >> "$setup_log" 2>&1 - if [ $LOOP_COUNT -gt 120 ]; then - echo "salt-minion could not talk to salt-master after 120 attempts" >> "$setup_log" 2>&1 - #whiptail_setup_failed + if [ $LOOP_COUNT -gt 40 ]; then + echo "salt-minion could not talk to salt-master after 40 attempts" >> "$setup_log" 2>&1 exit 1 fi sleep 1; From 829490da19f35cff0aaf9402d255cc18b9bc8568 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 13 Aug 2020 17:05:50 -0400 Subject: [PATCH 264/376] fix errors --- setup/so-functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 5f6be3f8e..7537ceaa7 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1376,7 +1376,7 @@ check_service_status() { systemctl status $service_name > /dev/null 2>&1 local status=$? #true service is running false if not - if [ $service_status -gt 0 ]; then + if [ $status -gt 0 ]; then status=false else status=true @@ -1391,7 +1391,7 @@ check_salt_master_status() { salt-call state.show_top >> "$setup_log" 2>&1 local status=$? 
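The false/true assignments introduced two patches back are worth flagging: Bash's return builtin only accepts a numeric status, so `return $status` fails outright once status holds the string true or false, which is why later patches in this series fall back to 1 and 0. A standalone illustration of the working capture-and-return pattern (not code from the repo):

    check_reachable() {
        ping -c 1 -W 1 "$1" > /dev/null 2>&1
        local status=$?    # capture immediately; the next command would overwrite $?
        return $status     # must be 0-255; 'return true' errors with "numeric argument required"
    }
    check_reachable 127.0.0.1 && echo "host is up"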
#true if we can talk to salt master false if not - if [ $exit_code -gt 0 ]; then + if [ $status -gt 0 ]; then status=false else status=true From 7400bbd6c1472c600dbccf41d3d37dceed1a2229 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 13 Aug 2020 17:14:53 -0400 Subject: [PATCH 265/376] Elastalert Stability Fixes --- salt/elastalert/files/elastalert_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elastalert/files/elastalert_config.yaml b/salt/elastalert/files/elastalert_config.yaml index c3e60c78e..28d26bac0 100644 --- a/salt/elastalert/files/elastalert_config.yaml +++ b/salt/elastalert/files/elastalert_config.yaml @@ -21,7 +21,7 @@ run_every: # ElastAlert will buffer results from the most recent # period of time, in case some log sources are not in real time buffer_time: - minutes: 5 + minutes: 10 # The maximum time between queries for ElastAlert to start at the most recently # run query. When ElastAlert starts, for each rule, it will search elastalert_metadata From 3c113a7a890d86c53108be8bd9dd56f50c7a7133 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 13 Aug 2020 17:29:45 -0400 Subject: [PATCH 266/376] Add system information at beginning of installation; provide logging functions to be used instead of echo commands --- setup/so-functions | 34 ++++++++++++++++++++++++++++++++++ setup/so-setup | 9 ++++----- 2 files changed, 38 insertions(+), 5 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 4f9d4938e..54add4e1b 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -21,6 +21,40 @@ source ./so-common-functions SOVERSION=$(cat ../VERSION) +log() { + msg=$1 + level=${2:-I} + now=$(TZ=GMT date +"%Y-%m-%dT%H:%M:%SZ") + echo -e "$now | $level | $msg" >> "$setup_log" 2>&1 +} + +error() { + log "$1" "E" +} + +info() { + log "$1" "I" +} + +header() { + echo -e "-----------------------------\n $1\n-----------------------------\n" >> "$setup_log" 2>&1 +} + +logCmd() { + cmd=$1 + info "Executing command: $cmd\n$($cmd)\n" +} + +analyze_system() { + header "System Characteristics" + logCmd "uptime" + logCmd "uname -a" + logCmd "free -h" + logCmd "lscpu" + logCmd "df -h" + logCmd "ip a" +} + accept_salt_key_remote() { systemctl restart salt-minion diff --git a/setup/so-setup b/setup/so-setup index c933abcce..22936fbb8 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -48,9 +48,11 @@ done # Begin Installation pre-processing parse_install_username -echo "Installing as the $INSTALLUSERNAME user." >> $setup_log 2>&1 -echo "---- Starting setup at $(date -u) ----" >> $setup_log 2>&1 +header "Initializing Setup" +info "Installing as the $INSTALLUSERNAME user" + +analyze_system automated=no function progress() { @@ -76,9 +78,6 @@ if [[ -f automation/$automation && $(basename $automation) == $automation ]]; th source automation/$automation automated=yes - echo "Checking network configuration" >> $setup_log 2>&1 - ip a >> $setup_log 2>&1 - attempt=1 attempts=60 ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1 From f9f2744d3f2e043ebd501cfaf5cd1385f8e81242 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 13 Aug 2020 17:49:05 -0400 Subject: [PATCH 267/376] logic changes --- setup/so-functions | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 7537ceaa7..480c86604 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1377,9 +1377,9 @@ check_service_status() { local status=$? 
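For context, the helpers added in the logging patch above write pipe-delimited, UTC-timestamped lines to the setup log. The intended call pattern looks roughly like this (the log path is a placeholder and the timestamps are representative, not captured from a run):

    setup_log=/tmp/sosetup.log
    info "Installing as the soadmin user"   # 2020-08-14T12:00:00Z | I | Installing as the soadmin user
    error "Unable to reach NTP server"      # 2020-08-14T12:00:00Z | E | Unable to reach NTP server
    logCmd "uname -a"                       # logs "Executing command: uname -a" plus the command's output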
#true service is running false if not if [ $status -gt 0 ]; then - status=false + status=1 else - status=true + status=0 fi return $status @@ -1390,11 +1390,11 @@ check_salt_master_status() { echo "Checking salt-master status" >> "$setup_log" 2>&1 salt-call state.show_top >> "$setup_log" 2>&1 local status=$? - #true if we can talk to salt master false if not + #true if there is an issue talking to salt master if [ $status -gt 0 ]; then - status=false + status=1 else - status=true + status=0 fi echo "$status" >> "$setup_log" 2>&1 From 42c1e817fedb89da8add5cd4b83706474f4a7cb1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 13 Aug 2020 18:09:57 -0400 Subject: [PATCH 268/376] more logging and debugging --- setup/so-functions | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 480c86604..162c0e82b 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1373,7 +1373,8 @@ saltify() { check_service_status() { local service_name=$1 - systemctl status $service_name > /dev/null 2>&1 + echo "Checking service $service_name status" >> "$setup_log" 2>&1 + systemctl status $service_name >> "$setup_log" 2>&1 local status=$? #true service is running false if not if [ $status -gt 0 ]; then @@ -1454,6 +1455,9 @@ salt_checkin() { ((LOOP_COUNT+=1)) done + systemctl status salt-master; + systemctl status salt-minion; + echo " Confirming existence of the CA certificate" cat /etc/pki/ca.crt echo " Applyng a mine hack"; From e6830e9cba14c746f51bae65b224566f3a0a5f21 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 14 Aug 2020 01:09:47 -0400 Subject: [PATCH 269/376] Avoid reusing header function from so-common --- setup/so-functions | 4 ++-- setup/so-setup | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 54add4e1b..c43e668bc 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -36,7 +36,7 @@ info() { log "$1" "I" } -header() { +title() { echo -e "-----------------------------\n $1\n-----------------------------\n" >> "$setup_log" 2>&1 } @@ -46,7 +46,7 @@ logCmd() { } analyze_system() { - header "System Characteristics" + title "System Characteristics" logCmd "uptime" logCmd "uname -a" logCmd "free -h" diff --git a/setup/so-setup b/setup/so-setup index 22936fbb8..b1b142b8c 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -49,7 +49,7 @@ done # Begin Installation pre-processing parse_install_username -header "Initializing Setup" +title "Initializing Setup" info "Installing as the $INSTALLUSERNAME user" analyze_system From ee62faae72ef70201df5968425aaa80179265584 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 14 Aug 2020 09:10:28 -0400 Subject: [PATCH 270/376] Only show the web interface link when the redirect URL is available, such as on manager nodes --- setup/so-whiptail | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index f84090e4f..89fe784ae 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -1064,17 +1064,21 @@ whiptail_setup_complete() { [ -n "$TESTING" ] && return - if [[ -n $ALLOW_CIDR ]]; then - local sentence_prefix="Access" + if [[ -n "$REDIRECTIT" ]]; then + if [[ -n $ALLOW_CIDR ]]; then + local sentence_prefix="Access" + else + local sentence_prefix="Run so-allow after reboot to access" + fi + local accessMessage="\n${sentence_prefix} the web interface at: https://${REDIRECTIT}\n" else - local sentence_prefix="Run so-allow after reboot to access" + local 
accessMessage="" fi + read -r -d '' message <<- EOM Finished ${install_type} installation. - - ${sentence_prefix} the web interface at: https://${REDIRECTIT} - + $accessMessage Press ENTER to reboot. EOM From 78bceeb9e5e761e720e65efbbdbea2db9f2bb7b7 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 14 Aug 2020 09:17:25 -0400 Subject: [PATCH 271/376] Only show the web interface link when the redirect URL is available, such as on manager nodes --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 89fe784ae..f58e7cace 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -1064,7 +1064,7 @@ whiptail_setup_complete() { [ -n "$TESTING" ] && return - if [[ -n "$REDIRECTIT" ]]; then + if [[ -n "$REDIRECTIT" && is_manager ]]; then if [[ -n $ALLOW_CIDR ]]; then local sentence_prefix="Access" else From a3d8b7d0d3f92b8621e35135b1a4cfaf0d46c271 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 14 Aug 2020 09:40:38 -0400 Subject: [PATCH 272/376] Add watch statements --- salt/elasticsearch/files/elasticsearch.yml | 18 +++++++++--------- salt/elasticsearch/init.sls | 3 +++ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index 411f5bdf5..acad465d1 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -24,15 +24,15 @@ cluster.routing.allocation.disk.watermark.low: 95% cluster.routing.allocation.disk.watermark.high: 98% cluster.routing.allocation.disk.watermark.flood_stage: 98% {%- if FEATURES is sameas true %} -xpack.security.enabled: false -xpack.security.http.ssl.enabled: false -xpack.security.transport.ssl.enabled: false -xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key -xpack.security.http.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt -xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt -xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key -xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt -xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt +#xpack.security.enabled: false +#xpack.security.http.ssl.enabled: false +#xpack.security.transport.ssl.enabled: false +#xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key +#xpack.security.http.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt +#xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt +#xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key +#xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt +#xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt #xpack.security.transport.ssl.verification_mode: none #xpack.security.http.ssl.client_authentication: none #xpack.security.authc: diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index a507d3535..cc2d91537 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -212,6 +212,9 @@ so-elasticsearch: - watch: - file: cacertz + - file: esyml + - file: esingestconf + - file: so-elasticsearch-pipelines-file so-elasticsearch-pipelines-file: file.managed: From 283f91459aae043ccfc40a230cba0d3873fb2f22 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: 
Fri, 14 Aug 2020 10:05:56 -0400 Subject: [PATCH 273/376] Fix rule update cron --- salt/common/tools/sbin/so-rule-update | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-rule-update b/salt/common/tools/sbin/so-rule-update index f50d49322..19466c2b3 100755 --- a/salt/common/tools/sbin/so-rule-update +++ b/salt/common/tools/sbin/so-rule-update @@ -10,4 +10,4 @@ got_root() { } got_root -docker exec -it so-idstools /bin/bash -c 'cd /opt/so/idstools/etc && idstools-rulecat' +docker exec -d so-idstools /bin/bash -c 'cd /opt/so/idstools/etc && idstools-rulecat' From 0eb0551b68968ca36711de64c3e3ef91b6e3d63a Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 10:15:54 -0400 Subject: [PATCH 274/376] add check if salt minion is returning jobs --- setup/so-functions | 51 +++++++++++++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 14 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 162c0e82b..14c0fd671 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1388,7 +1388,7 @@ check_service_status() { } check_salt_master_status() { - echo "Checking salt-master status" >> "$setup_log" 2>&1 + echo "Checking if we can talk to the salt master" >> "$setup_log" 2>&1 salt-call state.show_top >> "$setup_log" 2>&1 local status=$? #true if there is an issue talking to salt master @@ -1398,11 +1398,26 @@ check_salt_master_status() { status=0 fi - echo "$status" >> "$setup_log" 2>&1 return $status } +check_salt_minion_status() { + echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 + salt "$MINION_ID" test.ping >> "$setup_log" 2>&1 + local status=$? + #true if there is an issue getting a job response from the minion + if [ $status -gt 0 ]; then + status=1 + else + status=0 + fi + + return $status + +} + + salt_checkin() { case "$install_type" in @@ -1422,8 +1437,8 @@ salt_checkin() { LOOP_COUNT=0 while check_service_status "$service"; do echo "$service still running" >> "$setup_log" 2>&1 - if [ $LOOP_COUNT -gt 120 ]; then - echo "$service could not be stopped in 120 seconds" >> "$setup_log" 2>&1 + if [ $LOOP_COUNT -gt 60 ]; then + echo "$service could not be stopped in 60 seconds, exiting" >> "$setup_log" 2>&1 exit 1 fi sleep 1; @@ -1434,8 +1449,8 @@ salt_checkin() { LOOP_COUNT=0 while ! check_service_status "$service"; do echo "$service still not running" >> "$setup_log" 2>&1 - if [ $LOOP_COUNT -gt 120 ]; then - echo "$service could not be started in 120 seconds" >> "$setup_log" 2>&1 + if [ $LOOP_COUNT -gt 60 ]; then + echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1 exit 1 fi sleep 1; @@ -1446,25 +1461,33 @@ salt_checkin() { LOOP_COUNT=0 while ! check_salt_master_status; do - echo "salt-minion cannot talk to salt-master" >> "$setup_log" 2>&1 - if [ $LOOP_COUNT -gt 40 ]; then - echo "salt-minion could not talk to salt-master after 40 attempts" >> "$setup_log" 2>&1 + echo "salt minion cannot talk to salt master" >> "$setup_log" 2>&1 + if [ $LOOP_COUNT -gt 30 ]; then + echo "salt minion could not talk to salt master after 30 attempts, exiting" >> "$setup_log" 2>&1 exit 1 fi sleep 1; ((LOOP_COUNT+=1)) done - systemctl status salt-master; - systemctl status salt-minion; + LOOP_COUNT=0 + while ! 
check_salt_minion_status; do + echo "salt master not getting job response from salt minion" >> "$setup_log" 2>&1 + if [ $LOOP_COUNT -gt 30 ]; then + echo "salt master not getting job response from salt minion after 30 attempts, exiting" >> "$setup_log" 2>&1 + exit 1 + fi + sleep 1; + ((LOOP_COUNT+=1)) + done echo " Confirming existence of the CA certificate" cat /etc/pki/ca.crt echo " Applyng a mine hack"; - salt '*' mine.send x509.get_pem_entries glob_path=/etc/pki/ca.crt; - salt '*' mine.update; + salt "$MINION_ID" mine.send x509.get_pem_entries glob_path=/etc/pki/ca.crt; + salt "$MINION_ID" mine.update; echo " Confirming salt mine now contain the certificate"; - salt '*' mine.get '*' x509.get_pem_entries; + salt "$MINION_ID" mine.get '*' x509.get_pem_entries; echo " Applying SSL state"; salt-call state.apply ssl; } >> "$setup_log" 2>&1 From e2fbe59b7c6ee956739b64edab1b8e8691c591c3 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 10:30:01 -0400 Subject: [PATCH 275/376] additional logging --- setup/so-functions | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 14c0fd671..2a34dd0a9 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1374,12 +1374,14 @@ check_service_status() { local service_name=$1 echo "Checking service $service_name status" >> "$setup_log" 2>&1 - systemctl status $service_name >> "$setup_log" 2>&1 + systemctl status $service_name > /dev/null 2>&1 local status=$? #true service is running false if not if [ $status -gt 0 ]; then + echo "$service_name is running" >> "$setup_log" 2>&1 status=1 else + echo "$service_name is not running" >> "$setup_log" 2>&1 status=0 fi @@ -1389,10 +1391,11 @@ check_service_status() { check_salt_master_status() { echo "Checking if we can talk to the salt master" >> "$setup_log" 2>&1 - salt-call state.show_top >> "$setup_log" 2>&1 + salt-call state.show_top > /dev/null 2>&1 local status=$? #true if there is an issue talking to salt master if [ $status -gt 0 ]; then + echo "Cannot talk to salt master" >> "$setup_log" 2>&1 status=1 else status=0 @@ -1408,6 +1411,7 @@ check_salt_minion_status() { local status=$? #true if there is an issue getting a job response from the minion if [ $status -gt 0 ]; then + echo "Not receiving job response from salt minion" >> "$setup_log" 2>&1 status=1 else status=0 @@ -1459,6 +1463,8 @@ salt_checkin() { done + #sleep 15; + LOOP_COUNT=0 while ! check_salt_master_status; do echo "salt minion cannot talk to salt master" >> "$setup_log" 2>&1 From cd1169b68d5b5811865dc4afdf76318f61793f01 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 10:53:42 -0400 Subject: [PATCH 276/376] logging changes --- setup/so-functions | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 2a34dd0a9..987a71317 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1395,9 +1395,9 @@ check_salt_master_status() { local status=$? #true if there is an issue talking to salt master if [ $status -gt 0 ]; then - echo "Cannot talk to salt master" >> "$setup_log" 2>&1 status=1 else + echo "Can talk to salt master" >> "$setup_log" 2>&1 status=0 fi @@ -1411,9 +1411,9 @@ check_salt_minion_status() { local status=$? 
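The targeted calls above replace the glob form salt '*' with the specific minion ID, and the new check_salt_minion_status boils the readiness test down to a single job. Roughly equivalent at a shell prompt (the minion ID is a placeholder):

    salt 'standalone_example' test.ping   # prints True once the minion is accepting and returning jobs
    echo $?                               # the setup loop treats a non-zero exit here as "no response yet"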
#true if there is an issue getting a job response from the minion if [ $status -gt 0 ]; then - echo "Not receiving job response from salt minion" >> "$setup_log" 2>&1 status=1 else + status=1 else + echo "Received job response from salt minion" >> "$setup_log" 2>&1 status=0 fi @@ -1421,7 +1421,6 @@ check_salt_minion_status() { } - salt_checkin() { case "$install_type" in @@ -1437,6 +1436,7 @@ salt_checkin() { ) local LOOP_COUNT=0 for service in "${SALT_SERVICES[@]}"; do + echo "Stopping service $service" >> "$setup_log" 2>&1 systemctl stop "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 while check_service_status "$service"; do @@ -1449,6 +1449,7 @@ salt_checkin() { ((LOOP_COUNT+=1)) done + echo "Starting service $service" >> "$setup_log" 2>&1 systemctl start "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 while ! check_service_status "$service"; do @@ -1478,9 +1479,9 @@ salt_checkin() { LOOP_COUNT=0 while ! check_salt_minion_status; do - echo "salt master not getting job response from salt minion" >> "$setup_log" 2>&1 + echo "salt master did not get a job response from salt minion" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 30 ]; then - echo "salt master not getting job response from salt minion after 30 attempts, exiting" >> "$setup_log" 2>&1 + echo "salt master did not get a job response from salt minion after 30 attempts, exiting" >> "$setup_log" 2>&1 exit 1 fi sleep 1; From ea5116700d19fbd1d16d668b696ca558b4b99366 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 11:01:26 -0400 Subject: [PATCH 277/376] stop both services, then start both --- setup/so-functions | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 987a71317..c92f6a152 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1431,8 +1431,8 @@ salt_checkin() { echo " *** Restarting Salt to fix any SSL errors. ***"; local SALT_SERVICES=(\ - "salt-minion" \ - "salt-master" + "salt-master" \ + "salt-minion" ) local LOOP_COUNT=0 for service in "${SALT_SERVICES[@]}"; do echo "Stopping service $service" >> "$setup_log" 2>&1 systemctl stop "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 while check_service_status "$service"; do @@ -1448,7 +1448,11 @@ salt_checkin() { sleep 1; ((LOOP_COUNT+=1)) done + done + sleep 5; + + for service in "${SALT_SERVICES[@]}"; do echo "Starting service $service" >> "$setup_log" 2>&1 systemctl start "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 while ! check_service_status "$service"; do @@ -1461,7 +1465,6 @@ salt_checkin() { sleep 1; ((LOOP_COUNT+=1)) done - done #sleep 15; From 876c6c7cb0448f22a0033e35eb47c9e35ccb5ee0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 11:16:56 -0400 Subject: [PATCH 278/376] logic changes --- setup/so-functions | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index c92f6a152..a4444481e 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1374,7 +1374,7 @@ check_service_status() { local service_name=$1 echo "Checking service $service_name status" >> "$setup_log" 2>&1 - systemctl status $service_name > /dev/null 2>&1 + systemctl status $service_name >> "$setup_log" 2>&1 local status=$?
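Stripped of the retry loops, the restart sequencing introduced in the stop-both/start-both patch above amounts to a clean bounce of the whole Salt pair (a simplified sketch, not the literal setup code):

    systemctl stop salt-master salt-minion   # stop both daemons before starting either
    sleep 5                                   # brief pause between stop and start, as in the patch
    systemctl start salt-master               # master first, so the minion can reconnect to it
    systemctl start salt-minion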
#true service is running false if not if [ $status -gt 0 ]; then @@ -1439,7 +1439,7 @@ salt_checkin() { echo "Stopping service $service" >> "$setup_log" 2>&1 systemctl stop "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while check_service_status "$service"; do + while (( check_service_status "$service" )); do echo "$service still running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be stopped in 60 seconds, exiting" >> "$setup_log" 2>&1 @@ -1456,7 +1456,7 @@ salt_checkin() { echo "Starting service $service" >> "$setup_log" 2>&1 systemctl start "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while ! check_service_status "$service"; do + while ! (( check_service_status )) "$service"; do echo "$service still not running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1 @@ -1470,7 +1470,7 @@ salt_checkin() { #sleep 15; LOOP_COUNT=0 - while ! check_salt_master_status; do + while (( check_salt_master_status )); do echo "salt minion cannot talk to salt master" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 30 ]; then echo "salt minion could not talk to salt master after 30 attempts, exiting" >> "$setup_log" 2>&1 @@ -1481,7 +1481,7 @@ salt_checkin() { done LOOP_COUNT=0 - while ! check_salt_minion_status; do + while (( check_salt_minion_status )); do echo "salt master did not get a job response from salt minion" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 30 ]; then echo "salt master did not get a job response from salt minion after 30 attempts, exiting" >> "$setup_log" 2>&1 From aa2b0699d57614eb593523f4de4954db9765a266 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 11:20:18 -0400 Subject: [PATCH 279/376] move parens --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index a4444481e..8e0beaa74 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1456,7 +1456,7 @@ salt_checkin() { echo "Starting service $service" >> "$setup_log" 2>&1 systemctl start "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while ! (( check_service_status )) "$service"; do + while ! 
(( check_service_status "$service" )); do echo "$service still not running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1 From ab4285aaaf2dfe979ad7bb4212df8cad990182f6 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Fri, 14 Aug 2020 15:21:56 +0000 Subject: [PATCH 280/376] Only copy TheHive details to global pillar if enabled --- setup/so-functions | 62 +++++++++++++++++++++++++++------------------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index c43e668bc..7468ec01b 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -879,11 +879,13 @@ generate_passwords(){ PLAYBOOKPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) FLEETPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) FLEETJWT=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) - HIVEKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) - HIVEPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) - CORTEXKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) - CORTEXORGUSERKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) - CORTEXPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + if [[ "$THEHIVE" == "1" ]]; then + HIVEKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + HIVEPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + CORTEXKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + CORTEXORGUSERKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + CORTEXPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + fi SENSORONIKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) KRATOSKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) } @@ -1022,26 +1024,34 @@ manager_global() { fi # Create a global file for global values + printf '%s\n'\ + "global:"\ + " soversion: $SOVERSION"\ + " hnmanager: $HNMANAGER"\ + " ntpserver: $NTPSERVER"\ + " proxy: $PROXY"\ + " zeekversion: $ZEEKVERSION"\ + " ids: $NIDS"\ + " managerip: $MAINIP" > "$global_pillar" + + # Check if TheHive is enabled. 
If so, add creds and other details + if [[ "$THEHIVE" == "1" ]]; then + printf '%s\n'\ + " hiveuser: $WEBUSER"\ + " hivepassword: '$WEBPASSWD1'"\ + " hivekey: $HIVEKEY"\ + " hiveplaysecret: $HIVEPLAYSECRET"\ + " cortexuser: $WEBUSER"\ + " cortexpassword: '$WEBPASSWD1'"\ + " cortexkey: $CORTEXKEY"\ + " cortexorgname: SecurityOnion"\ + " cortexorguser: soadmin"\ + " cortexorguserkey: $CORTEXORGUSERKEY"\ + " cortexplaysecret: $CORTEXPLAYSECRET" >> "$global_pillar" + fi + + # Continue adding other details printf '%s\n'\ - "global:"\ - " soversion: $SOVERSION"\ - " hnmanager: $HNMANAGER"\ - " ntpserver: $NTPSERVER"\ - " proxy: $PROXY"\ - " zeekversion: $ZEEKVERSION"\ - " ids: $NIDS"\ - " managerip: $MAINIP"\ - " hiveuser: $WEBUSER"\ - " hivepassword: '$WEBPASSWD1'"\ - " hivekey: $HIVEKEY"\ - " hiveplaysecret: $HIVEPLAYSECRET"\ - " cortexuser: $WEBUSER"\ - " cortexpassword: '$WEBPASSWD1'"\ - " cortexkey: $CORTEXKEY"\ - " cortexorgname: SecurityOnion"\ - " cortexorguser: soadmin"\ - " cortexorguserkey: $CORTEXORGUSERKEY"\ - " cortexplaysecret: $CORTEXPLAYSECRET"\ " fleet_custom_hostname: "\ " fleet_manager: False"\ " fleet_node: False"\ @@ -1132,7 +1142,7 @@ manager_global() { " time_file: 1"\ " upload_queue_size: 4"\ " encoding: gzip"\ - " interval: 5" > "$global_pillar" + " interval: 5" >> "$global_pillar" printf '%s\n' '----' >> "$setup_log" 2>&1 cat "$global_pillar" >> "$setup_log" 2>&1 @@ -1872,4 +1882,4 @@ zeek_logs_enabled() { printf '%s\n' '----' >> "$setup_log" 2>&1 cat "$zeeklogs_pillar" >> "$setup_log" 2>&1 -} \ No newline at end of file +} From 683e8a2a39f969bac6c75efd8078f543d15d1d93 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 11:24:46 -0400 Subject: [PATCH 281/376] remove quotes --- setup/so-functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 8e0beaa74..160ccdb37 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1439,7 +1439,7 @@ salt_checkin() { echo "Stopping service $service" >> "$setup_log" 2>&1 systemctl stop "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while (( check_service_status "$service" )); do + while (( check_service_status $service )); do echo "$service still running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be stopped in 60 seconds, exiting" >> "$setup_log" 2>&1 @@ -1456,7 +1456,7 @@ salt_checkin() { echo "Starting service $service" >> "$setup_log" 2>&1 systemctl start "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while ! (( check_service_status "$service" )); do + while ! 
(( check_service_status $service )); do echo "$service still not running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1 From 69fd80375994bd359795498d8c26d30762900401 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 11:30:10 -0400 Subject: [PATCH 282/376] change while --- setup/so-functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 160ccdb37..0d912e82b 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1439,7 +1439,7 @@ salt_checkin() { echo "Stopping service $service" >> "$setup_log" 2>&1 systemctl stop "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while (( check_service_status $service )); do + while (( $(check_service_status $service) )); do echo "$service still running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be stopped in 60 seconds, exiting" >> "$setup_log" 2>&1 @@ -1456,7 +1456,7 @@ salt_checkin() { echo "Starting service $service" >> "$setup_log" 2>&1 systemctl start "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while ! (( check_service_status $service )); do + while ! (( $(check_service_status $service) )); do echo "$service still not running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1 From 7686a05f421bc224cc2877cb18e5877ce638a55f Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Fri, 14 Aug 2020 15:33:38 +0000 Subject: [PATCH 283/376] Set Strelka rules enabled by default for Eval Mode --- setup/so-setup | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index b1b142b8c..7d9320b02 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -148,6 +148,7 @@ if [ "$install_type" = 'EVAL' ]; then is_manager=true is_sensor=true is_eval=true + STRELKARULES=1 elif [ "$install_type" = 'STANDALONE' ]; then is_manager=true is_distmanager=true @@ -308,6 +309,10 @@ if [[ $is_manager && ! $is_eval ]]; then whiptail_oinkcode fi + if [[ $STRELKA == 1 ]]; then + whiptail_strelka_rules + fi + if [ "$MANAGERADV" = 'ADVANCED' ] && [ "$ZEEKVERSION" != 'SURICATA' ]; then whiptail_manager_adv_service_zeeklogs fi @@ -316,9 +321,6 @@ fi if [[ $is_manager ]]; then whiptail_components_adv_warning whiptail_enable_components - if [[ $STRELKA == 1 ]]; then - whiptail_strelka_rules - fi fi if [[ $is_manager || $is_import ]]; then From e229cb49bcc283c63062cecca69b2e023692c554 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 11:40:21 -0400 Subject: [PATCH 284/376] logic changes --- setup/so-functions | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 0d912e82b..e857f71e1 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1376,12 +1376,12 @@ check_service_status() { echo "Checking service $service_name status" >> "$setup_log" 2>&1 systemctl status $service_name >> "$setup_log" 2>&1 local status=$? 
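The (( $(...) )) construct adopted here leans on Bash arithmetic evaluation: (( expr )) exits 0 (true) when expr is non-zero, which is why the series ultimately reworks the check functions to print 1 for a problem and 0 for success. A standalone illustration:

    is_down() { echo 1; }               # stand-in for a check that prints 1 on failure
    while (( $(is_down) )); do          # (( 1 )) evaluates true, so the retry body runs
        echo "still down"
        break
    done
    (( 0 )) || echo "(( 0 )) is false"  # exit status 1, which is what ends such a loop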
- #true service is running false if not + #true if there is an issue with the service false if it is running properly if [ $status -gt 0 ]; then - echo "$service_name is running" >> "$setup_log" 2>&1 + echo "$service_name not is running" >> "$setup_log" 2>&1 status=1 else - echo "$service_name is not running" >> "$setup_log" 2>&1 + echo "$service_name is running" >> "$setup_log" 2>&1 status=0 fi @@ -1439,7 +1439,7 @@ salt_checkin() { echo "Stopping service $service" >> "$setup_log" 2>&1 systemctl stop "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while (( $(check_service_status $service) )); do + while ! (( $(check_service_status $service) )); do echo "$service still running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be stopped in 60 seconds, exiting" >> "$setup_log" 2>&1 @@ -1456,7 +1456,7 @@ salt_checkin() { echo "Starting service $service" >> "$setup_log" 2>&1 systemctl start "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while ! (( $(check_service_status $service) )); do + while (( $(check_service_status $service) )); do echo "$service still not running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1 From 18f37e3ef8a0f30f0d9db2dc7a67ac16af2e6402 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 14 Aug 2020 11:49:18 -0400 Subject: [PATCH 285/376] Install registry if the image is local --- setup/so-functions | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/setup/so-functions b/setup/so-functions index c43e668bc..4ce5c867b 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -940,6 +940,15 @@ install_cleanup() { } +import_registry_docker() { + if [ -f /nsm/docker-registry/docker/registry_image.tar ]; then + service docker start + docker import /nsm/docker-registry/docker/registry_image.tar registry:2 + else + echo "Need to download registry" + fi +} + manager_pillar() { local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls From c64faacdbccfcf88b2eacb630b3727fb6e79914f Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 14 Aug 2020 12:15:56 -0400 Subject: [PATCH 286/376] Install registry if the image is local --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index b1b142b8c..a1fe12317 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -541,7 +541,7 @@ fi else set_progress_str 26 'Downloading containers from the internet' fi - + import_registry_docker >> $setup_log 2>&1 salt-call state.apply -l info registry >> $setup_log 2>&1 docker_seed_registry 2>> "$setup_log" # ~ 60% when finished From 9d59fc23dd81b13b952fbcfd46f0f41fae62a625 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 12:24:15 -0400 Subject: [PATCH 287/376] logic changes --- setup/so-functions | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index e857f71e1..5e2110ffe 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1378,7 +1378,7 @@ check_service_status() { local status=$? 
#true if there is an issue with the service false if it is running properly if [ $status -gt 0 ]; then - echo "$service_name not is running" >> "$setup_log" 2>&1 + echo "$service_name is not running" >> "$setup_log" 2>&1 status=1 else echo "$service_name is running" >> "$setup_log" 2>&1 @@ -1439,7 +1439,7 @@ salt_checkin() { echo "Stopping service $service" >> "$setup_log" 2>&1 systemctl stop "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while ! (( $(check_service_status $service) )); do + while ! check_service_status $service; do echo "$service still running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be stopped in 60 seconds, exiting" >> "$setup_log" 2>&1 @@ -1456,7 +1456,7 @@ salt_checkin() { echo "Starting service $service" >> "$setup_log" 2>&1 systemctl start "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while (( $(check_service_status $service) )); do + while check_service_status $service; do echo "$service still not running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1 @@ -1470,7 +1470,7 @@ salt_checkin() { #sleep 15; LOOP_COUNT=0 - while (( check_salt_master_status )); do + while check_salt_master_status; do echo "salt minion cannot talk to salt master" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 30 ]; then echo "salt minion could not talk to salt master after 30 attempts, exiting" >> "$setup_log" 2>&1 @@ -1481,7 +1481,7 @@ salt_checkin() { done LOOP_COUNT=0 - while (( check_salt_minion_status )); do + while check_salt_minion_status; do echo "salt master did not get a job response from salt minion" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 30 ]; then echo "salt master did not get a job response from salt minion after 30 attempts, exiting" >> "$setup_log" 2>&1 From 4b21c1b492e2e99947db8ceae3f4ba48a1cffa4c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 12:45:50 -0400 Subject: [PATCH 288/376] logic change --- setup/so-functions | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 5e2110ffe..95313c6ff 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1379,14 +1379,12 @@ check_service_status() { #true if there is an issue with the service false if it is running properly if [ $status -gt 0 ]; then echo "$service_name is not running" >> "$setup_log" 2>&1 - status=1 + echo 1; else echo "$service_name is running" >> "$setup_log" 2>&1 - status=0 + echo 0; fi - return $status - } check_salt_master_status() { @@ -1395,14 +1393,12 @@ check_salt_master_status() { local status=$? #true if there is an issue talking to salt master if [ $status -gt 0 ]; then - status=1 + echo 1; else echo "Can talk to salt master" >> "$setup_log" 2>&1 - status=0 + echo 0; fi - return $status - } check_salt_minion_status() { @@ -1411,14 +1407,12 @@ check_salt_minion_status() { local status=$? #true if there is an issue getting a job response from the minion if [ $status -gt 0 ]; then - status=1 + echo 1; else echo "Received job response from salt minion" >> "$setup_log" 2>&1 - status=0 + echo 0; fi - return $status - } salt_checkin() { @@ -1439,7 +1433,7 @@ salt_checkin() { echo "Stopping service $service" >> "$setup_log" 2>&1 systemctl stop "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while ! check_service_status $service; do + while ! 
$(check_service_status $service); do echo "$service still running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be stopped in 60 seconds, exiting" >> "$setup_log" 2>&1 @@ -1456,7 +1450,7 @@ salt_checkin() { echo "Starting service $service" >> "$setup_log" 2>&1 systemctl start "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while check_service_status $service; do + while $(check_service_status $service); do echo "$service still not running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1 @@ -1470,7 +1464,7 @@ salt_checkin() { #sleep 15; LOOP_COUNT=0 - while check_salt_master_status; do + while $(check_salt_master_status); do echo "salt minion cannot talk to salt master" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 30 ]; then echo "salt minion could not talk to salt master after 30 attempts, exiting" >> "$setup_log" 2>&1 @@ -1481,7 +1475,7 @@ salt_checkin() { done LOOP_COUNT=0 - while check_salt_minion_status; do + while $(check_salt_minion_status); do echo "salt master did not get a job response from salt minion" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 30 ]; then echo "salt master did not get a job response from salt minion after 30 attempts, exiting" >> "$setup_log" 2>&1 From 4bb23a089e5ebe8ffe7b8920b86bc6d9580312dd Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 12:48:52 -0400 Subject: [PATCH 289/376] add some parens --- setup/so-functions | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 95313c6ff..2e4e054b9 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1433,7 +1433,7 @@ salt_checkin() { echo "Stopping service $service" >> "$setup_log" 2>&1 systemctl stop "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while ! $(check_service_status $service); do + while ! 
(( $(check_service_status $service) )); do echo "$service still running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be stopped in 60 seconds, exiting" >> "$setup_log" 2>&1 @@ -1450,7 +1450,7 @@ salt_checkin() { echo "Starting service $service" >> "$setup_log" 2>&1 systemctl start "$service" >> "$setup_log" 2>&1 LOOP_COUNT=0 - while $(check_service_status $service); do + while (( $(check_service_status $service) )); do echo "$service still not running" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 60 ]; then echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1 @@ -1464,7 +1464,7 @@ salt_checkin() { #sleep 15; LOOP_COUNT=0 - while $(check_salt_master_status); do + while (( $(check_salt_master_status) )); do echo "salt minion cannot talk to salt master" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 30 ]; then echo "salt minion could not talk to salt master after 30 attempts, exiting" >> "$setup_log" 2>&1 @@ -1475,7 +1475,7 @@ salt_checkin() { done LOOP_COUNT=0 - while $(check_salt_minion_status); do + while (( $(check_salt_minion_status) )); do echo "salt master did not get a job response from salt minion" >> "$setup_log" 2>&1 if [ $LOOP_COUNT -gt 30 ]; then echo "salt master did not get a job response from salt minion after 30 attempts, exiting" >> "$setup_log" 2>&1 From 6602ad32862189e068bfb867f347e7e966204230 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 12:53:24 -0400 Subject: [PATCH 290/376] sleep for 5 seconds --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 2e4e054b9..3ca22c159 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1461,7 +1461,7 @@ salt_checkin() { done done - #sleep 15; + sleep 5; LOOP_COUNT=0 while (( $(check_salt_master_status) )); do From b7bfa6f9a9d712018dbf61066af5c963054581d6 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 12:55:54 -0400 Subject: [PATCH 291/376] move functions up --- setup/so-functions | 90 +++++++++++++++++++++++----------------------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 3ca22c159..59a8f6fe3 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -195,6 +195,51 @@ check_pass_match() { fi } +check_service_status() { + + local service_name=$1 + echo "Checking service $service_name status" >> "$setup_log" 2>&1 + systemctl status $service_name >> "$setup_log" 2>&1 + local status=$? + #true if there is an issue with the service false if it is running properly + if [ $status -gt 0 ]; then + echo "$service_name is not running" >> "$setup_log" 2>&1 + echo 1; + else + echo "$service_name is running" >> "$setup_log" 2>&1 + echo 0; + fi + +} + +check_salt_master_status() { + echo "Checking if we can talk to the salt master" >> "$setup_log" 2>&1 + salt-call state.show_top > /dev/null 2>&1 + local status=$? + #true if there is an issue talking to salt master + if [ $status -gt 0 ]; then + echo 1; + else + echo "Can talk to salt master" >> "$setup_log" 2>&1 + echo 0; + fi + +} + +check_salt_minion_status() { + echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 + salt "$MINION_ID" test.ping >> "$setup_log" 2>&1 + local status=$? 
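The reordering above is mainly about keeping definitions ahead of first use: Bash resolves function names when a call executes, not when the definition is read, so a function body may freely reference helpers defined later in the file. A standalone illustration:

    caller() { helper; }    # referencing helper here is fine - lookup happens when caller runs
    helper() { echo "ok"; }
    caller                  # prints "ok": both definitions were read before the first call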
+ #true if there is an issue getting a job response from the minion + if [ $status -gt 0 ]; then + echo 1; + else + echo "Received job response from salt minion" >> "$setup_log" 2>&1 + echo 0; + fi + +} + check_soremote_pass() { check_pass_match "$SOREMOTEPASS1" "$SOREMOTEPASS2" "SCMATCH" } @@ -1370,51 +1415,6 @@ saltify() { } -check_service_status() { - - local service_name=$1 - echo "Checking service $service_name status" >> "$setup_log" 2>&1 - systemctl status $service_name >> "$setup_log" 2>&1 - local status=$? - #true if there is an issue with the service false if it is running properly - if [ $status -gt 0 ]; then - echo "$service_name is not running" >> "$setup_log" 2>&1 - echo 1; - else - echo "$service_name is running" >> "$setup_log" 2>&1 - echo 0; - fi - -} - -check_salt_master_status() { - echo "Checking if we can talk to the salt master" >> "$setup_log" 2>&1 - salt-call state.show_top > /dev/null 2>&1 - local status=$? - #true if there is an issue talking to salt master - if [ $status -gt 0 ]; then - echo 1; - else - echo "Can talk to salt master" >> "$setup_log" 2>&1 - echo 0; - fi - -} - -check_salt_minion_status() { - echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 - salt "$MINION_ID" test.ping >> "$setup_log" 2>&1 - local status=$? - #true if there is an issue getting a job response from the minion - if [ $status -gt 0 ]; then - echo 1; - else - echo "Received job response from salt minion" >> "$setup_log" 2>&1 - echo 0; - fi - -} - salt_checkin() { case "$install_type" in From ff84640aad5465a4c5f0729ed88ffecfeb1e070a Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 13:59:23 -0400 Subject: [PATCH 292/376] add pcap to import node, test not starting zeek docker by default --- salt/top.sls | 1 + salt/zeek/init.sls | 3 +++ salt/zeek/map.jinja | 6 ++++++ setup/so-functions | 1 + setup/so-setup | 2 +- 5 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 salt/zeek/map.jinja diff --git a/salt/top.sls b/salt/top.sls index 01eed5343..4b560c3c1 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -399,6 +399,7 @@ base: - firewall - idstools - suricata.manager + - pcap - elasticsearch - kibana - filebeat diff --git a/salt/zeek/init.sls b/salt/zeek/init.sls index 8743878da..f6e1e999e 100644 --- a/salt/zeek/init.sls +++ b/salt/zeek/init.sls @@ -1,3 +1,5 @@ +{% from "zeek/map.jinja" import START with context %} + {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} @@ -167,6 +169,7 @@ localzeeksync: so-zeek: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-zeek:{{ VERSION }} + - start: {{ START }} - privileged: True - binds: - /nsm/zeek/logs:/nsm/zeek/logs:rw diff --git a/salt/zeek/map.jinja b/salt/zeek/map.jinja new file mode 100644 index 000000000..ad4d70e80 --- /dev/null +++ b/salt/zeek/map.jinja @@ -0,0 +1,6 @@ +# don't start the docker container if it is an import node +{% if grains.id.split('_')|last == 'import' %} + {% set START = False %} +{% else %} + {% set START = True %} +{% endif %} \ No newline at end of file diff --git a/setup/so-functions b/setup/so-functions index 5a63d7c12..8e94dc373 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -804,6 +804,7 @@ docker_seed_registry() { "so-filebeat:$VERSION" \ "so-suricata:$VERSION" \ "so-soc:$VERSION" \ + "so-steno:$VERSION" \ "so-elasticsearch:$VERSION" \ "so-kibana:$VERSION" \ "so-kratos:$VERSION" \ diff --git 
a/setup/so-setup b/setup/so-setup index 45b08433f..56647b1b4 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -577,7 +577,7 @@ fi salt-call state.apply -l info elasticsearch >> $setup_log 2>&1 fi - if [[ $is_sensor ]]; then + if [[ $is_sensor || $is_import ]]; then set_progress_str 65 "$(print_salt_state_apply 'pcap')" salt-call state.apply -l info pcap >> $setup_log 2>&1 fi From 04340728ffa9b070b026ea6e04ae39fe15d475c6 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 14 Aug 2020 14:28:49 -0400 Subject: [PATCH 293/376] Improve title spacing among standard log lines --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 5a63d7c12..95409b84a 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -37,7 +37,7 @@ info() { } title() { - echo -e "-----------------------------\n $1\n-----------------------------\n" >> "$setup_log" 2>&1 + echo -e "\n-----------------------------\n $1\n-----------------------------\n" >> "$setup_log" 2>&1 } logCmd() { From 3836f0030979b3387544dc438eca37c9e50b43e4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 14:32:34 -0400 Subject: [PATCH 294/376] allow sensoroni port for import node --- salt/firewall/assigned_hostgroups.map.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/firewall/assigned_hostgroups.map.yaml b/salt/firewall/assigned_hostgroups.map.yaml index ef9e6fe0c..b6dd7b9bc 100644 --- a/salt/firewall/assigned_hostgroups.map.yaml +++ b/salt/firewall/assigned_hostgroups.map.yaml @@ -506,6 +506,7 @@ role: portgroups: - {{ portgroups.beats_5044 }} - {{ portgroups.beats_5644 }} + - {{ portgroups.sensoroni }} search_node: portgroups: - {{ portgroups.redis }} From f9a6b8d2315e61f928ad91a2d4cf8c707d3843c4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 14:39:02 -0400 Subject: [PATCH 295/376] remove zeek and suricata from so-status for import node --- salt/common/maps/import.map.jinja | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/salt/common/maps/import.map.jinja b/salt/common/maps/import.map.jinja index adb266809..324536d11 100644 --- a/salt/common/maps/import.map.jinja +++ b/salt/common/maps/import.map.jinja @@ -5,8 +5,6 @@ 'so-soc', 'so-kratos', 'so-elasticsearch', - 'so-kibana', - 'so-suricata', - 'so-zeek' + 'so-kibana' ] } %} \ No newline at end of file From 7fa5e17935fc1a3e3b71ad203bd8ef12035afbb6 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 14 Aug 2020 14:40:12 -0400 Subject: [PATCH 296/376] Correct if logic for determining when to show web interface URL --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index f58e7cace..6cf4374fc 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -1064,7 +1064,7 @@ whiptail_setup_complete() { [ -n "$TESTING" ] && return - if [[ -n "$REDIRECTIT" && is_manager ]]; then + if [[ -n "$REDIRECTIT" && $is_manager = true ]]; then if [[ -n $ALLOW_CIDR ]]; then local sentence_prefix="Access" else From c9d6293f8f75f9a6be6d86056543ee50c67d3f06 Mon Sep 17 00:00:00 2001 From: weslambert Date: Fri, 14 Aug 2020 14:41:35 -0400 Subject: [PATCH 297/376] Don't copy SSH key if automated install --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 45b08433f..e99ab399b 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -430,7 +430,7 @@ host_pillar >> $setup_log 2>&1 if [[ $is_minion || $is_import ]]; then set_updates >> 
$setup_log 2>&1 - copy_ssh_key >> $setup_log 2>&1 + [ "$automated" == no ] && copy_ssh_key >> $setup_log 2>&1 fi # Begin install From 35027e32b35938fe4d2ded10be0cba7b2b651cf9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 14:43:37 -0400 Subject: [PATCH 298/376] don't constantly run steno or suricata containers for import node --- salt/pcap/init.sls | 2 ++ salt/pcap/map.jinja | 6 ++++++ salt/suricata/init.sls | 2 ++ salt/suricata/map.jinja | 6 ++++++ 4 files changed, 16 insertions(+) create mode 100644 salt/pcap/map.jinja create mode 100644 salt/suricata/map.jinja diff --git a/salt/pcap/init.sls b/salt/pcap/init.sls index 3db7a227c..135b49334 100644 --- a/salt/pcap/init.sls +++ b/salt/pcap/init.sls @@ -18,6 +18,7 @@ {% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %} {% set BPF_STENO = salt['pillar.get']('steno:bpf', None) %} {% set BPF_COMPILED = "" %} +{% from "pcap/map.jinja" import START with context %} # PCAP Section @@ -131,6 +132,7 @@ sensoronilog: so-steno: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-steno:{{ VERSION }} + - start: {{ START }} - network_mode: host - privileged: True - port_bindings: diff --git a/salt/pcap/map.jinja b/salt/pcap/map.jinja new file mode 100644 index 000000000..ad4d70e80 --- /dev/null +++ b/salt/pcap/map.jinja @@ -0,0 +1,6 @@ +# don't start the docker container if it is an import node +{% if grains.id.split('_')|last == 'import' %} + {% set START = False %} +{% else %} + {% set START = True %} +{% endif %} \ No newline at end of file diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index 783f174ca..a15255af1 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -23,6 +23,7 @@ {# import_yaml 'suricata/files/defaults2.yaml' as suricata #} {% from 'suricata/suricata_config.map.jinja' import suricata_defaults as suricata_config with context %} +{% from "suricata/map.jinja" import START with context %} # Suricata @@ -134,6 +135,7 @@ suribpf: so-suricata: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} + - start: {{ START }} - privileged: True - environment: - INTERFACE={{ interface }} diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja new file mode 100644 index 000000000..ad4d70e80 --- /dev/null +++ b/salt/suricata/map.jinja @@ -0,0 +1,6 @@ +# don't start the docker container if it is an import node +{% if grains.id.split('_')|last == 'import' %} + {% set START = False %} +{% else %} + {% set START = True %} +{% endif %} \ No newline at end of file From 11ebc6b8b20f48e75ecfb30b1dc298e846ce8e16 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 14 Aug 2020 15:28:35 -0400 Subject: [PATCH 299/376] Do not cancel setup if user chooses not to run so-allow during setup --- setup/so-whiptail | 2 -- 1 file changed, 2 deletions(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 6cf4374fc..1e019b58c 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -1123,8 +1123,6 @@ whiptail_so_allow() { export ALLOW_ROLE='a' export ALLOW_CIDR fi - - whiptail_check_exitstatus $exitstatus } whiptail_gauge_post_setup() { From d963222f3188c21b670f3725e344ff66aefdbbb7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 15:28:47 -0400 Subject: [PATCH 300/376] provide proper url for so-import-pcap based on redirect strategy chosen during setup - https://github.com/Security-Onion-Solutions/securityonion/issues/1039 --- salt/common/tools/sbin/so-import-pcap | 9 +++++---- setup/so-functions | 2 +- 2 files changed, 
6 insertions(+), 5 deletions(-) diff --git a/salt/common/tools/sbin/so-import-pcap b/salt/common/tools/sbin/so-import-pcap index 6e2d98daa..f10f5fad9 100755 --- a/salt/common/tools/sbin/so-import-pcap +++ b/salt/common/tools/sbin/so-import-pcap @@ -15,10 +15,11 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set MANAGER = salt['grains.get']('master') %} -{% set VERSION = salt['pillar.get']('global:soversion') %} -{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} +{%- set MANAGER = salt['grains.get']('master') %} +{%- set VERSION = salt['pillar.get']('global:soversion') %} +{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {%- set MANAGERIP = salt['pillar.get']('global:managerip') -%} +{%- set URLBASE = salt['pillar.get']('global:url_base') %} . /usr/sbin/so-common @@ -212,7 +213,7 @@ cat << EOF Import complete! You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser: -https://{{ MANAGERIP }}/#/hunt?q=import.id:${HASH}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC +https://{{ URLBASE }}/#/hunt?q=import.id:${HASH}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC or you can manually set your Time Range to be (in UTC): From: $START_OLDEST To: $END_NEWEST diff --git a/setup/so-functions b/setup/so-functions index 778d1a21d..88539f0cf 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1028,7 +1028,6 @@ manager_pillar() { " osquery: $OSQUERY"\ " thehive: $THEHIVE"\ " playbook: $PLAYBOOK"\ - " url_base: $REDIRECTIT"\ ""\ "elasticsearch:"\ " mainip: $MAINIP"\ @@ -1087,6 +1086,7 @@ manager_global() { " proxy: $PROXY"\ " zeekversion: $ZEEKVERSION"\ " ids: $NIDS"\ + " url_base: $REDIRECTIT"\ " managerip: $MAINIP" > "$global_pillar" # Check if TheHive is enabled. If so, add creds and other details From bac58abf3e71ff80558fa485d682019da0add9c1 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 14 Aug 2020 15:32:33 -0400 Subject: [PATCH 301/376] Airgap round 1 --- setup/so-functions | 89 +++++++++++++++++++++++++++++----------------- setup/so-setup | 16 +++++++++ setup/so-whiptail | 12 +++++++ 3 files changed, 84 insertions(+), 33 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 95409b84a..d155f7881 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -655,24 +655,27 @@ detect_os() { echo "Installing required packages to run installer..." >> "$setup_log" 2>&1 # Install bind-utils so the host command exists - if ! command -v host > /dev/null 2>&1; then + if [[ ! $is_iso ]]; then + if ! command -v host > /dev/null 2>&1; then yum -y install bind-utils >> "$setup_log" 2>&1 - fi - if ! command -v nmcli > /dev/null 2>&1; then + fi + if ! command -v nmcli > /dev/null 2>&1; then { yum -y install NetworkManager; systemctl enable NetworkManager; systemctl start NetworkManager; } >> "$setup_log" 2<&1 - fi - if ! command -v bc > /dev/null 2>&1; then + fi + if ! command -v bc > /dev/null 2>&1; then yum -y install bc >> "$setup_log" 2>&1 - fi - if ! yum versionlock > /dev/null 2>&1; then + fi + if ! 
yum versionlock > /dev/null 2>&1; then yum -y install yum-plugin-versionlock >> "$setup_log" 2>&1 - fi - - + fi + else + logCmd "systemctl enable NetworkManager" + logCmd "systemctl start NetworkManager" + fi elif [ -f /etc/os-release ]; then OS=ubuntu if grep -q "UBUNTU_CODENAME=bionic" /etc/os-release; then @@ -745,8 +748,12 @@ docker_install() { if [ $OS = 'centos' ]; then { yum clean expire-cache; - yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo; - yum -y install docker-ce-19.03.11-3.el7 containerd.io-1.2.13-3.2.el7; + if [[ ! $is_airgap ]]; then + yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo; + fi + if [[ ! $is_iso ]]; then + yum -y install docker-ce-19.03.11-3.el7 containerd.io-1.2.13-3.2.el7; + fi yum versionlock docker-ce-19.03.11-3.el7; yum versionlock containerd.io-1.2.13-3.2.el7 } >> "$setup_log" 2>&1 @@ -989,10 +996,10 @@ install_cleanup() { import_registry_docker() { if [ -f /nsm/docker-registry/docker/registry_image.tar ]; then - service docker start - docker import /nsm/docker-registry/docker/registry_image.tar registry:2 + logCmd "service docker start" + logCmd "docker import /nsm/docker-registry/docker/registry_image.tar registry:2" else - echo "Need to download registry" + info "Need to download registry" fi } @@ -1341,41 +1348,56 @@ saltify() { cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo; } >> "$setup_log" 2>&1 set_progress_str 6 'Installing various dependencies' - yum -y install wget nmap-ncat >> "$setup_log" 2>&1 + if [[ ! $is_iso ]]; then + logCmd "yum -y install wget nmap-ncat" + fi case "$install_type" in 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT') reserve_group_ids >> "$setup_log" 2>&1 - yum -y install epel-release >> "$setup_log" 2>&1 - yum -y install sqlite argon2 curl mariadb-devel >> "$setup_log" 2>&1 + if [[ ! $is_iso ]]; then + logCmd "yum -y install epel-release" + logCmd "yum -y install sqlite argon2 curl mariadb-devel" + fi # Download Ubuntu Keys in case manager updates = 1 mkdir -p /opt/so/gpg >> "$setup_log" 2>&1 - wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 - wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1 - wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1 - cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo >> "$setup_log" 2>&1 + if [[ ! $is_airgap ]]; then + logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub" + logCmd "wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg" + logCmd "wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH" + logCmd "cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo" + fi set_progress_str 7 'Installing salt-master' - yum -y install salt-master-3001.1 >> "$setup_log" 2>&1 + if [[ ! $is_iso ]]; then + logCmd "yum -y install salt-master-3001.1" + fi systemctl enable salt-master >> "$setup_log" 2>&1 ;; *) if [ "$MANAGERUPDATES" = '1' ]; then { - # Create the GPG Public Key for the Salt Repo - cp ./public_keys/salt.pem /etc/pki/rpm-gpg/saltstack-signing-key; + if [[ ! 
$is_airgap ]]; then
+ # Create the GPG Public Key for the Salt Repo
+ cp ./public_keys/salt.pem /etc/pki/rpm-gpg/saltstack-signing-key;
- # Copy repo files over
- cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo;
+ # Copy repo files over
+ cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo;
+ else
+ info "This is airgap"
+ fi
} >> "$setup_log" 2>&1
fi
;;
esac
- cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo >> "$setup_log" 2>&1
- yum clean expire-cache >> "$setup_log" 2>&1
+ if [[ ! $is_airgap ]]; then
+ cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo >> "$setup_log" 2>&1
+ yum clean expire-cache >> "$setup_log" 2>&1
+ fi
set_progress_str 8 'Installing salt-minion & python modules'
{
- yum -y install epel-release
- yum -y install salt-minion-3001.1\
- python3\
+ if [[ ! $is_iso ]]; then
+ yum -y install epel-release
+ yum -y install salt-minion-3001.1\
+ python3\
python36-docker\
python36-dateutil\
python36-m2crypto\
@@ -1385,7 +1407,8 @@ saltify() {
lvm2\
openssl\
jq;
- yum -y update --exclude=salt*;
+ yum -y update --exclude=salt*;
+ fi
systemctl enable salt-minion;
} >> "$setup_log" 2>&1
yum versionlock salt*
diff --git a/setup/so-setup b/setup/so-setup
index e99ab399b..7c5d5a8a2 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -182,6 +182,22 @@ elif [ "$install_type" = 'IMPORT' ]; then
is_import=true
fi
+# Say yes to the dress if it's an ISO install
+if [[ "$setup_type" == 'iso' ]]; then
+ is_iso=true
+fi
+
+# Check if this is an airgap install
+
+if [[ $is_manager ]]; then
+ if [[ $is_iso ]]; then
+ whiptail_airgap
+ if [[ "$INTERWEBS" == 'AIRGAP' ]]; then
+ is_airgap=true
+ fi
+ fi
+fi
+
if [[ $is_manager && $is_sensor ]]; then
check_requirements "standalone"
elif [[ $is_fleet_standalone ]]; then
check_requirements "fleet"
diff --git a/setup/so-whiptail b/setup/so-whiptail
index 6cf4374fc..9dfe566c1 100755
--- a/setup/so-whiptail
+++ b/setup/so-whiptail
@@ -18,6 +18,18 @@
source ./so-variables
source ./so-common-functions
+whiptail_airgap() {
+
+ [ -n "$TESTING" ] && return
+
+ INTERWEBS=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose your install conditions:" 20 75 4 \
"STANDARD" "This manager has internet access" ON \
"AIRGAP" "This manager does not have internet access" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
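+
+ # Note on the capture above: whiptail writes the selected tag (STANDARD
+ # or AIRGAP) to stderr, so the 3>&1 1>&2 2>&3 swap is what lets the
+ # $(...) substitution assign it to INTERWEBS; exitstatus is 0 when the
+ # user pressed Ok and non-zero for Cancel/Esc.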
+ whiptail_check_exitstatus $exitstatus +} whiptail_basic_zeek() { From 47faee48a6a01083d41f09ca6b2d3ad6518ba0e1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 15:58:59 -0400 Subject: [PATCH 302/376] heavynode firewall rules --- salt/firewall/assigned_hostgroups.map.yaml | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/salt/firewall/assigned_hostgroups.map.yaml b/salt/firewall/assigned_hostgroups.map.yaml index b6dd7b9bc..f7f87eb5f 100644 --- a/salt/firewall/assigned_hostgroups.map.yaml +++ b/salt/firewall/assigned_hostgroups.map.yaml @@ -442,16 +442,24 @@ role: chain: DOCKER-USER: hostgroups: - self: + manager: portgroups: - - {{ portgroups.redis }} - - {{ portgroups.beats_5044 }} - - {{ portgroups.beats_5644 }} + - {{ portgroups.elasticsearch_node }} + dockernet: + portgroups: + - {{ portgroups.elasticsearch_node }} + - {{ portgroups.elasticsearch_rest }} + elasticsearch_rest: + portgroups: + - {{ portgroups.elasticsearch_rest }} INPUT: hostgroups: anywhere: portgroups: - {{ portgroups.ssh }} + dockernet: + portgroups: + - {{ portgroups.all }} localhost: portgroups: - {{ portgroups.all }} From 5220b5ae0c0f03bb4ce6668506b3d1984fc16b1c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 16:37:45 -0400 Subject: [PATCH 303/376] use new module.run style --- salt/patch/needs_restarting.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/patch/needs_restarting.sls b/salt/patch/needs_restarting.sls index f60909d22..40280d6e2 100644 --- a/salt/patch/needs_restarting.sls +++ b/salt/patch/needs_restarting.sls @@ -1,5 +1,5 @@ needs_restarting: module.run: - mine.send: - - func: needs_restarting.check + - name: needs_restarting.check - order: last From 43f6f5c27a98af7ab8fcfba79eb9710155c84b7a Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 16:45:28 -0400 Subject: [PATCH 304/376] send service status to /dev/null to prevent FP on install failure --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 773f634cf..c2b9b9fff 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -233,7 +233,7 @@ check_service_status() { local service_name=$1 echo "Checking service $service_name status" >> "$setup_log" 2>&1 - systemctl status $service_name >> "$setup_log" 2>&1 + systemctl status $service_name > /dev/null 2>&1 local status=$? 
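+ # (systemctl status exits 0 only when the unit is active; non-zero codes
+ # such as 3 for an inactive unit still land in $status, so discarding the
+ # human-readable output loses nothing this check relies on.)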
#true if there is an issue with the service false if it is running properly if [ $status -gt 0 ]; then From 387c26f052c1a74990ec61cba7c45113b974986f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 17:10:36 -0400 Subject: [PATCH 305/376] set checking interval for sensoroni on import node --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index c2b9b9fff..b09693e14 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1080,7 +1080,7 @@ manager_global() { if [ -z "$SENSOR_CHECKIN_INTERVAL_MS" ]; then SENSOR_CHECKIN_INTERVAL_MS=10000 - if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'STANDALONE' ]; then + if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'STANDALONE' ] || [ "$install_type" = 'IMPORT' ]; then SENSOR_CHECKIN_INTERVAL_MS=1000 fi fi From e6da423dc390cc60dd0aa3d7168cf2c790c729be Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 14 Aug 2020 17:55:30 -0400 Subject: [PATCH 306/376] change reference from manager:url_base to global:url_base - https://github.com/Security-Onion-Solutions/securityonion/issues/1039 --- salt/common/tools/sbin/so-kibana-config-export | 2 +- salt/elastalert/files/rules/so/suricata_thehive.yaml | 2 +- salt/elastalert/files/rules/so/wazuh_thehive.yaml | 2 +- salt/fleet/event_gen-packages.sls | 2 +- salt/kibana/bin/so-kibana-config-load | 2 +- salt/motd/files/so_motd.jinja | 2 +- salt/nginx/files/navigator_config.json | 2 +- salt/soc/files/kratos/kratos.yaml | 2 +- salt/soctopus/files/SOCtopus.conf | 2 +- salt/soctopus/files/templates/generic.template | 2 +- salt/soctopus/files/templates/osquery.template | 2 +- salt/soctopus/init.sls | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/salt/common/tools/sbin/so-kibana-config-export b/salt/common/tools/sbin/so-kibana-config-export index 6542c3f04..7f578a3ba 100755 --- a/salt/common/tools/sbin/so-kibana-config-export +++ b/salt/common/tools/sbin/so-kibana-config-export @@ -3,7 +3,7 @@ # {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager', False) -%} # {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node', False) -%} # {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', '') %} -# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %} +# {%- set MANAGER = salt['pillar.get']('global:url_base', '') %} # # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC # diff --git a/salt/elastalert/files/rules/so/suricata_thehive.yaml b/salt/elastalert/files/rules/so/suricata_thehive.yaml index 8657d4168..714d63d21 100644 --- a/salt/elastalert/files/rules/so/suricata_thehive.yaml +++ b/salt/elastalert/files/rules/so/suricata_thehive.yaml @@ -1,7 +1,7 @@ {% set es = salt['pillar.get']('global:managerip', '') %} {% set hivehost = salt['pillar.get']('global:managerip', '') %} {% set hivekey = salt['pillar.get']('global:hivekey', '') %} -{% set MANAGER = salt['pillar.get']('manager:url_base', '') %} +{% set MANAGER = salt['pillar.get']('global:url_base', '') %} # Elastalert rule to forward Suricata alerts from Security Onion to a specified TheHive instance. 
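# (A note on the pillar lookup above: salt['pillar.get'] takes a default as
# its second argument, so a missing global:url_base key yields an empty
# string for MANAGER rather than a render failure.)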
# diff --git a/salt/elastalert/files/rules/so/wazuh_thehive.yaml b/salt/elastalert/files/rules/so/wazuh_thehive.yaml index 7fd49e23e..7e5c6e7c0 100644 --- a/salt/elastalert/files/rules/so/wazuh_thehive.yaml +++ b/salt/elastalert/files/rules/so/wazuh_thehive.yaml @@ -1,7 +1,7 @@ {% set es = salt['pillar.get']('global:managerip', '') %} {% set hivehost = salt['pillar.get']('global:managerip', '') %} {% set hivekey = salt['pillar.get']('global:hivekey', '') %} -{% set MANAGER = salt['pillar.get']('manager:url_base', '') %} +{% set MANAGER = salt['pillar.get']('global:url_base', '') %} # Elastalert rule to forward high level Wazuh alerts from Security Onion to a specified TheHive instance. # diff --git a/salt/fleet/event_gen-packages.sls b/salt/fleet/event_gen-packages.sls index bfcfd2a1d..7506763dd 100644 --- a/salt/fleet/event_gen-packages.sls +++ b/salt/fleet/event_gen-packages.sls @@ -11,7 +11,7 @@ {% elif FLEETNODE %} {% set HOSTNAME = grains.host %} {% else %} - {% set HOSTNAME = salt['pillar.get']('manager:url_base') %} + {% set HOSTNAME = salt['pillar.get']('global:url_base') %} {% endif %} so/fleet: diff --git a/salt/kibana/bin/so-kibana-config-load b/salt/kibana/bin/so-kibana-config-load index 2e5d38ade..9d970b1e3 100644 --- a/salt/kibana/bin/so-kibana-config-load +++ b/salt/kibana/bin/so-kibana-config-load @@ -1,7 +1,7 @@ #!/bin/bash # {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager', False) -%} # {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node', False) -%} -# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %} +# {%- set MANAGER = salt['pillar.get']('global:url_base', '') %} KIBANA_VERSION="7.6.1" diff --git a/salt/motd/files/so_motd.jinja b/salt/motd/files/so_motd.jinja index 43ad3b4de..1efb77254 100644 --- a/salt/motd/files/so_motd.jinja +++ b/salt/motd/files/so_motd.jinja @@ -1,6 +1,6 @@ {% set needs_restarting_check = salt['mine.get']('*', 'needs_restarting.check', tgt_type='glob') -%} {% set role = grains.id.split('_') | last -%} -{% set url = salt['pillar.get']('manager:url_base') -%} +{% set url = salt['pillar.get']('global:url_base') -%} {% if role in ['eval', 'managersearch', 'manager', 'standalone'] %} Access the Security Onion web interface at https://{{ url }} diff --git a/salt/nginx/files/navigator_config.json b/salt/nginx/files/navigator_config.json index b0866d742..d4f6e0908 100644 --- a/salt/nginx/files/navigator_config.json +++ b/salt/nginx/files/navigator_config.json @@ -1,4 +1,4 @@ -{%- set URL_BASE = salt['pillar.get']('manager:url_base', '') %} +{%- set URL_BASE = salt['pillar.get']('global:url_base', '') %} { "enterprise_attack_url": "https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json", diff --git a/salt/soc/files/kratos/kratos.yaml b/salt/soc/files/kratos/kratos.yaml index 2171971bc..2e8a408fd 100644 --- a/salt/soc/files/kratos/kratos.yaml +++ b/salt/soc/files/kratos/kratos.yaml @@ -1,4 +1,4 @@ -{%- set WEBACCESS = salt['pillar.get']('manager:url_base', '') -%} +{%- set WEBACCESS = salt['pillar.get']('global:url_base', '') -%} {%- set KRATOSKEY = salt['pillar.get']('kratos:kratoskey', '') -%} selfservice: diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf index 093b4fd3e..39e9c276d 100644 --- a/salt/soctopus/files/SOCtopus.conf +++ b/salt/soctopus/files/SOCtopus.conf @@ -1,4 +1,4 @@ -{%- set MANAGER = salt['pillar.get']('manager:url_base', '') %} +{%- set MANAGER = salt['pillar.get']('global:url_base', '') %} {%- set HIVEKEY = 
salt['pillar.get']('global:hivekey', '') %} {%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %} diff --git a/salt/soctopus/files/templates/generic.template b/salt/soctopus/files/templates/generic.template index e93bc30f8..2dd2c96c7 100644 --- a/salt/soctopus/files/templates/generic.template +++ b/salt/soctopus/files/templates/generic.template @@ -1,4 +1,4 @@ -{% set es = salt['pillar.get']('manager:url_base', '') %} +{% set es = salt['pillar.get']('global:url_base', '') %} {% set hivehost = salt['pillar.get']('global:managerip', '') %} {% set hivekey = salt['pillar.get']('global:hivekey', '') %} alert: diff --git a/salt/soctopus/files/templates/osquery.template b/salt/soctopus/files/templates/osquery.template index de1d1cf0c..9c770fc6f 100644 --- a/salt/soctopus/files/templates/osquery.template +++ b/salt/soctopus/files/templates/osquery.template @@ -1,4 +1,4 @@ -{% set es = salt['pillar.get']('manager:url_base', '') %} +{% set es = salt['pillar.get']('global:url_base', '') %} {% set hivehost = salt['pillar.get']('global:managerip', '') %} {% set hivekey = salt['pillar.get']('global:hivekey', '') %} alert: diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls index 7526974df..39768fc42 100644 --- a/salt/soctopus/init.sls +++ b/salt/soctopus/init.sls @@ -1,7 +1,7 @@ {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} -{%- set MANAGER_URL = salt['pillar.get']('manager:url_base', '') %} +{%- set MANAGER_URL = salt['pillar.get']('global:url_base', '') %} {%- set MANAGER_IP = salt['pillar.get']('global:managerip', '') %} soctopusdir: From d8833abf7329486ed9091466a1f0fb9af7261600 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Sat, 15 Aug 2020 09:42:56 -0400 Subject: [PATCH 307/376] Use load instead of import on the registry image itself --- salt/registry/init.sls | 2 +- setup/so-functions | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/registry/init.sls b/salt/registry/init.sls index 9ee44d1de..6e17d639e 100644 --- a/salt/registry/init.sls +++ b/salt/registry/init.sls @@ -40,7 +40,7 @@ dockerregistryconf: # Install the registry container so-dockerregistry: docker_container.running: - - image: registry:2 + - image: registry:latest - hostname: so-registry - restart_policy: always - port_bindings: diff --git a/setup/so-functions b/setup/so-functions index b52868ca8..741929399 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -998,7 +998,7 @@ install_cleanup() { import_registry_docker() { if [ -f /nsm/docker-registry/docker/registry_image.tar ]; then logCmd "service docker start" - logCmd "docker import /nsm/docker-registry/docker/registry_image.tar registry:2" + logCmd "docker load -i /nsm/docker-registry/docker/registry_image.tar" else info "Need to download registry" fi From dcb110b31ffedcdd3e392a0fd84c86b620e7d669 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 17 Aug 2020 09:57:00 -0400 Subject: [PATCH 308/376] Add rc1 conditional logic --- salt/common/tools/sbin/soup | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 8f0325a6c..973a13eb7 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -89,6 +89,18 @@ highstate() { pillar_changes() { # This function is to add any new pillar items if needed. echo "Checking to see if pillar changes are needed." 
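+
+ # A sketch of what the rc.1 migration below assumes: the old local
+ # pillar file is /opt/so/saltstack/local/pillar/static.sls and its
+ # first line is the top-level key (presumably "static:"), so the
+ # mv/sed pair renames the file and retitles that key to "global:" to
+ # keep pillar.get('global:...') lookups resolving after the upgrade.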
+
+ # Move baseurl in global.sls
+ if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
+ # Move the static file to global.sls
+ echo "Migrating static.sls to global.sls"
+ mv -v /opt/so/saltstack/local/pillar/static.sls /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1
+ sed -i '1c\global:' /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1
+
+ # Moving baseurl inside static.sls
+
+
+ fi
}
update_dockers() {
From a82c4c24fbbb77f7dd6f96388b36219a7c32c000 Mon Sep 17 00:00:00 2001
From: m0duspwnens
Date: Mon, 17 Aug 2020 10:55:07 -0400
Subject: [PATCH 309/376] move url_base from manager to global when running soup
---
salt/common/tools/sbin/soup | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup
index 973a13eb7..f7e46e780 100755
--- a/salt/common/tools/sbin/soup
+++ b/salt/common/tools/sbin/soup
@@ -29,6 +29,7 @@ manager_check() {
MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch'|'so-import')$ ]]; then
echo "This is a manager. We can proceed."
+ MINIONID=$(salt-call grains.get id --out=txt|awk -F: {'print $2'}|tr -d ' ')
else
echo "Please run soup on the manager. The manager controls all updates."
exit 0
@@ -97,8 +98,10 @@ pillar_changes() {
mv -v /opt/so/saltstack/local/pillar/static.sls /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1
sed -i '1c\global:' /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1
- # Moving baseurl inside static.sls
-
+ # Moving baseurl from minion sls file to inside global.sls
+ local line=$(grep '^ url_base:' /opt/so/saltstack/local/pillar/minions/$MINIONID)
+ sed -i '/^ url_base:/d' /opt/so/saltstack/local/pillar/minions/$MINIONID;
+ sed -i "/^global:/a \\$line" /opt/so/saltstack/local/pillar/global.sls;
fi
}
From eb1272c12766aab74f9178e3f09087ccccbacc71 Mon Sep 17 00:00:00 2001
From: m0duspwnens
Date: Mon, 17 Aug 2020 12:26:44 -0400
Subject: [PATCH 310/376] add sls extension
---
salt/common/tools/sbin/soup | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup
index f7e46e780..a93a000b7 100755
--- a/salt/common/tools/sbin/soup
+++ b/salt/common/tools/sbin/soup
@@ -99,8 +99,8 @@
sed -i '1c\global:' /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1
# Moving baseurl from minion sls file to inside global.sls
- local line=$(grep '^ url_base:' /opt/so/saltstack/local/pillar/minions/$MINIONID)
- sed -i '/^ url_base:/d' /opt/so/saltstack/local/pillar/minions/$MINIONID;
+ local line=$(grep '^ url_base:' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls)
+ sed -i '/^ url_base:/d' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls;
sed -i "/^global:/a \\$line" /opt/so/saltstack/local/pillar/global.sls;
From a8aa97edd267fadee5d63b7067762c2819cbf896 Mon Sep 17 00:00:00 2001
From: Josh Brower
Date: Mon, 17 Aug 2020 14:09:17 -0400
Subject: [PATCH 311/376] Playbook schema update - RC2
---
salt/playbook/files/playbook_db_init.sql | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/salt/playbook/files/playbook_db_init.sql b/salt/playbook/files/playbook_db_init.sql
index 1b1535fe3..83e5d6f54 100644
--- a/salt/playbook/files/playbook_db_init.sql
+++ b/salt/playbook/files/playbook_db_init.sql
@@ -455,7 +455,7 @@ CREATE TABLE `custom_values` (
PRIMARY KEY (`id`),
KEY `custom_values_customized`
(`customized_type`,`customized_id`), KEY `index_custom_values_on_custom_field_id` (`custom_field_id`) -) ENGINE=InnoDB AUTO_INCREMENT=134139 DEFAULT CHARSET=latin1; +) ENGINE=InnoDB AUTO_INCREMENT=145325 DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; -- @@ -825,7 +825,7 @@ CREATE TABLE `journal_details` ( `value` longtext, PRIMARY KEY (`id`), KEY `journal_details_journal_id` (`journal_id`) -) ENGINE=InnoDB AUTO_INCREMENT=9 DEFAULT CHARSET=latin1; +) ENGINE=InnoDB AUTO_INCREMENT=792 DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; -- @@ -857,7 +857,7 @@ CREATE TABLE `journals` ( KEY `index_journals_on_user_id` (`user_id`), KEY `index_journals_on_journalized_id` (`journalized_id`), KEY `index_journals_on_created_on` (`created_on`) -) ENGINE=InnoDB AUTO_INCREMENT=8218 DEFAULT CHARSET=latin1; +) ENGINE=InnoDB AUTO_INCREMENT=9502 DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; -- @@ -1146,7 +1146,7 @@ CREATE TABLE `queries` ( LOCK TABLES `queries` WRITE; /*!40000 ALTER TABLE `queries` DISABLE KEYS */; -INSERT INTO `queries` VALUES (3,1,'All Plays','---\ntracker_id:\n :operator: \"=\"\n :values:\n - \'1\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(4,NULL,'Disabled Plays','---\nstatus_id:\n :operator: \"=\"\n :values:\n - \'6\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(5,NULL,'Draft Plays','---\nstatus_id:\n :operator: \"=\"\n :values:\n - \'2\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(6,NULL,'Playbook - Community Sigma','---\ncf_13:\n :operator: \"=\"\n :values:\n - community\n',1,'---\n- :status\n- :cf_10\n- :cf_18\n- :cf_19\n- :cf_20\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(8,NULL,'Playbook - Internal','---\ncf_13:\n :operator: \"=\"\n :values:\n - Internal\n',1,'---\n- :status\n- :cf_10\n- :cf_14\n- :cf_16\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(9,NULL,'Active Plays','---\ntracker_id:\n :operator: \"=\"\n :values:\n - \'1\'\nstatus_id:\n :operator: \"=\"\n :values:\n - \'3\'\n',1,'---\n- :status\n- :cf_10\n- :cf_13\n- :cf_18\n- :cf_19\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'); +INSERT INTO `queries` VALUES (3,1,'All Plays','---\ntracker_id:\n :operator: \"=\"\n :values:\n - \'1\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(4,NULL,'Inactive Plays','---\nstatus_id:\n :operator: \"=\"\n :values:\n - \'4\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(5,NULL,'Draft Plays','---\nstatus_id:\n :operator: \"=\"\n 
:values:\n - \'2\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(6,NULL,'Playbook - Community Sigma','---\ncf_13:\n :operator: \"=\"\n :values:\n - community\n',1,'---\n- :status\n- :cf_10\n- :cf_18\n- :cf_19\n- :cf_20\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(8,NULL,'Playbook - Internal','---\ncf_13:\n :operator: \"=\"\n :values:\n - Internal\n',1,'---\n- :status\n- :cf_10\n- :cf_14\n- :cf_16\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(9,NULL,'Active Plays','---\ntracker_id:\n :operator: \"=\"\n :values:\n - \'1\'\nstatus_id:\n :operator: \"=\"\n :values:\n - \'3\'\n',1,'---\n- :status\n- :cf_10\n- :cf_13\n- :cf_18\n- :cf_19\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'); /*!40000 ALTER TABLE `queries` ENABLE KEYS */; UNLOCK TABLES; @@ -1310,7 +1310,7 @@ CREATE TABLE `settings` ( LOCK TABLES `settings` WRITE; /*!40000 ALTER TABLE `settings` DISABLE KEYS */; -INSERT INTO `settings` VALUES (1,'ui_theme','circle','2020-04-26 13:11:26'),(2,'default_language','en','2020-04-26 13:11:26'),(3,'force_default_language_for_anonymous','0','2020-04-26 13:11:26'),(4,'force_default_language_for_loggedin','0','2020-04-26 13:11:26'),(5,'start_of_week','','2020-04-26 13:11:26'),(6,'date_format','','2020-04-26 13:11:26'),(7,'time_format','','2020-04-26 13:11:26'),(8,'timespan_format','decimal','2020-04-26 13:11:26'),(9,'user_format','firstname_lastname','2020-05-02 12:45:00'),(10,'gravatar_enabled','1','2020-05-02 12:41:07'),(11,'thumbnails_enabled','1','2020-04-26 13:11:26'),(12,'thumbnails_size','100','2020-04-26 13:11:26'),(13,'new_item_menu_tab','0','2020-04-26 13:11:30'),(14,'login_required','0','2020-07-10 19:32:45'),(15,'autologin','0','2020-04-26 13:11:54'),(16,'self_registration','0','2020-04-26 13:11:54'),(17,'show_custom_fields_on_registration','0','2020-04-26 13:11:54'),(18,'password_min_length','8','2020-04-26 13:11:54'),(19,'password_required_char_classes','--- []\n','2020-04-26 13:11:54'),(20,'password_max_age','0','2020-04-26 13:11:54'),(21,'lost_password','1','2020-04-26 13:11:54'),(22,'openid','0','2020-04-26 13:11:55'),(23,'session_lifetime','0','2020-04-26 13:11:55'),(24,'session_timeout','0','2020-04-26 13:11:55'),(25,'rest_api_enabled','1','2020-04-26 13:11:58'),(26,'jsonp_enabled','0','2020-04-26 13:11:58'),(27,'default_projects_public','0','2020-04-26 13:12:21'),(28,'default_projects_modules','---\n- sigma_editor\n','2020-04-26 13:12:21'),(29,'default_projects_tracker_ids','--- []\n','2020-04-26 13:12:21'),(30,'sequential_project_identifiers','0','2020-04-26 13:12:21'),(31,'project_list_defaults','---\n:column_names:\n- name\n- identifier\n- short_description\n','2020-04-26 13:12:21'),(32,'app_title','Playbook','2020-04-26 18:17:51'),(33,'welcome_text','','2020-04-26 18:17:51'),(34,'per_page_options','25,75,150','2020-05-02 12:41:38'),(35,'search_results_per_page','10','2020-04-26 18:17:51'),(36,'activity_days_default','30','2020-04-26 18:17:51'),(37,'host_name','localhost:3000','2020-04-26 
18:17:51'),(38,'protocol','http','2020-04-26 18:17:51'),(39,'text_formatting','textile','2020-04-26 18:17:51'),(40,'cache_formatted_text','0','2020-04-26 18:17:51'),(41,'wiki_compression','','2020-04-26 18:17:51'),(42,'feeds_limit','15','2020-04-26 18:17:51'),(43,'plugin_redmine_playbook','--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nproject: \'1\'\nconvert_url: http://10.66.166.188:7000/playbook/sigmac\ncreate_url: http://10.66.166.188:7000/playbook/play','2020-05-02 12:39:20'),(44,'cross_project_issue_relations','0','2020-05-01 16:27:33'),(45,'link_copied_issue','no','2020-05-01 16:27:33'),(46,'cross_project_subtasks','','2020-05-01 16:27:33'),(47,'close_duplicate_issues','0','2020-05-01 16:27:33'),(48,'issue_group_assignment','0','2020-05-01 16:27:33'),(49,'default_issue_start_date_to_creation_date','1','2020-05-01 16:27:33'),(50,'display_subprojects_issues','0','2020-05-01 16:27:33'),(51,'issue_done_ratio','issue_field','2020-05-01 16:27:33'),(52,'non_working_week_days','---\n- \'6\'\n- \'7\'\n','2020-05-01 16:27:33'),(53,'issues_export_limit','500','2020-05-01 16:27:33'),(54,'gantt_items_limit','500','2020-05-01 16:27:33'),(55,'gantt_months_limit','24','2020-05-01 16:27:33'),(56,'parent_issue_dates','derived','2020-05-01 16:27:33'),(57,'parent_issue_priority','derived','2020-05-01 16:27:33'),(58,'parent_issue_done_ratio','derived','2020-05-01 16:27:33'),(59,'issue_list_default_columns','---\n- status\n- cf_10\n- cf_13\n- cf_14\n- cf_1\n- updated_on\n','2020-05-01 19:32:13'),(60,'issue_list_default_totals','--- []\n','2020-05-01 16:27:33'),(61,'enabled_scm','--- []\n','2020-05-01 16:27:47'),(62,'autofetch_changesets','0','2020-05-01 16:27:47'),(63,'sys_api_enabled','0','2020-05-01 16:27:47'),(64,'repository_log_display_limit','100','2020-05-01 16:27:47'),(65,'commit_logs_formatting','1','2020-05-01 16:27:47'),(66,'commit_ref_keywords','refs,references,IssueID','2020-05-01 16:27:47'),(67,'commit_cross_project_ref','0','2020-05-01 16:27:47'),(68,'commit_logtime_enabled','0','2020-05-01 16:27:47'),(69,'commit_update_keywords','--- []\n','2020-05-01 16:27:47'),(70,'gravatar_default','','2020-05-02 12:41:07'); +INSERT INTO `settings` VALUES (1,'ui_theme','circle','2020-04-26 13:11:26'),(2,'default_language','en','2020-04-26 13:11:26'),(3,'force_default_language_for_anonymous','0','2020-04-26 13:11:26'),(4,'force_default_language_for_loggedin','0','2020-04-26 13:11:26'),(5,'start_of_week','','2020-04-26 13:11:26'),(6,'date_format','','2020-04-26 13:11:26'),(7,'time_format','','2020-04-26 13:11:26'),(8,'timespan_format','decimal','2020-04-26 13:11:26'),(9,'user_format','firstname_lastname','2020-05-02 12:45:00'),(10,'gravatar_enabled','1','2020-05-02 12:41:07'),(11,'thumbnails_enabled','1','2020-04-26 13:11:26'),(12,'thumbnails_size','100','2020-04-26 13:11:26'),(13,'new_item_menu_tab','0','2020-04-26 13:11:30'),(14,'login_required','0','2020-07-10 19:32:45'),(15,'autologin','0','2020-04-26 13:11:54'),(16,'self_registration','0','2020-04-26 13:11:54'),(17,'show_custom_fields_on_registration','0','2020-04-26 13:11:54'),(18,'password_min_length','8','2020-04-26 13:11:54'),(19,'password_required_char_classes','--- []\n','2020-04-26 13:11:54'),(20,'password_max_age','0','2020-04-26 13:11:54'),(21,'lost_password','1','2020-04-26 13:11:54'),(22,'openid','0','2020-04-26 13:11:55'),(23,'session_lifetime','0','2020-04-26 13:11:55'),(24,'session_timeout','0','2020-04-26 13:11:55'),(25,'rest_api_enabled','1','2020-04-26 13:11:58'),(26,'jsonp_enabled','0','2020-04-26 
13:11:58'),(27,'default_projects_public','0','2020-04-26 13:12:21'),(28,'default_projects_modules','---\n- sigma_editor\n','2020-04-26 13:12:21'),(29,'default_projects_tracker_ids','--- []\n','2020-04-26 13:12:21'),(30,'sequential_project_identifiers','0','2020-04-26 13:12:21'),(31,'project_list_defaults','---\n:column_names:\n- name\n- identifier\n- short_description\n','2020-04-26 13:12:21'),(32,'app_title','Playbook','2020-04-26 18:17:51'),(33,'welcome_text','','2020-04-26 18:17:51'),(34,'per_page_options','25,75,150','2020-05-02 12:41:38'),(35,'search_results_per_page','10','2020-04-26 18:17:51'),(36,'activity_days_default','30','2020-04-26 18:17:51'),(37,'host_name','localhost:3000','2020-04-26 18:17:51'),(38,'protocol','http','2020-04-26 18:17:51'),(39,'text_formatting','textile','2020-04-26 18:17:51'),(40,'cache_formatted_text','0','2020-04-26 18:17:51'),(41,'wiki_compression','','2020-04-26 18:17:51'),(42,'feeds_limit','15','2020-04-26 18:17:51'),(43,'plugin_redmine_playbook','--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nproject: \'1\'\nconvert_url: http://10.66.166.135:7000/playbook/sigmac\ncreate_url: http://10.66.166.135:7000/playbook/play','2020-05-02 12:39:20'),(44,'cross_project_issue_relations','0','2020-05-01 16:27:33'),(45,'link_copied_issue','no','2020-05-01 16:27:33'),(46,'cross_project_subtasks','','2020-05-01 16:27:33'),(47,'close_duplicate_issues','0','2020-05-01 16:27:33'),(48,'issue_group_assignment','0','2020-05-01 16:27:33'),(49,'default_issue_start_date_to_creation_date','1','2020-05-01 16:27:33'),(50,'display_subprojects_issues','0','2020-05-01 16:27:33'),(51,'issue_done_ratio','issue_field','2020-05-01 16:27:33'),(52,'non_working_week_days','---\n- \'6\'\n- \'7\'\n','2020-05-01 16:27:33'),(53,'issues_export_limit','500','2020-05-01 16:27:33'),(54,'gantt_items_limit','500','2020-05-01 16:27:33'),(55,'gantt_months_limit','24','2020-05-01 16:27:33'),(56,'parent_issue_dates','derived','2020-05-01 16:27:33'),(57,'parent_issue_priority','derived','2020-05-01 16:27:33'),(58,'parent_issue_done_ratio','derived','2020-05-01 16:27:33'),(59,'issue_list_default_columns','---\n- status\n- cf_10\n- cf_13\n- cf_14\n- cf_1\n- updated_on\n','2020-05-01 19:32:13'),(60,'issue_list_default_totals','--- []\n','2020-05-01 16:27:33'),(61,'enabled_scm','--- []\n','2020-05-01 16:27:47'),(62,'autofetch_changesets','0','2020-05-01 16:27:47'),(63,'sys_api_enabled','0','2020-05-01 16:27:47'),(64,'repository_log_display_limit','100','2020-05-01 16:27:47'),(65,'commit_logs_formatting','1','2020-05-01 16:27:47'),(66,'commit_ref_keywords','refs,references,IssueID','2020-05-01 16:27:47'),(67,'commit_cross_project_ref','0','2020-05-01 16:27:47'),(68,'commit_logtime_enabled','0','2020-05-01 16:27:47'),(69,'commit_update_keywords','--- []\n','2020-05-01 16:27:47'),(70,'gravatar_default','','2020-05-02 12:41:07'); /*!40000 ALTER TABLE `settings` ENABLE KEYS */; UNLOCK TABLES; @@ -1371,7 +1371,7 @@ CREATE TABLE `tokens` ( PRIMARY KEY (`id`), UNIQUE KEY `tokens_value` (`value`), KEY `index_tokens_on_user_id` (`user_id`) -) ENGINE=InnoDB AUTO_INCREMENT=62 DEFAULT CHARSET=latin1; +) ENGINE=InnoDB AUTO_INCREMENT=67 DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; -- @@ -1380,7 +1380,7 @@ CREATE TABLE `tokens` ( LOCK TABLES `tokens` WRITE; /*!40000 ALTER TABLE `tokens` DISABLE KEYS */; -INSERT INTO `tokens` VALUES (3,1,'feeds','6e5575602e1227c188cd85ef6d12608bb8701193','2020-04-26 13:10:46','2020-04-26 
13:10:46'),(4,1,'session','999412fa9badda7423c6c654d6364c32c20b3eac','2020-04-26 18:07:03','2020-04-26 18:12:02'),(5,1,'session','124ad4acbf87a942426350e7ad028c1d119c3851','2020-04-26 18:17:11','2020-04-26 18:19:24'),(9,1,'session','2890c663e0552f26ddb92acad6ab3b6d05b92915','2020-04-26 18:51:15','2020-04-26 18:51:15'),(19,1,'session','b7ffb106ea0b34650dd9c1770f74c2b0ffe166b2','2020-05-01 16:52:33','2020-05-01 18:02:30'),(20,1,'session','f44cfcf918eef59ffda47991c431d9c2b2ac6113','2020-05-01 18:05:56','2020-05-01 18:05:56'),(23,9,'feeds','211918c9d7168979b5dc19bebb14573b928a5067','2020-05-01 18:26:17','2020-05-01 18:26:17'),(25,9,'api','de6639318502476f2fa5aa06f43f51fb389a3d7f','2020-05-01 18:26:31','2020-05-01 18:26:31'),(46,1,'session','2d0c8f8ae641c06d8c2362746846440d465d53c0','2020-05-06 20:48:01','2020-05-06 20:48:07'),(59,1,'session','2afe6590653d59a697d1436729c64f322a2eff82','2020-07-01 18:11:07','2020-07-01 20:30:43'),(61,1,'session','b01f95709ca1ab086a049cf9c5afd81ca9d4526e','2020-07-15 16:30:42','2020-07-15 16:31:40'); +INSERT INTO `tokens` VALUES (3,1,'feeds','6e5575602e1227c188cd85ef6d12608bb8701193','2020-04-26 13:10:46','2020-04-26 13:10:46'),(4,1,'session','999412fa9badda7423c6c654d6364c32c20b3eac','2020-04-26 18:07:03','2020-04-26 18:12:02'),(5,1,'session','124ad4acbf87a942426350e7ad028c1d119c3851','2020-04-26 18:17:11','2020-04-26 18:19:24'),(9,1,'session','2890c663e0552f26ddb92acad6ab3b6d05b92915','2020-04-26 18:51:15','2020-04-26 18:51:15'),(19,1,'session','b7ffb106ea0b34650dd9c1770f74c2b0ffe166b2','2020-05-01 16:52:33','2020-05-01 18:02:30'),(20,1,'session','f44cfcf918eef59ffda47991c431d9c2b2ac6113','2020-05-01 18:05:56','2020-05-01 18:05:56'),(23,9,'feeds','211918c9d7168979b5dc19bebb14573b928a5067','2020-05-01 18:26:17','2020-05-01 18:26:17'),(25,9,'api','de6639318502476f2fa5aa06f43f51fb389a3d7f','2020-05-01 18:26:31','2020-05-01 18:26:31'),(46,1,'session','2d0c8f8ae641c06d8c2362746846440d465d53c0','2020-05-06 20:48:01','2020-05-06 20:48:07'),(59,1,'session','2afe6590653d59a697d1436729c64f322a2eff82','2020-07-01 18:11:07','2020-07-01 20:30:43'),(61,1,'session','b01f95709ca1ab086a049cf9c5afd81ca9d4526e','2020-07-15 16:30:42','2020-07-15 16:31:40'),(62,1,'session','d29acdcd0b8e4ebf78ef8f696d3e76df7e2ab2ac','2020-08-17 14:51:59','2020-08-17 14:53:22'); /*!40000 ALTER TABLE `tokens` ENABLE KEYS */; UNLOCK TABLES; @@ -1481,7 +1481,7 @@ CREATE TABLE `users` ( LOCK TABLES `users` WRITE; /*!40000 ALTER TABLE `users` DISABLE KEYS */; -INSERT INTO `users` VALUES (1,'admin','95535e9f7a386c412f20134ebb869c00cf346477','Admin','Admin',1,1,'2020-07-15 16:30:42','',NULL,'2020-04-26 13:08:34','2020-04-26 13:10:45','User',NULL,'all','5ceb2c95ce1593d4ba034d385ceefb2f',0,'2020-04-26 13:10:27'),(2,'','','','Anonymous users',0,1,NULL,'',NULL,'2020-04-26 13:08:38','2020-04-26 13:08:38','GroupAnonymous',NULL,'',NULL,0,NULL),(3,'','','','Non member users',0,1,NULL,'',NULL,'2020-04-26 13:08:38','2020-04-26 13:08:38','GroupNonMember',NULL,'',NULL,0,NULL),(4,'','','','Anonymous',0,0,NULL,'',NULL,'2020-04-26 13:09:44','2020-04-26 13:09:44','AnonymousUser',NULL,'only_my_events',NULL,0,NULL),(5,'','','','Analysts',0,1,NULL,'',NULL,'2020-04-26 18:43:40','2020-04-26 18:43:40','Group',NULL,'',NULL,0,NULL),(6,'','','','Automation',0,1,NULL,'',NULL,'2020-04-26 18:43:47','2020-04-26 18:43:47','Group',NULL,'',NULL,0,NULL),(7,'','','','Admins',0,1,NULL,'',NULL,'2020-04-26 18:43:58','2020-04-26 
18:43:58','Group',NULL,'',NULL,0,NULL),(9,'automation','d2e7d78af1f0c0637765ae8cf1a359c4a30034c9','SecOps','Automation',0,1,'2020-05-01 18:26:17','en',NULL,'2020-04-26 18:47:46','2020-05-01 18:26:10','User',NULL,'none','41043e596f70e327e34fc99c861f5b31',0,'2020-05-01 18:26:10'); +INSERT INTO `users` VALUES (1,'admin','95535e9f7a386c412f20134ebb869c00cf346477','Admin','Admin',1,1,'2020-08-17 18:03:20','',NULL,'2020-04-26 13:08:34','2020-04-26 13:10:45','User',NULL,'all','5ceb2c95ce1593d4ba034d385ceefb2f',0,'2020-04-26 13:10:27'),(2,'','','','Anonymous users',0,1,NULL,'',NULL,'2020-04-26 13:08:38','2020-04-26 13:08:38','GroupAnonymous',NULL,'',NULL,0,NULL),(3,'','','','Non member users',0,1,NULL,'',NULL,'2020-04-26 13:08:38','2020-04-26 13:08:38','GroupNonMember',NULL,'',NULL,0,NULL),(4,'','','','Anonymous',0,0,NULL,'',NULL,'2020-04-26 13:09:44','2020-04-26 13:09:44','AnonymousUser',NULL,'only_my_events',NULL,0,NULL),(5,'','','','Analysts',0,1,NULL,'',NULL,'2020-04-26 18:43:40','2020-04-26 18:43:40','Group',NULL,'',NULL,0,NULL),(6,'','','','Automation',0,1,NULL,'',NULL,'2020-04-26 18:43:47','2020-04-26 18:43:47','Group',NULL,'',NULL,0,NULL),(7,'','','','Admins',0,1,NULL,'',NULL,'2020-04-26 18:43:58','2020-04-26 18:43:58','Group',NULL,'',NULL,0,NULL),(9,'automation','d2e7d78af1f0c0637765ae8cf1a359c4a30034c9','SecOps','Automation',0,1,'2020-05-01 18:26:17','en',NULL,'2020-04-26 18:47:46','2020-05-01 18:26:10','User',NULL,'none','41043e596f70e327e34fc99c861f5b31',0,'2020-05-01 18:26:10'); /*!40000 ALTER TABLE `users` ENABLE KEYS */; UNLOCK TABLES; @@ -1567,7 +1567,7 @@ CREATE TABLE `webhooks` ( LOCK TABLES `webhooks` WRITE; /*!40000 ALTER TABLE `webhooks` DISABLE KEYS */; -INSERT INTO `webhooks` VALUES (1,'http://10.66.166.188:7000/playbook/webhook',1); +INSERT INTO `webhooks` VALUES (1,'http://10.66.166.135:7000/playbook/webhook',1); /*!40000 ALTER TABLE `webhooks` ENABLE KEYS */; UNLOCK TABLES; @@ -1742,7 +1742,7 @@ CREATE TABLE `workflows` ( KEY `index_workflows_on_role_id` (`role_id`), KEY `index_workflows_on_new_status_id` (`new_status_id`), KEY `index_workflows_on_tracker_id` (`tracker_id`) -) ENGINE=InnoDB AUTO_INCREMENT=648 DEFAULT CHARSET=latin1; +) ENGINE=InnoDB AUTO_INCREMENT=652 DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; -- @@ -1751,7 +1751,7 @@ CREATE TABLE `workflows` ( LOCK TABLES `workflows` WRITE; /*!40000 ALTER TABLE `workflows` DISABLE KEYS */; -INSERT INTO `workflows` VALUES 
(132,1,2,0,3,0,0,'WorkflowPermission','14','readonly'),(134,1,2,0,3,0,0,'WorkflowPermission','16','readonly'),(151,1,3,0,3,0,0,'WorkflowPermission','14','readonly'),(153,1,3,0,3,0,0,'WorkflowPermission','16','readonly'),(170,1,4,0,3,0,0,'WorkflowPermission','14','readonly'),(172,1,4,0,3,0,0,'WorkflowPermission','16','readonly'),(189,1,5,0,3,0,0,'WorkflowPermission','14','readonly'),(191,1,5,0,3,0,0,'WorkflowPermission','16','readonly'),(208,1,6,0,3,0,0,'WorkflowPermission','14','readonly'),(210,1,6,0,3,0,0,'WorkflowPermission','16','readonly'),(220,1,2,3,3,0,0,'WorkflowTransition',NULL,NULL),(221,1,2,3,4,0,0,'WorkflowTransition',NULL,NULL),(222,1,2,3,5,0,0,'WorkflowTransition',NULL,NULL),(223,1,2,6,3,0,0,'WorkflowTransition',NULL,NULL),(224,1,2,6,4,0,0,'WorkflowTransition',NULL,NULL),(225,1,2,6,5,0,0,'WorkflowTransition',NULL,NULL),(226,1,3,4,3,0,0,'WorkflowTransition',NULL,NULL),(227,1,3,4,4,0,0,'WorkflowTransition',NULL,NULL),(228,1,3,4,5,0,0,'WorkflowTransition',NULL,NULL),(229,1,4,5,3,0,0,'WorkflowTransition',NULL,NULL),(230,1,4,5,4,0,0,'WorkflowTransition',NULL,NULL),(231,1,4,5,5,0,0,'WorkflowTransition',NULL,NULL),(232,1,4,6,3,0,0,'WorkflowTransition',NULL,NULL),(233,1,4,6,4,0,0,'WorkflowTransition',NULL,NULL),(234,1,4,6,5,0,0,'WorkflowTransition',NULL,NULL),(235,1,6,3,3,0,0,'WorkflowTransition',NULL,NULL),(236,1,6,3,4,0,0,'WorkflowTransition',NULL,NULL),(237,1,6,3,5,0,0,'WorkflowTransition',NULL,NULL),(239,1,2,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(240,1,3,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(241,1,4,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(242,1,5,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(243,1,6,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(244,1,0,2,5,0,0,'WorkflowTransition',NULL,NULL),(245,1,0,2,4,0,0,'WorkflowTransition',NULL,NULL),(246,1,0,6,5,0,0,'WorkflowTransition',NULL,NULL),(352,1,2,0,3,0,0,'WorkflowPermission','project_id','readonly'),(353,1,2,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(354,1,2,0,3,0,0,'WorkflowPermission','subject','readonly'),(355,1,2,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(356,1,2,0,3,0,0,'WorkflowPermission','is_private','readonly'),(357,1,2,0,3,0,0,'WorkflowPermission','description','readonly'),(358,1,2,0,3,0,0,'WorkflowPermission','1','readonly'),(359,1,2,0,3,0,0,'WorkflowPermission','2','readonly'),(360,1,2,0,3,0,0,'WorkflowPermission','10','readonly'),(361,1,2,0,3,0,0,'WorkflowPermission','20','readonly'),(362,1,2,0,3,0,0,'WorkflowPermission','8','readonly'),(363,1,2,0,3,0,0,'WorkflowPermission','15','readonly'),(364,1,2,0,3,0,0,'WorkflowPermission','11','readonly'),(365,1,2,0,3,0,0,'WorkflowPermission','12','readonly'),(366,1,2,0,3,0,0,'WorkflowPermission','19','readonly'),(367,1,2,0,3,0,0,'WorkflowPermission','7','readonly'),(368,1,2,0,3,0,0,'WorkflowPermission','3','readonly'),(369,1,2,0,3,0,0,'WorkflowPermission','5','readonly'),(370,1,2,0,3,0,0,'WorkflowPermission','6','readonly'),(371,1,2,0,3,0,0,'WorkflowPermission','22','readonly'),(372,1,3,0,3,0,0,'WorkflowPermission','project_id','readonly'),(373,1,3,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(374,1,3,0,3,0,0,'WorkflowPermission','subject','readonly'),(375,1,3,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(376,1,3,0,3,0,0,'WorkflowPermission','is_private','readonly'),(377,1,3,0,3,0,0,'WorkflowPermission','description','readonly'),(378,1,3,0,3,0,0,'WorkflowPermission','1','readonly'),(379,1,3,0,3,0,0,'WorkflowPermission','2','readonly'),(380,1,3,0,3,0,0,'WorkflowPermi
ssion','10','readonly'),(381,1,3,0,3,0,0,'WorkflowPermission','20','readonly'),(382,1,3,0,3,0,0,'WorkflowPermission','8','readonly'),(383,1,3,0,3,0,0,'WorkflowPermission','15','readonly'),(384,1,3,0,3,0,0,'WorkflowPermission','11','readonly'),(385,1,3,0,3,0,0,'WorkflowPermission','12','readonly'),(386,1,3,0,3,0,0,'WorkflowPermission','19','readonly'),(387,1,3,0,3,0,0,'WorkflowPermission','7','readonly'),(388,1,3,0,3,0,0,'WorkflowPermission','3','readonly'),(389,1,3,0,3,0,0,'WorkflowPermission','5','readonly'),(390,1,3,0,3,0,0,'WorkflowPermission','6','readonly'),(391,1,3,0,3,0,0,'WorkflowPermission','22','readonly'),(392,1,4,0,3,0,0,'WorkflowPermission','project_id','readonly'),(393,1,4,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(394,1,4,0,3,0,0,'WorkflowPermission','subject','readonly'),(395,1,4,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(396,1,4,0,3,0,0,'WorkflowPermission','is_private','readonly'),(397,1,4,0,3,0,0,'WorkflowPermission','description','readonly'),(398,1,4,0,3,0,0,'WorkflowPermission','1','readonly'),(399,1,4,0,3,0,0,'WorkflowPermission','2','readonly'),(400,1,4,0,3,0,0,'WorkflowPermission','10','readonly'),(401,1,4,0,3,0,0,'WorkflowPermission','20','readonly'),(402,1,4,0,3,0,0,'WorkflowPermission','8','readonly'),(403,1,4,0,3,0,0,'WorkflowPermission','15','readonly'),(404,1,4,0,3,0,0,'WorkflowPermission','11','readonly'),(405,1,4,0,3,0,0,'WorkflowPermission','12','readonly'),(406,1,4,0,3,0,0,'WorkflowPermission','19','readonly'),(407,1,4,0,3,0,0,'WorkflowPermission','7','readonly'),(408,1,4,0,3,0,0,'WorkflowPermission','3','readonly'),(409,1,4,0,3,0,0,'WorkflowPermission','5','readonly'),(410,1,4,0,3,0,0,'WorkflowPermission','6','readonly'),(411,1,4,0,3,0,0,'WorkflowPermission','22','readonly'),(412,1,5,0,3,0,0,'WorkflowPermission','project_id','readonly'),(413,1,5,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(414,1,5,0,3,0,0,'WorkflowPermission','subject','readonly'),(415,1,5,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(416,1,5,0,3,0,0,'WorkflowPermission','is_private','readonly'),(417,1,5,0,3,0,0,'WorkflowPermission','description','readonly'),(418,1,5,0,3,0,0,'WorkflowPermission','1','readonly'),(419,1,5,0,3,0,0,'WorkflowPermission','2','readonly'),(420,1,5,0,3,0,0,'WorkflowPermission','10','readonly'),(421,1,5,0,3,0,0,'WorkflowPermission','20','readonly'),(422,1,5,0,3,0,0,'WorkflowPermission','8','readonly'),(423,1,5,0,3,0,0,'WorkflowPermission','15','readonly'),(424,1,5,0,3,0,0,'WorkflowPermission','11','readonly'),(425,1,5,0,3,0,0,'WorkflowPermission','12','readonly'),(426,1,5,0,3,0,0,'WorkflowPermission','19','readonly'),(427,1,5,0,3,0,0,'WorkflowPermission','7','readonly'),(428,1,5,0,3,0,0,'WorkflowPermission','3','readonly'),(429,1,5,0,3,0,0,'WorkflowPermission','5','readonly'),(430,1,5,0,3,0,0,'WorkflowPermission','6','readonly'),(431,1,5,0,3,0,0,'WorkflowPermission','22','readonly'),(432,1,6,0,3,0,0,'WorkflowPermission','project_id','readonly'),(433,1,6,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(434,1,6,0,3,0,0,'WorkflowPermission','subject','readonly'),(435,1,6,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(436,1,6,0,3,0,0,'WorkflowPermission','is_private','readonly'),(437,1,6,0,3,0,0,'WorkflowPermission','description','readonly'),(438,1,6,0,3,0,0,'WorkflowPermission','1','readonly'),(439,1,6,0,3,0,0,'WorkflowPermission','2','readonly'),(440,1,6,0,3,0,0,'WorkflowPermission','10','readonly'),(441,1,6,0,3,0,0,'WorkflowPermission','20','readonly'),(442,1,6,0,3,0,0,'WorkflowPermission','8','readonly'),(4
43,1,6,0,3,0,0,'WorkflowPermission','15','readonly'),(444,1,6,0,3,0,0,'WorkflowPermission','11','readonly'),(445,1,6,0,3,0,0,'WorkflowPermission','12','readonly'),(446,1,6,0,3,0,0,'WorkflowPermission','19','readonly'),(447,1,6,0,3,0,0,'WorkflowPermission','7','readonly'),(448,1,6,0,3,0,0,'WorkflowPermission','3','readonly'),(449,1,6,0,3,0,0,'WorkflowPermission','5','readonly'),(450,1,6,0,3,0,0,'WorkflowPermission','6','readonly'),(451,1,6,0,3,0,0,'WorkflowPermission','22','readonly'),(537,1,2,0,2,0,0,'WorkflowPermission','project_id','readonly'),(538,1,2,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(539,1,2,0,2,0,0,'WorkflowPermission','subject','readonly'),(540,1,2,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(541,1,2,0,2,0,0,'WorkflowPermission','is_private','readonly'),(542,1,2,0,2,0,0,'WorkflowPermission','description','readonly'),(543,1,2,0,2,0,0,'WorkflowPermission','1','readonly'),(544,1,2,0,2,0,0,'WorkflowPermission','2','readonly'),(545,1,2,0,2,0,0,'WorkflowPermission','10','readonly'),(546,1,2,0,2,0,0,'WorkflowPermission','20','readonly'),(547,1,2,0,2,0,0,'WorkflowPermission','8','readonly'),(548,1,2,0,2,0,0,'WorkflowPermission','15','readonly'),(549,1,2,0,2,0,0,'WorkflowPermission','11','readonly'),(550,1,2,0,2,0,0,'WorkflowPermission','12','readonly'),(551,1,2,0,2,0,0,'WorkflowPermission','19','readonly'),(552,1,2,0,2,0,0,'WorkflowPermission','17','readonly'),(553,1,2,0,2,0,0,'WorkflowPermission','7','readonly'),(554,1,2,0,2,0,0,'WorkflowPermission','3','readonly'),(555,1,2,0,2,0,0,'WorkflowPermission','5','readonly'),(556,1,2,0,2,0,0,'WorkflowPermission','6','readonly'),(557,1,2,0,2,0,0,'WorkflowPermission','22','readonly'),(558,1,3,0,2,0,0,'WorkflowPermission','project_id','readonly'),(559,1,3,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(560,1,3,0,2,0,0,'WorkflowPermission','subject','readonly'),(561,1,3,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(562,1,3,0,2,0,0,'WorkflowPermission','is_private','readonly'),(563,1,3,0,2,0,0,'WorkflowPermission','description','readonly'),(564,1,3,0,2,0,0,'WorkflowPermission','1','readonly'),(565,1,3,0,2,0,0,'WorkflowPermission','2','readonly'),(566,1,3,0,2,0,0,'WorkflowPermission','10','readonly'),(567,1,3,0,2,0,0,'WorkflowPermission','20','readonly'),(568,1,3,0,2,0,0,'WorkflowPermission','8','readonly'),(569,1,3,0,2,0,0,'WorkflowPermission','15','readonly'),(570,1,3,0,2,0,0,'WorkflowPermission','11','readonly'),(571,1,3,0,2,0,0,'WorkflowPermission','12','readonly'),(572,1,3,0,2,0,0,'WorkflowPermission','19','readonly'),(573,1,3,0,2,0,0,'WorkflowPermission','17','readonly'),(574,1,3,0,2,0,0,'WorkflowPermission','7','readonly'),(575,1,3,0,2,0,0,'WorkflowPermission','3','readonly'),(576,1,3,0,2,0,0,'WorkflowPermission','5','readonly'),(577,1,3,0,2,0,0,'WorkflowPermission','6','readonly'),(578,1,3,0,2,0,0,'WorkflowPermission','22','readonly'),(579,1,4,0,2,0,0,'WorkflowPermission','project_id','readonly'),(580,1,4,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(581,1,4,0,2,0,0,'WorkflowPermission','subject','readonly'),(582,1,4,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(583,1,4,0,2,0,0,'WorkflowPermission','is_private','readonly'),(584,1,4,0,2,0,0,'WorkflowPermission','description','readonly'),(585,1,4,0,2,0,0,'WorkflowPermission','1','readonly'),(586,1,4,0,2,0,0,'WorkflowPermission','2','readonly'),(587,1,4,0,2,0,0,'WorkflowPermission','10','readonly'),(588,1,4,0,2,0,0,'WorkflowPermission','20','readonly'),(589,1,4,0,2,0,0,'WorkflowPermission','8','readonly'),(590,1,4,0,2,0,0,'WorkflowP
ermission','15','readonly'),(591,1,4,0,2,0,0,'WorkflowPermission','11','readonly'),(592,1,4,0,2,0,0,'WorkflowPermission','12','readonly'),(593,1,4,0,2,0,0,'WorkflowPermission','19','readonly'),(594,1,4,0,2,0,0,'WorkflowPermission','17','readonly'),(595,1,4,0,2,0,0,'WorkflowPermission','7','readonly'),(596,1,4,0,2,0,0,'WorkflowPermission','3','readonly'),(597,1,4,0,2,0,0,'WorkflowPermission','5','readonly'),(598,1,4,0,2,0,0,'WorkflowPermission','6','readonly'),(599,1,4,0,2,0,0,'WorkflowPermission','22','readonly'),(600,1,5,0,2,0,0,'WorkflowPermission','project_id','readonly'),(601,1,5,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(602,1,5,0,2,0,0,'WorkflowPermission','subject','readonly'),(603,1,5,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(604,1,5,0,2,0,0,'WorkflowPermission','is_private','readonly'),(605,1,5,0,2,0,0,'WorkflowPermission','description','readonly'),(606,1,5,0,2,0,0,'WorkflowPermission','1','readonly'),(607,1,5,0,2,0,0,'WorkflowPermission','2','readonly'),(608,1,5,0,2,0,0,'WorkflowPermission','10','readonly'),(609,1,5,0,2,0,0,'WorkflowPermission','20','readonly'),(610,1,5,0,2,0,0,'WorkflowPermission','8','readonly'),(611,1,5,0,2,0,0,'WorkflowPermission','15','readonly'),(612,1,5,0,2,0,0,'WorkflowPermission','11','readonly'),(613,1,5,0,2,0,0,'WorkflowPermission','12','readonly'),(614,1,5,0,2,0,0,'WorkflowPermission','19','readonly'),(615,1,5,0,2,0,0,'WorkflowPermission','17','readonly'),(616,1,5,0,2,0,0,'WorkflowPermission','7','readonly'),(617,1,5,0,2,0,0,'WorkflowPermission','3','readonly'),(618,1,5,0,2,0,0,'WorkflowPermission','5','readonly'),(619,1,5,0,2,0,0,'WorkflowPermission','6','readonly'),(620,1,5,0,2,0,0,'WorkflowPermission','22','readonly'),(621,1,6,0,2,0,0,'WorkflowPermission','project_id','readonly'),(622,1,6,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(623,1,6,0,2,0,0,'WorkflowPermission','subject','readonly'),(624,1,6,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(625,1,6,0,2,0,0,'WorkflowPermission','is_private','readonly'),(626,1,6,0,2,0,0,'WorkflowPermission','description','readonly'),(627,1,6,0,2,0,0,'WorkflowPermission','1','readonly'),(628,1,6,0,2,0,0,'WorkflowPermission','2','readonly'),(629,1,6,0,2,0,0,'WorkflowPermission','10','readonly'),(630,1,6,0,2,0,0,'WorkflowPermission','20','readonly'),(631,1,6,0,2,0,0,'WorkflowPermission','8','readonly'),(632,1,6,0,2,0,0,'WorkflowPermission','15','readonly'),(633,1,6,0,2,0,0,'WorkflowPermission','11','readonly'),(634,1,6,0,2,0,0,'WorkflowPermission','12','readonly'),(635,1,6,0,2,0,0,'WorkflowPermission','19','readonly'),(636,1,6,0,2,0,0,'WorkflowPermission','17','readonly'),(637,1,6,0,2,0,0,'WorkflowPermission','7','readonly'),(638,1,6,0,2,0,0,'WorkflowPermission','3','readonly'),(639,1,6,0,2,0,0,'WorkflowPermission','5','readonly'),(640,1,6,0,2,0,0,'WorkflowPermission','6','readonly'),(641,1,6,0,2,0,0,'WorkflowPermission','22','readonly'),(642,1,2,3,2,0,0,'WorkflowTransition',NULL,NULL),(643,1,2,6,2,0,0,'WorkflowTransition',NULL,NULL),(644,1,3,4,2,0,0,'WorkflowTransition',NULL,NULL),(645,1,4,5,2,0,0,'WorkflowTransition',NULL,NULL),(646,1,4,6,2,0,0,'WorkflowTransition',NULL,NULL),(647,1,6,3,2,0,0,'WorkflowTransition',NULL,NULL); +INSERT INTO `workflows` VALUES 
(132,1,2,0,3,0,0,'WorkflowPermission','14','readonly'),(134,1,2,0,3,0,0,'WorkflowPermission','16','readonly'),(151,1,3,0,3,0,0,'WorkflowPermission','14','readonly'),(153,1,3,0,3,0,0,'WorkflowPermission','16','readonly'),(170,1,4,0,3,0,0,'WorkflowPermission','14','readonly'),(172,1,4,0,3,0,0,'WorkflowPermission','16','readonly'),(189,1,5,0,3,0,0,'WorkflowPermission','14','readonly'),(191,1,5,0,3,0,0,'WorkflowPermission','16','readonly'),(208,1,6,0,3,0,0,'WorkflowPermission','14','readonly'),(210,1,6,0,3,0,0,'WorkflowPermission','16','readonly'),(220,1,2,3,3,0,0,'WorkflowTransition',NULL,NULL),(221,1,2,3,4,0,0,'WorkflowTransition',NULL,NULL),(222,1,2,3,5,0,0,'WorkflowTransition',NULL,NULL),(226,1,3,4,3,0,0,'WorkflowTransition',NULL,NULL),(227,1,3,4,4,0,0,'WorkflowTransition',NULL,NULL),(228,1,3,4,5,0,0,'WorkflowTransition',NULL,NULL),(229,1,4,5,3,0,0,'WorkflowTransition',NULL,NULL),(230,1,4,5,4,0,0,'WorkflowTransition',NULL,NULL),(231,1,4,5,5,0,0,'WorkflowTransition',NULL,NULL),(232,1,4,6,3,0,0,'WorkflowTransition',NULL,NULL),(233,1,4,6,4,0,0,'WorkflowTransition',NULL,NULL),(234,1,4,6,5,0,0,'WorkflowTransition',NULL,NULL),(239,1,2,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(240,1,3,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(241,1,4,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(242,1,5,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(243,1,6,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(244,1,0,2,5,0,0,'WorkflowTransition',NULL,NULL),(245,1,0,2,4,0,0,'WorkflowTransition',NULL,NULL),(246,1,0,6,5,0,0,'WorkflowTransition',NULL,NULL),(352,1,2,0,3,0,0,'WorkflowPermission','project_id','readonly'),(353,1,2,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(354,1,2,0,3,0,0,'WorkflowPermission','subject','readonly'),(355,1,2,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(356,1,2,0,3,0,0,'WorkflowPermission','is_private','readonly'),(357,1,2,0,3,0,0,'WorkflowPermission','description','readonly'),(358,1,2,0,3,0,0,'WorkflowPermission','1','readonly'),(359,1,2,0,3,0,0,'WorkflowPermission','2','readonly'),(360,1,2,0,3,0,0,'WorkflowPermission','10','readonly'),(361,1,2,0,3,0,0,'WorkflowPermission','20','readonly'),(362,1,2,0,3,0,0,'WorkflowPermission','8','readonly'),(363,1,2,0,3,0,0,'WorkflowPermission','15','readonly'),(364,1,2,0,3,0,0,'WorkflowPermission','11','readonly'),(365,1,2,0,3,0,0,'WorkflowPermission','12','readonly'),(366,1,2,0,3,0,0,'WorkflowPermission','19','readonly'),(367,1,2,0,3,0,0,'WorkflowPermission','7','readonly'),(368,1,2,0,3,0,0,'WorkflowPermission','3','readonly'),(369,1,2,0,3,0,0,'WorkflowPermission','5','readonly'),(370,1,2,0,3,0,0,'WorkflowPermission','6','readonly'),(371,1,2,0,3,0,0,'WorkflowPermission','22','readonly'),(372,1,3,0,3,0,0,'WorkflowPermission','project_id','readonly'),(373,1,3,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(374,1,3,0,3,0,0,'WorkflowPermission','subject','readonly'),(375,1,3,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(376,1,3,0,3,0,0,'WorkflowPermission','is_private','readonly'),(377,1,3,0,3,0,0,'WorkflowPermission','description','readonly'),(378,1,3,0,3,0,0,'WorkflowPermission','1','readonly'),(379,1,3,0,3,0,0,'WorkflowPermission','2','readonly'),(380,1,3,0,3,0,0,'WorkflowPermission','10','readonly'),(381,1,3,0,3,0,0,'WorkflowPermission','20','readonly'),(382,1,3,0,3,0,0,'WorkflowPermission','8','readonly'),(383,1,3,0,3,0,0,'WorkflowPermission','15','readonly'),(384,1,3,0,3,0,0,'WorkflowPermission','11','readonly'),(385,1,3,0,3,0,0,'WorkflowPermission','12','readonl
y'),(386,1,3,0,3,0,0,'WorkflowPermission','19','readonly'),(387,1,3,0,3,0,0,'WorkflowPermission','7','readonly'),(388,1,3,0,3,0,0,'WorkflowPermission','3','readonly'),(389,1,3,0,3,0,0,'WorkflowPermission','5','readonly'),(390,1,3,0,3,0,0,'WorkflowPermission','6','readonly'),(391,1,3,0,3,0,0,'WorkflowPermission','22','readonly'),(392,1,4,0,3,0,0,'WorkflowPermission','project_id','readonly'),(393,1,4,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(394,1,4,0,3,0,0,'WorkflowPermission','subject','readonly'),(395,1,4,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(396,1,4,0,3,0,0,'WorkflowPermission','is_private','readonly'),(397,1,4,0,3,0,0,'WorkflowPermission','description','readonly'),(398,1,4,0,3,0,0,'WorkflowPermission','1','readonly'),(399,1,4,0,3,0,0,'WorkflowPermission','2','readonly'),(400,1,4,0,3,0,0,'WorkflowPermission','10','readonly'),(401,1,4,0,3,0,0,'WorkflowPermission','20','readonly'),(402,1,4,0,3,0,0,'WorkflowPermission','8','readonly'),(403,1,4,0,3,0,0,'WorkflowPermission','15','readonly'),(404,1,4,0,3,0,0,'WorkflowPermission','11','readonly'),(405,1,4,0,3,0,0,'WorkflowPermission','12','readonly'),(406,1,4,0,3,0,0,'WorkflowPermission','19','readonly'),(407,1,4,0,3,0,0,'WorkflowPermission','7','readonly'),(408,1,4,0,3,0,0,'WorkflowPermission','3','readonly'),(409,1,4,0,3,0,0,'WorkflowPermission','5','readonly'),(410,1,4,0,3,0,0,'WorkflowPermission','6','readonly'),(411,1,4,0,3,0,0,'WorkflowPermission','22','readonly'),(412,1,5,0,3,0,0,'WorkflowPermission','project_id','readonly'),(413,1,5,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(414,1,5,0,3,0,0,'WorkflowPermission','subject','readonly'),(415,1,5,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(416,1,5,0,3,0,0,'WorkflowPermission','is_private','readonly'),(417,1,5,0,3,0,0,'WorkflowPermission','description','readonly'),(418,1,5,0,3,0,0,'WorkflowPermission','1','readonly'),(419,1,5,0,3,0,0,'WorkflowPermission','2','readonly'),(420,1,5,0,3,0,0,'WorkflowPermission','10','readonly'),(421,1,5,0,3,0,0,'WorkflowPermission','20','readonly'),(422,1,5,0,3,0,0,'WorkflowPermission','8','readonly'),(423,1,5,0,3,0,0,'WorkflowPermission','15','readonly'),(424,1,5,0,3,0,0,'WorkflowPermission','11','readonly'),(425,1,5,0,3,0,0,'WorkflowPermission','12','readonly'),(426,1,5,0,3,0,0,'WorkflowPermission','19','readonly'),(427,1,5,0,3,0,0,'WorkflowPermission','7','readonly'),(428,1,5,0,3,0,0,'WorkflowPermission','3','readonly'),(429,1,5,0,3,0,0,'WorkflowPermission','5','readonly'),(430,1,5,0,3,0,0,'WorkflowPermission','6','readonly'),(431,1,5,0,3,0,0,'WorkflowPermission','22','readonly'),(432,1,6,0,3,0,0,'WorkflowPermission','project_id','readonly'),(433,1,6,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(434,1,6,0,3,0,0,'WorkflowPermission','subject','readonly'),(435,1,6,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(436,1,6,0,3,0,0,'WorkflowPermission','is_private','readonly'),(437,1,6,0,3,0,0,'WorkflowPermission','description','readonly'),(438,1,6,0,3,0,0,'WorkflowPermission','1','readonly'),(439,1,6,0,3,0,0,'WorkflowPermission','2','readonly'),(440,1,6,0,3,0,0,'WorkflowPermission','10','readonly'),(441,1,6,0,3,0,0,'WorkflowPermission','20','readonly'),(442,1,6,0,3,0,0,'WorkflowPermission','8','readonly'),(443,1,6,0,3,0,0,'WorkflowPermission','15','readonly'),(444,1,6,0,3,0,0,'WorkflowPermission','11','readonly'),(445,1,6,0,3,0,0,'WorkflowPermission','12','readonly'),(446,1,6,0,3,0,0,'WorkflowPermission','19','readonly'),(447,1,6,0,3,0,0,'WorkflowPermission','7','readonly'),(448,1,6,0,3,0,0,'Work
flowPermission','3','readonly'),(449,1,6,0,3,0,0,'WorkflowPermission','5','readonly'),(450,1,6,0,3,0,0,'WorkflowPermission','6','readonly'),(451,1,6,0,3,0,0,'WorkflowPermission','22','readonly'),(537,1,2,0,2,0,0,'WorkflowPermission','project_id','readonly'),(538,1,2,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(539,1,2,0,2,0,0,'WorkflowPermission','subject','readonly'),(540,1,2,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(541,1,2,0,2,0,0,'WorkflowPermission','is_private','readonly'),(542,1,2,0,2,0,0,'WorkflowPermission','description','readonly'),(543,1,2,0,2,0,0,'WorkflowPermission','1','readonly'),(544,1,2,0,2,0,0,'WorkflowPermission','2','readonly'),(545,1,2,0,2,0,0,'WorkflowPermission','10','readonly'),(546,1,2,0,2,0,0,'WorkflowPermission','20','readonly'),(547,1,2,0,2,0,0,'WorkflowPermission','8','readonly'),(548,1,2,0,2,0,0,'WorkflowPermission','15','readonly'),(549,1,2,0,2,0,0,'WorkflowPermission','11','readonly'),(550,1,2,0,2,0,0,'WorkflowPermission','12','readonly'),(551,1,2,0,2,0,0,'WorkflowPermission','19','readonly'),(552,1,2,0,2,0,0,'WorkflowPermission','17','readonly'),(553,1,2,0,2,0,0,'WorkflowPermission','7','readonly'),(554,1,2,0,2,0,0,'WorkflowPermission','3','readonly'),(555,1,2,0,2,0,0,'WorkflowPermission','5','readonly'),(556,1,2,0,2,0,0,'WorkflowPermission','6','readonly'),(557,1,2,0,2,0,0,'WorkflowPermission','22','readonly'),(558,1,3,0,2,0,0,'WorkflowPermission','project_id','readonly'),(559,1,3,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(560,1,3,0,2,0,0,'WorkflowPermission','subject','readonly'),(561,1,3,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(562,1,3,0,2,0,0,'WorkflowPermission','is_private','readonly'),(563,1,3,0,2,0,0,'WorkflowPermission','description','readonly'),(564,1,3,0,2,0,0,'WorkflowPermission','1','readonly'),(565,1,3,0,2,0,0,'WorkflowPermission','2','readonly'),(566,1,3,0,2,0,0,'WorkflowPermission','10','readonly'),(567,1,3,0,2,0,0,'WorkflowPermission','20','readonly'),(568,1,3,0,2,0,0,'WorkflowPermission','8','readonly'),(569,1,3,0,2,0,0,'WorkflowPermission','15','readonly'),(570,1,3,0,2,0,0,'WorkflowPermission','11','readonly'),(571,1,3,0,2,0,0,'WorkflowPermission','12','readonly'),(572,1,3,0,2,0,0,'WorkflowPermission','19','readonly'),(573,1,3,0,2,0,0,'WorkflowPermission','17','readonly'),(574,1,3,0,2,0,0,'WorkflowPermission','7','readonly'),(575,1,3,0,2,0,0,'WorkflowPermission','3','readonly'),(576,1,3,0,2,0,0,'WorkflowPermission','5','readonly'),(577,1,3,0,2,0,0,'WorkflowPermission','6','readonly'),(578,1,3,0,2,0,0,'WorkflowPermission','22','readonly'),(579,1,4,0,2,0,0,'WorkflowPermission','project_id','readonly'),(580,1,4,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(581,1,4,0,2,0,0,'WorkflowPermission','subject','readonly'),(582,1,4,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(583,1,4,0,2,0,0,'WorkflowPermission','is_private','readonly'),(584,1,4,0,2,0,0,'WorkflowPermission','description','readonly'),(585,1,4,0,2,0,0,'WorkflowPermission','1','readonly'),(586,1,4,0,2,0,0,'WorkflowPermission','2','readonly'),(587,1,4,0,2,0,0,'WorkflowPermission','10','readonly'),(588,1,4,0,2,0,0,'WorkflowPermission','20','readonly'),(589,1,4,0,2,0,0,'WorkflowPermission','8','readonly'),(590,1,4,0,2,0,0,'WorkflowPermission','15','readonly'),(591,1,4,0,2,0,0,'WorkflowPermission','11','readonly'),(592,1,4,0,2,0,0,'WorkflowPermission','12','readonly'),(593,1,4,0,2,0,0,'WorkflowPermission','19','readonly'),(594,1,4,0,2,0,0,'WorkflowPermission','17','readonly'),(595,1,4,0,2,0,0,'WorkflowPermission','7','rea
donly'),(596,1,4,0,2,0,0,'WorkflowPermission','3','readonly'),(597,1,4,0,2,0,0,'WorkflowPermission','5','readonly'),(598,1,4,0,2,0,0,'WorkflowPermission','6','readonly'),(599,1,4,0,2,0,0,'WorkflowPermission','22','readonly'),(600,1,5,0,2,0,0,'WorkflowPermission','project_id','readonly'),(601,1,5,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(602,1,5,0,2,0,0,'WorkflowPermission','subject','readonly'),(603,1,5,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(604,1,5,0,2,0,0,'WorkflowPermission','is_private','readonly'),(605,1,5,0,2,0,0,'WorkflowPermission','description','readonly'),(606,1,5,0,2,0,0,'WorkflowPermission','1','readonly'),(607,1,5,0,2,0,0,'WorkflowPermission','2','readonly'),(608,1,5,0,2,0,0,'WorkflowPermission','10','readonly'),(609,1,5,0,2,0,0,'WorkflowPermission','20','readonly'),(610,1,5,0,2,0,0,'WorkflowPermission','8','readonly'),(611,1,5,0,2,0,0,'WorkflowPermission','15','readonly'),(612,1,5,0,2,0,0,'WorkflowPermission','11','readonly'),(613,1,5,0,2,0,0,'WorkflowPermission','12','readonly'),(614,1,5,0,2,0,0,'WorkflowPermission','19','readonly'),(615,1,5,0,2,0,0,'WorkflowPermission','17','readonly'),(616,1,5,0,2,0,0,'WorkflowPermission','7','readonly'),(617,1,5,0,2,0,0,'WorkflowPermission','3','readonly'),(618,1,5,0,2,0,0,'WorkflowPermission','5','readonly'),(619,1,5,0,2,0,0,'WorkflowPermission','6','readonly'),(620,1,5,0,2,0,0,'WorkflowPermission','22','readonly'),(621,1,6,0,2,0,0,'WorkflowPermission','project_id','readonly'),(622,1,6,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(623,1,6,0,2,0,0,'WorkflowPermission','subject','readonly'),(624,1,6,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(625,1,6,0,2,0,0,'WorkflowPermission','is_private','readonly'),(626,1,6,0,2,0,0,'WorkflowPermission','description','readonly'),(627,1,6,0,2,0,0,'WorkflowPermission','1','readonly'),(628,1,6,0,2,0,0,'WorkflowPermission','2','readonly'),(629,1,6,0,2,0,0,'WorkflowPermission','10','readonly'),(630,1,6,0,2,0,0,'WorkflowPermission','20','readonly'),(631,1,6,0,2,0,0,'WorkflowPermission','8','readonly'),(632,1,6,0,2,0,0,'WorkflowPermission','15','readonly'),(633,1,6,0,2,0,0,'WorkflowPermission','11','readonly'),(634,1,6,0,2,0,0,'WorkflowPermission','12','readonly'),(635,1,6,0,2,0,0,'WorkflowPermission','19','readonly'),(636,1,6,0,2,0,0,'WorkflowPermission','17','readonly'),(637,1,6,0,2,0,0,'WorkflowPermission','7','readonly'),(638,1,6,0,2,0,0,'WorkflowPermission','3','readonly'),(639,1,6,0,2,0,0,'WorkflowPermission','5','readonly'),(640,1,6,0,2,0,0,'WorkflowPermission','6','readonly'),(641,1,6,0,2,0,0,'WorkflowPermission','22','readonly'),(642,1,2,3,2,0,0,'WorkflowTransition',NULL,NULL),(644,1,3,4,2,0,0,'WorkflowTransition',NULL,NULL),(645,1,4,5,2,0,0,'WorkflowTransition',NULL,NULL),(646,1,4,6,2,0,0,'WorkflowTransition',NULL,NULL),(648,1,4,3,2,0,0,'WorkflowTransition',NULL,NULL),(649,1,4,3,3,0,0,'WorkflowTransition',NULL,NULL),(650,1,4,3,4,0,0,'WorkflowTransition',NULL,NULL),(651,1,4,3,5,0,0,'WorkflowTransition',NULL,NULL); /*!40000 ALTER TABLE `workflows` ENABLE KEYS */; UNLOCK TABLES; /*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; @@ -1764,4 +1764,4 @@ UNLOCK TABLES; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; --- Dump completed on 2020-07-15 16:33:41 +-- Dump completed on 2020-08-17 18:06:56 From ba192d6c323ed3b2d6929930e71bc88d84ccab5c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 17 Aug 2020 17:23:25 -0400 Subject: [PATCH 312/376] Update addtotab.sh --- pillar/data/addtotab.sh | 4 ++-- 1 
file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pillar/data/addtotab.sh b/pillar/data/addtotab.sh
index 696ec171e..ac3d913a5 100644
--- a/pillar/data/addtotab.sh
+++ b/pillar/data/addtotab.sh
@@ -44,11 +44,11 @@ echo " guid: $GUID" >> $local_salt_dir/pillar/data/$TYPE.sls
 echo " rootfs: $ROOTFS" >> $local_salt_dir/pillar/data/$TYPE.sls
 echo " nsmfs: $NSM" >> $local_salt_dir/pillar/data/$TYPE.sls
 if [ $TYPE == 'sensorstab' ]; then
-  echo " monint: $MONINT" >> $local_salt_dir/pillar/data/$TYPE.sls
+  echo " monint: bond0" >> $local_salt_dir/pillar/data/$TYPE.sls
   salt-call state.apply grafana queue=True
 fi
 if [ $TYPE == 'evaltab' ] || [ $TYPE == 'standalonetab' ]; then
-  echo " monint: $MONINT" >> $local_salt_dir/pillar/data/$TYPE.sls
+  echo " monint: bond0" >> $local_salt_dir/pillar/data/$TYPE.sls
   if [ ! $10 ]; then
     salt-call state.apply grafana queue=True
     salt-call state.apply utility queue=True

From 9f8f59f4dffa24dadb36ccb9558367d89a723957 Mon Sep 17 00:00:00 2001
From: m0duspwnens
Date: Tue, 18 Aug 2020 10:48:52 -0400
Subject: [PATCH 313/376] fix monint for several node types for grafana

---
 salt/grafana/init.sls | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/salt/grafana/init.sls b/salt/grafana/init.sls
index eb446b2e0..32c7dbdf6 100644
--- a/salt/grafana/init.sls
+++ b/salt/grafana/init.sls
@@ -91,7 +91,7 @@ dashboard-manager:
     - defaults:
         SERVERNAME: {{ SN }}
         MANINT: {{ SNDATA.manint }}
-        MONINT: {{ SNDATA.manint }}
+        MONINT: {{ SNDATA.monint }}
         CPUS: {{ SNDATA.totalcpus }}
         UID: so_overview
         ROOTFS: {{ SNDATA.rootfs }}
@@ -114,7 +114,7 @@ dashboard-managersearch:
     - defaults:
         SERVERNAME: {{ SN }}
         MANINT: {{ SNDATA.manint }}
-        MONINT: {{ SNDATA.manint }}
+        MONINT: {{ SNDATA.monint }}
         CPUS: {{ SNDATA.totalcpus }}
         UID: so_overview
         ROOTFS: {{ SNDATA.rootfs }}
@@ -137,7 +137,7 @@ dashboard-standalone:
     - defaults:
         SERVERNAME: {{ SN }}
         MANINT: {{ SNDATA.manint }}
-        MONINT: {{ SNDATA.manint }}
+        MONINT: {{ SNDATA.monint }}
         CPUS: {{ SNDATA.totalcpus }}
         UID: so_overview
         ROOTFS: {{ SNDATA.rootfs }}
@@ -159,8 +159,8 @@ dashboard-{{ SN }}:
     - source: salt://grafana/dashboards/sensor_nodes/sensor.json
     - defaults:
         SERVERNAME: {{ SN }}
-        MONINT: {{ SNDATA.monint }}
         MANINT: {{ SNDATA.manint }}
+        MONINT: {{ SNDATA.monint }}
         CPUS: {{ SNDATA.totalcpus }}
         UID: {{ SNDATA.guid }}
         ROOTFS: {{ SNDATA.rootfs }}
@@ -183,7 +183,7 @@ dashboardsearch-{{ SN }}:
     - defaults:
         SERVERNAME: {{ SN }}
         MANINT: {{ SNDATA.manint }}
-        MONINT: {{ SNDATA.manint }}
+        MONINT: {{ SNDATA.monint }}
         CPUS: {{ SNDATA.totalcpus }}
         UID: {{ SNDATA.guid }}
         ROOTFS: {{ SNDATA.rootfs }}

From 45d957566da2dd9cc0d063ea51fa560c4236d4a6 Mon Sep 17 00:00:00 2001
From: Jason Ertel
Date: Tue, 18 Aug 2020 11:36:29 -0400
Subject: [PATCH 314/376] Only show 'Waiting for TheHive to start up' status
 if setup is actually installing thehive

---
 setup/so-setup | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/setup/so-setup b/setup/so-setup
index a925207a9..640363f2b 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -726,9 +726,10 @@ else
   IP=$ALLOW_CIDR so-allow -$ALLOW_ROLE >> $setup_log 2>&1
   fi
-  set_progress_str 99 'Waiting for TheHive to start up'
-  if [[ $THEHIVE == 1 ]]; then check_hive_init >> $setup_log 2>&1; fi
-
+  if [[ $THEHIVE == 1 ]]; then
+    set_progress_str 99 'Waiting for TheHive to start up'
+    check_hive_init >> $setup_log 2>&1
+  fi
 } | whiptail_gauge_post_setup "Running post-installation steps..."
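The post-installation work above runs inside a brace group piped into `whiptail_gauge_post_setup`, so every percentage the subshell writes to stdout advances the dialog. A minimal sketch of that gauge pattern, assuming only that whiptail is installed; the title, message, and geometry mirror the values used in so-whiptail, while the step percentages are illustrative:

```bash
#!/bin/bash
# Each integer echoed by the brace group moves the gauge; the dialog
# closes when stdin does. The trailing 0 is the starting percentage.
{
  echo 50; sleep 1   # roughly halfway through the post-install steps
  echo 100           # done
} | whiptail --title "Security Onion Setup" --gauge "Running post-installation steps..." 6 60 0
```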
whiptail_setup_complete From c8dfc2495c60858d80ba17c44f1dc259f5162621 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 18 Aug 2020 14:21:23 -0400 Subject: [PATCH 315/376] add strelka to heavynode if strelka is enabled - https://github.com/Security-Onion-Solutions/securityonion/issues/1188 --- salt/top.sls | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/top.sls b/salt/top.sls index 4b560c3c1..fdcbcab3e 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -361,6 +361,9 @@ base: - logstash - curator - filebeat + {%- if STRELKA %} + - strelka + {%- endif %} {%- if FLEETMANAGER or FLEETNODE %} - fleet.install_package - redis From eaad0487b5e29308fc399cfa97b9e6b89fe41b82 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 18 Aug 2020 14:54:11 -0400 Subject: [PATCH 316/376] Enable YARA rules by default --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 640363f2b..51febf59f 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -326,7 +326,7 @@ if [[ $is_manager && ! $is_eval ]]; then fi if [[ $STRELKA == 1 ]]; then - whiptail_strelka_rules + STRELKARULES=1 fi if [ "$MANAGERADV" = 'ADVANCED' ] && [ "$ZEEKVERSION" != 'SURICATA' ]; then From 44fcd999fdf9e8579c717e5d6d1000004ae8d204 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 18 Aug 2020 15:08:24 -0400 Subject: [PATCH 317/376] Address #1205 --- salt/ssl/init.sls | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index b7b347ec5..82512068c 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -254,6 +254,13 @@ ealstickeyperms: - name: /etc/pki/elasticsearch.key - mode: 640 - group: 930 + +elasticp12perms: + file.managed: + - replace: False + - name: /etc/pki/elasticsearch.p12 + - mode: 640 + - group: 930 # Create a cert for Redis encryption /etc/pki/redis.key: @@ -530,11 +537,19 @@ fleetkeyperms: - onchanges: - x509: /etc/pki/elasticsearch.key -miniokeyperms: +elasticp12perms: + file.managed: + - replace: False + - name: /etc/pki/elasticsearch.p12 + - mode: 640 + - group: 930 + +elastickeyperms: file.managed: - replace: False - name: /etc/pki/elasticsearch.key - mode: 640 - group: 930 + {%- endif %} -{%- endif %} \ No newline at end of file +{%- endif %} From 5a3d95d9a1985ea7afd5bcfd98fb50b218eb3e03 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 18 Aug 2020 15:09:21 -0400 Subject: [PATCH 318/376] remove monint from manager since it doesnt have a monint --- salt/grafana/init.sls | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/grafana/init.sls b/salt/grafana/init.sls index 32c7dbdf6..a83facb97 100644 --- a/salt/grafana/init.sls +++ b/salt/grafana/init.sls @@ -91,7 +91,6 @@ dashboard-manager: - defaults: SERVERNAME: {{ SN }} MANINT: {{ SNDATA.manint }} - MONINT: {{ SNDATA.monint }} CPUS: {{ SNDATA.totalcpus }} UID: so_overview ROOTFS: {{ SNDATA.rootfs }} From 47ad3f65ef37bceaec7d09917971066364c80e58 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 18 Aug 2020 15:26:30 -0400 Subject: [PATCH 319/376] Only fail setup when the root mailbox is not empty for ISO installations, since network installations can't be sure if the error came from setup or something unrelated --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 51febf59f..2bb97cc16 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -711,7 +711,7 @@ success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}') if [[ $success != 0 ]]; then 
SO_ERROR=1; fi # Check entire setup log for errors or unexpected salt states and ensure cron jobs are not reporting errors to root's mailbox -if grep -q -E "ERROR|Result: False" $setup_log || [[ -s /var/spool/mail/root ]]; then SO_ERROR=1; fi +if grep -q -E "ERROR|Result: False" $setup_log || [[ -s /var/spool/mail/root && "$setup_type" == "iso" ]]; then SO_ERROR=1; fi if [[ -n $SO_ERROR ]]; then echo "Errors detected during setup; skipping post-setup steps to allow for analysis of failures." >> $setup_log 2>&1 From 59aa55f9bced286186b4fdd6ffa5588d166113c9 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 18 Aug 2020 15:29:41 -0400 Subject: [PATCH 320/376] Add playsecrets --- salt/common/tools/sbin/soup | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index a93a000b7..41dccb7c6 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -103,6 +103,12 @@ pillar_changes() { sed -i '/^ url_base:/d' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls; sed -i "/^global:/a \\$line" /opt/so/saltstack/local/pillar/global.sls; + # Adding play values to the global.sls + local HIVEPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + local CORTEXPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + sed -i "/^global:/a \\ hiveplaysecret: $HIVEPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls; + sed -i "/^global:/a \\ cortexplaysecret: $CORTEXPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls; + fi } From 65d9afd8d51f2637373d9878828b9ea9a61cf779 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 18 Aug 2020 15:37:17 -0400 Subject: [PATCH 321/376] remove monint from nodestab grafana dashboard since search nodes dont have monint --- salt/grafana/init.sls | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/grafana/init.sls b/salt/grafana/init.sls index a83facb97..ce70a4a22 100644 --- a/salt/grafana/init.sls +++ b/salt/grafana/init.sls @@ -182,7 +182,6 @@ dashboardsearch-{{ SN }}: - defaults: SERVERNAME: {{ SN }} MANINT: {{ SNDATA.manint }} - MONINT: {{ SNDATA.monint }} CPUS: {{ SNDATA.totalcpus }} UID: {{ SNDATA.guid }} ROOTFS: {{ SNDATA.rootfs }} From a4e986ea373cdcd63ed193b5138e5393ae132a83 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 18 Aug 2020 15:43:43 -0400 Subject: [PATCH 322/376] Don't echo pillar to setup log --- setup/so-functions | 1 - 1 file changed, 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 741929399..dc81ddafe 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1207,7 +1207,6 @@ manager_global() { " interval: 5" >> "$global_pillar" printf '%s\n' '----' >> "$setup_log" 2>&1 - cat "$global_pillar" >> "$setup_log" 2>&1 } minio_generate_keys() { From d4f7a07f857d9bd9461ff38c4de19e28c07eb9aa Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Tue, 18 Aug 2020 15:54:11 -0400 Subject: [PATCH 323/376] Osquery Parsing fix --- salt/elasticsearch/files/ingest/osquery.query_result | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/files/ingest/osquery.query_result b/salt/elasticsearch/files/ingest/osquery.query_result index 2005252b6..3a6ed15a3 100644 --- a/salt/elasticsearch/files/ingest/osquery.query_result +++ b/salt/elasticsearch/files/ingest/osquery.query_result @@ -18,8 +18,8 @@ "source": "def dict = ['result': new HashMap()]; for (entry in ctx['message2'].entrySet()) { dict['result'][entry.getKey()] = entry.getValue(); } ctx['osquery'] = dict; " } }, - { "set": { "field": 
"event.module", "value": "osquery" } }, - { "set": { "field": "event.dataset", "value": "{{osquery.result.name}}"} }, + { "set": { "field": "event.module", "value": "osquery", "override": false } }, + { "set": { "field": "event.dataset", "value": "{{osquery.result.name}}", "override": false} }, { "pipeline": { "name": "common" } } ] } \ No newline at end of file From 294a197cbfe3ac96b32936b41635364817b20043 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 18 Aug 2020 16:57:38 -0400 Subject: [PATCH 324/376] Add cross cluster for SSL --- salt/common/tools/sbin/soup | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 41dccb7c6..5d5196b97 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -109,6 +109,25 @@ pillar_changes() { sed -i "/^global:/a \\ hiveplaysecret: $HIVEPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls; sed -i "/^global:/a \\ cortexplaysecret: $CORTEXPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls; + # Move storage nodes to hostname for SSL + # Get a list we can use: + grep -A1 searchnode /opt/so/saltstack/local/pillar/data/nodestab.sls | grep -v '\-\-' | sed '$!N;s/\n/ /' | awk '{print $1,$3}' | awk '/_searchnode:/{gsub(/\_searchnode:/, "_searchnode"); print}' >/tmp/nodes.txt + # Remove the nodes from cluster settings + while read p; do + local NAME=$(echo $p | awk '{print $1}') + local IP=$(echo $p | awk '{print $2}') + echo "Removing the old cross cluster config for $NAME" + curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_cluster/settings -d '{"persistent":{"cluster":{"remote":{"'$NAME'":{"skip_unavailable":null,"seeds":null}}}}}' + done Date: Tue, 18 Aug 2020 17:38:35 -0400 Subject: [PATCH 325/376] Add cross cluster for SSL --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 5d5196b97..45f018b7f 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -124,7 +124,7 @@ pillar_changes() { local NAME=$(echo $p | awk '{print $1}') local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}') echo "Adding the new cross cluster config for $NAME" - curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"$NAME": {"skip_unavailable": "true", "seeds": ["$EHOSTNAME:9300"]}}}}}' + curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["$EHOSTNAME:9300"]}}}}}' done Date: Tue, 18 Aug 2020 17:45:14 -0400 Subject: [PATCH 326/376] Add cross cluster for SSL --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 45f018b7f..6134a8900 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -124,7 +124,7 @@ pillar_changes() { local NAME=$(echo $p | awk '{print $1}') local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}') echo "Adding the new cross cluster config for $NAME" - curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["$EHOSTNAME:9300"]}}}}}' + curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": 
{"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}' done Date: Wed, 19 Aug 2020 10:08:11 -0400 Subject: [PATCH 327/376] Salt ACL --- salt/common/tools/sbin/soup | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 6134a8900..f06b085b4 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -87,6 +87,28 @@ highstate() { salt-call state.highstate -l info } +masterlock() { + # Lock the ACL to just the manager + cp -v /etc/salt/master /etc/salt/master.upgrade + echo "peer:" >> /etc/salt/master + echo " *_manager:" >> /etc/salt/master + echo " - .*" >> /etc/salt/master + echo " *_standalone:" >> /etc/salt/master + echo " - .*" >> /etc/salt/master + echo " *_managersearch:" >> /etc/salt/master + echo " - .*" >> /etc/salt/master + echo " *_eval:" >> /etc/salt/master + echo " - .*" >> /etc/salt/master + echo " *_helix:" >> /etc/salt/master + echo " - .*" >> /etc/salt/master + echo " *_import:" >> /etc/salt/master + echo " - .*" >> /etc/salt/master +} + +masterunlock() { + mv /etc/salt/master.upgrade /etc/salt/master +} + pillar_changes() { # This function is to add any new pillar items if needed. echo "Checking to see if pillar changes are needed." @@ -343,6 +365,19 @@ copy_new_files echo "" update_version +echo "" +echo "Locking down Salt Master for upgrade" +masterlock + +echo "" +echo "Starting Salt Master service." +systemctl start salt-master + +echo "" +echo "Stopping Salt Master to remove ACL" +systemctl stop salt-master + +masterunlock echo "" echo "Starting Salt Master service." From f57e0fbc56511f2e03a9fcbf6853bea4476630b8 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 10:33:26 -0400 Subject: [PATCH 328/376] Salt ACL --- salt/common/tools/sbin/soup | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index f06b085b4..1f09f20b2 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -373,6 +373,12 @@ echo "" echo "Starting Salt Master service." systemctl start salt-master +echo "" +echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." +highstate +echo "" +echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." + echo "" echo "Stopping Salt Master to remove ACL" systemctl stop salt-master @@ -383,13 +389,6 @@ echo "" echo "Starting Salt Master service." systemctl start salt-master - -echo "" -echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." -highstate -echo "" -echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." 
-
 SALTUPGRADED="True"
 if [[ "$SALTUPGRADED" == "True" ]]; then
 echo ""

From 2f0ffffca472576a963e2e3e48edd7b279b725b4 Mon Sep 17 00:00:00 2001
From: m0duspwnens
Date: Wed, 19 Aug 2020 11:46:29 -0400
Subject: [PATCH 329/376] lock and unlock master during soup

---
 salt/common/tools/sbin/soup | 26 ++++++++++----------------
 1 file changed, 10 insertions(+), 16 deletions(-)

diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup
index 1f09f20b2..53b7a2baa 100755
--- a/salt/common/tools/sbin/soup
+++ b/salt/common/tools/sbin/soup
@@ -88,25 +88,19 @@ highstate() {
 }
 
 masterlock() {
-  # Lock the ACL to just the manager
-  cp -v /etc/salt/master /etc/salt/master.upgrade
-  echo "peer:" >> /etc/salt/master
-  echo " *_manager:" >> /etc/salt/master
-  echo " - .*" >> /etc/salt/master
-  echo " *_standalone:" >> /etc/salt/master
-  echo " - .*" >> /etc/salt/master
-  echo " *_managersearch:" >> /etc/salt/master
-  echo " - .*" >> /etc/salt/master
-  echo " *_eval:" >> /etc/salt/master
-  echo " - .*" >> /etc/salt/master
-  echo " *_helix:" >> /etc/salt/master
-  echo " - .*" >> /etc/salt/master
-  echo " *_import:" >> /etc/salt/master
-  echo " - .*" >> /etc/salt/master
+  TOPFILE=/opt/so/saltstack/default/salt/top.sls
+  BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
+  mv -v $TOPFILE $BACKUPTOPFILE
+  echo "base:" > $TOPFILE
+  echo " $MINIONID:" >> $TOPFILE
+  echo " - ca" >> $TOPFILE
+  echo " - ssl" >> $TOPFILE
+  echo " - elasticsearch" >> $TOPFILE
+
 }
 
 masterunlock() {
-  mv /etc/salt/master.upgrade /etc/salt/master
+  mv -v $BACKUPTOPFILE $TOPFILE
 }
 
 pillar_changes() {

From 9280dbb9d973128c35660e929e9966d0d417f104 Mon Sep 17 00:00:00 2001
From: Mike Reeves
Date: Wed, 19 Aug 2020 12:00:25 -0400
Subject: [PATCH 330/376] Update soup

---
 salt/common/tools/sbin/soup | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup
index 53b7a2baa..f0d30e8fa 100755
--- a/salt/common/tools/sbin/soup
+++ b/salt/common/tools/sbin/soup
@@ -88,19 +88,24 @@ highstate() {
 }
 
 masterlock() {
-  TOPFILE=/opt/so/saltstack/default/salt/top.sls
-  BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
-  mv -v $TOPFILE $BACKUPTOPFILE
-  echo "base:" > $TOPFILE
-  echo " $MINIONID:" >> $TOPFILE
-  echo " - ca" >> $TOPFILE
-  echo " - ssl" >> $TOPFILE
-  echo " - elasticsearch" >> $TOPFILE
-
+  echo "Locking Salt Master"
+  if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
+    TOPFILE=/opt/so/saltstack/default/salt/top.sls
+    BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
+    mv -v $TOPFILE $BACKUPTOPFILE
+    echo "base:" > $TOPFILE
+    echo " $MINIONID:" >> $TOPFILE
+    echo " - ca" >> $TOPFILE
+    echo " - ssl" >> $TOPFILE
+    echo " - elasticsearch" >> $TOPFILE
+  fi
 }
 
 masterunlock() {
-  mv -v $BACKUPTOPFILE $TOPFILE
+  echo "Unlocking Salt Master"
+  if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
+    mv -v $BACKUPTOPFILE $TOPFILE
+  fi
 }
 
 pillar_changes() {

From 3d48c1f99baa181ad679081bcac0f8495a88c0f5 Mon Sep 17 00:00:00 2001
From: Mike Reeves
Date: Wed, 19 Aug 2020 12:14:11 -0400
Subject: [PATCH 331/376] Add playbook updates

---
 salt/common/tools/sbin/soup | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup
index f0d30e8fa..058a1d507 100755
--- a/salt/common/tools/sbin/soup
+++ b/salt/common/tools/sbin/soup
@@ -108,6 +108,15 @@ masterunlock() {
   fi
 }
 
+playbook() {
+  echo
"Applying playbook settings" + if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then + salt-call state.apply playbook.db_init + rm -f /opt/so/rules/elastalert/playbook/*.yaml + so-playbook-ruleupdate >> /root/soup_playbook_rule_update.log 2>&1 & + fi +} + pillar_changes() { # This function is to add any new pillar items if needed. echo "Checking to see if pillar changes are needed." @@ -387,6 +396,8 @@ masterunlock echo "" echo "Starting Salt Master service." systemctl start salt-master +highstate +playbook SALTUPGRADED="True" if [[ "$SALTUPGRADED" == "True" ]]; then From bf84822d36785763849f9427745f710393bda1c8 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 13:04:10 -0400 Subject: [PATCH 332/376] fix if logic --- salt/ssl/init.sls | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 82512068c..393d3a2b7 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -518,6 +518,7 @@ fleetkeyperms: {% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%} - prereq: - x509: /etc/pki/elasticsearch.crt + {%- endif %} /etc/pki/elasticsearch.crt: x509.certificate_managed: @@ -550,6 +551,4 @@ elastickeyperms: - name: /etc/pki/elasticsearch.key - mode: 640 - group: 930 - - {%- endif %} {%- endif %} From 6edf1c14f8bc6f97c6a7ce01f4c2d73b5e33a6bc Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 13:35:58 -0400 Subject: [PATCH 333/376] Fix filebeat certs --- salt/ssl/init.sls | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 393d3a2b7..a2c1d6e39 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -368,7 +368,18 @@ fleetkeyperms: - group: 939 {% endif %} -{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-import'] %} +{% if grains['role'] in ['so-sensor', 'so-manager', 'so-searchnode', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-import'] %} + +removefbcertdir: + file.absent: + - name: /etc/pki/filebeat.crt + - onlyif: "[ -d /etc/pki/filebeat.crt ]" + +removefbcertdir: + file.absent: + - name: /etc/pki/filebeat.p8 + - onlyif: "[ -d /etc/pki/filebeat.p8 ]" + fbcertdir: file.directory: @@ -505,7 +516,7 @@ fleetkeyperms: {% endif %} -{% if grains['role'] in ['so-node', 'so-heavynode'] %} +{% if grains['role'] in ['so-searchnode', 'so-heavynode'] %} # Create a cert for elasticsearch /etc/pki/elasticsearch.key: x509.private_key_managed: @@ -551,4 +562,5 @@ elastickeyperms: - name: /etc/pki/elasticsearch.key - mode: 640 - group: 930 + {%- endif %} From b5dd868d1b1157ccea6919f124e7972ed5264003 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 19 Aug 2020 14:34:28 -0400 Subject: [PATCH 334/376] Add manager IP to container hosts file --- salt/logstash/init.sls | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 1a85a081d..c82383375 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -15,6 +15,7 @@ {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} +{% set MANAGERIP = salt['pillar.get']('global:managerip') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} {%- if FEATURES is sameas true %} @@ -146,6 +147,8 @@ so-logstash: - hostname: so-logstash - name: so-logstash - user: 
logstash + - extra_hosts: + - {{ MANAGER }}:{{ MANAGERIP }} - environment: - LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }} - port_bindings: From 4c246dc30d3be0d222468304dd5ca14aa4c4088b Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 14:40:31 -0400 Subject: [PATCH 335/376] remove airgap install option until rc3 --- setup/so-setup | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index 2bb97cc16..1f26f3a03 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -189,14 +189,14 @@ fi #Check if this is an airgap install -if [[ $is_manager ]]; then - if [[ $is_iso ]]; then - whiptail_airgap - if [[ "$INTERWEBS" == 'AIRGAP' ]]; then - is_airgap=true - fi - fi -fi +#if [[ $is_manager ]]; then +# if [[ $is_iso ]]; then +# whiptail_airgap +# if [[ "$INTERWEBS" == 'AIRGAP' ]]; then +# is_airgap=true +# fi +# fi +#fi if [[ $is_manager && $is_sensor ]]; then check_requirements "standalone" From ed1e3467897b95a1e7437f5f615b7fe7779f7bdb Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Wed, 19 Aug 2020 19:07:24 +0000 Subject: [PATCH 336/376] Add defaults file for search node --- setup/automation/aws_searchnode_defaults | 78 ++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 setup/automation/aws_searchnode_defaults diff --git a/setup/automation/aws_searchnode_defaults b/setup/automation/aws_searchnode_defaults new file mode 100644 index 000000000..3c2ff4df5 --- /dev/null +++ b/setup/automation/aws_searchnode_defaults @@ -0,0 +1,78 @@ +#!/bin/bash + +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
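The `aws_searchnode_defaults` file that continues below is a plain bash fragment of `key=value` assignments consumed by the automated setup. A hedged sketch of how such a file might be loaded; the path and the sourcing mechanism here are assumptions for illustration, not taken from the patch:

```bash
# Sketch: source a defaults file like the one below so its variables
# (install_type, HOSTNAME, MSRV, ...) are visible to the setup script.
defaults_file=./automation/aws_searchnode_defaults   # hypothetical path
if [[ -f $defaults_file ]]; then
  . "$defaults_file"
fi
echo "install_type=$install_type hostname=$HOSTNAME manager=$MSRV"
```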
+ +TESTING=true + +address_type=DHCP +ADMINUSER=onionuser +ADMINPASS1=onionuser +ADMINPASS2=onionuser +#ALLOW_CIDR=0.0.0.0/0 +ALLOW_ROLE=a +#BASICZEEK=7 +#BASICSURI=7 +# BLOGS= +#BNICS=ens6 +#ZEEKVERSION=ZEEK +# CURCLOSEDAYS= +# EVALADVANCED=BASIC +#GRAFANA=1 +# HELIXAPIKEY= +HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 +HNSENSOR=inherit +HOSTNAME=searchnode-aws +install_type=SEARCHNODE +# LSINPUTBATCHCOUNT= +# LSINPUTTHREADS= +# LSPIPELINEBATCH= +# LSPIPELINEWORKERS= +#MANAGERADV=BASIC +MANAGERUPDATES=1 +# MDNS= +# MGATEWAY= +# MIP= +# MMASK= +MNIC=ens5 +# MSEARCH= +MSRV=manager-aws +MSRVIP=172.16.163.10 +# MTU= +#NIDS=Suricata +# NODE_ES_HEAP_SIZE= +# NODE_LS_HEAP_SIZE= +NODESETUP=NODEBASIC +NSMSETUP=BASIC +NODEUPDATES=MANAGER +# OINKCODE= +#OSQUERY=1 +# PATCHSCHEDULEDAYS= +# PATCHSCHEDULEHOURS= +PATCHSCHEDULENAME=auto +#PLAYBOOK=1 +# REDIRECTHOST= +#REDIRECTINFO=HOSTNAME +#RULESETUP=ETOPEN +# SHARDCOUNT= +SKIP_REBOOT=0 +SOREMOTEPASS1=onionuser +SOREMOTEPASS2=onionuser +#STRELKA=1 +#THEHIVE=1 +WAZUH=1 +WEBUSER=onionuser@somewhere.invalid +WEBPASSWD1=0n10nus3r +WEBPASSWD2=0n10nus3r From db2cc5f7a711c3a530d317d48a4c47c1dcffea1a Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 15:43:51 -0400 Subject: [PATCH 337/376] Update init.sls --- salt/ssl/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index a2c1d6e39..f535a8257 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -375,7 +375,7 @@ removefbcertdir: - name: /etc/pki/filebeat.crt - onlyif: "[ -d /etc/pki/filebeat.crt ]" -removefbcertdir: +removefbp8dir: file.absent: - name: /etc/pki/filebeat.p8 - onlyif: "[ -d /etc/pki/filebeat.p8 ]" From 826254bc3dcc00a373f5b729529a16c5ba2c7291 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 19 Aug 2020 15:59:48 -0400 Subject: [PATCH 338/376] give redis key to heavy node too --- salt/ssl/init.sls | 73 ++++++++++++++++++++++++----------------------- 1 file changed, 37 insertions(+), 36 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 82512068c..acf3c32da 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -72,8 +72,44 @@ influxkeyperms: - mode: 640 - group: 939 -{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import'] %} +{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %} +# Create a cert for Redis encryption +/etc/pki/redis.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/redis.key') -%} + - prereq: + - x509: /etc/pki/redis.crt + {%- endif %} +/etc/pki/redis.crt: + x509.certificate_managed: + - ca_server: {{ ca_server }} + - signing_policy: registry + - public_key: /etc/pki/redis.key + - CN: {{ manager }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/redis.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + +rediskeyperms: + file.managed: + - replace: False + - name: /etc/pki/redis.key + - mode: 640 + - group: 939 +{% endif %} + +{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import'] %} 
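The x509 states above use an `unless` guard so a certificate is only re-issued inside its final five days (432000 seconds) of validity. The same test, unrolled into a standalone bash script for readability:

```bash
#!/bin/bash
# Re-issue check from the 'unless' clause above, one step per line.
crt=/etc/pki/redis.crt   # any PEM certificate works here
enddate=$(date -d "$(openssl x509 -in "$crt" -enddate -noout | cut -d= -f2)" +%s)
now=$(date +%s)
expire_date=$(( now + 432000 ))   # now + 5 days
if [ "$enddate" -gt "$expire_date" ]; then
  echo "certificate still valid; skip re-issue"
else
  echo "certificate within 5 days of expiry; re-issue it"
fi
```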
/etc/pki/filebeat.key: x509.private_key_managed: - CN: {{ manager }} @@ -262,41 +298,6 @@ elasticp12perms: - mode: 640 - group: 930 -# Create a cert for Redis encryption -/etc/pki/redis.key: - x509.private_key_managed: - - CN: {{ manager }} - - bits: 4096 - - days_remaining: 0 - - days_valid: 820 - - backup: True - - new: True - {% if salt['file.file_exists']('/etc/pki/redis.key') -%} - - prereq: - - x509: /etc/pki/redis.crt - {%- endif %} - -/etc/pki/redis.crt: - x509.certificate_managed: - - ca_server: {{ ca_server }} - - signing_policy: registry - - public_key: /etc/pki/redis.key - - CN: {{ manager }} - - days_remaining: 0 - - days_valid: 820 - - backup: True - - unless: - # https://github.com/saltstack/salt/issues/52167 - # Will trigger 5 days (432000 sec) from cert expiration - - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/redis.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' - -rediskeyperms: - file.managed: - - replace: False - - name: /etc/pki/redis.key - - mode: 640 - - group: 939 - /etc/pki/managerssl.key: x509.private_key_managed: - CN: {{ manager }} From 4527758e87ab15857164ced0af2890525cb4ceb3 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 16:00:04 -0400 Subject: [PATCH 339/376] Update init.sls --- salt/ssl/init.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index f535a8257..60ae95aa4 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -368,7 +368,7 @@ fleetkeyperms: - group: 939 {% endif %} -{% if grains['role'] in ['so-sensor', 'so-manager', 'so-searchnode', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-import'] %} +{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node, 'so-searchnode', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-import'] %} removefbcertdir: file.absent: @@ -516,7 +516,7 @@ fleetkeyperms: {% endif %} -{% if grains['role'] in ['so-searchnode', 'so-heavynode'] %} +{% if grains['role'] in ['so-node', 'so-searchnode', 'so-heavynode'] %} # Create a cert for elasticsearch /etc/pki/elasticsearch.key: x509.private_key_managed: From 51a52228ac073b59483e5b91c86f570401f6d31f Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 16:01:58 -0400 Subject: [PATCH 340/376] Update init.sls --- salt/ssl/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 60ae95aa4..3971a169c 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -368,7 +368,7 @@ fleetkeyperms: - group: 939 {% endif %} -{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node, 'so-searchnode', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-import'] %} +{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-searchnode', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-import'] %} removefbcertdir: file.absent: From 961cc67e3f32ba76e11a16f3727aaf5f87721f45 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 19 Aug 2020 16:05:40 -0400 Subject: [PATCH 341/376] add nginx state to heavynode --- salt/top.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/top.sls b/salt/top.sls index fdcbcab3e..19c1c77dc 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -352,6 +352,7 @@ base: - ca - ssl - common + - nginx - telegraf - firewall - minio From 
5ff0058a65e38d9bd48eaea4c7cac3573f8a20b1 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 19 Aug 2020 16:12:54 -0400 Subject: [PATCH 342/376] Ensure strelka backend, frontend, and filestream are connecting to redis locally, on heavy node instances --- salt/strelka/files/backend/backend.yaml | 2 +- salt/strelka/files/filestream/filestream.yaml | 2 +- salt/strelka/files/frontend/frontend.yaml | 2 +- setup/so-functions | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/strelka/files/backend/backend.yaml b/salt/strelka/files/backend/backend.yaml index 8748a4fd6..96aa450b7 100644 --- a/salt/strelka/files/backend/backend.yaml +++ b/salt/strelka/files/backend/backend.yaml @@ -1,4 +1,4 @@ -{%- if grains.role == 'so-sensor' -%} +{%- if grains.role in ['so-sensor', 'so-heavynode'] -%} {%- set mainint = salt['pillar.get']('sensor:mainint') %} {%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %} {%- else %} diff --git a/salt/strelka/files/filestream/filestream.yaml b/salt/strelka/files/filestream/filestream.yaml index 1dc6795d9..681aad222 100644 --- a/salt/strelka/files/filestream/filestream.yaml +++ b/salt/strelka/files/filestream/filestream.yaml @@ -1,4 +1,4 @@ -{%- if grains.role == 'so-sensor' -%} +{%- if grains.role in ['so-sensor', 'so-heavynode'] -%} {%- set mainint = salt['pillar.get']('sensor:mainint') %} {%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %} {%- else %} diff --git a/salt/strelka/files/frontend/frontend.yaml b/salt/strelka/files/frontend/frontend.yaml index 23edef3e3..1233aadad 100644 --- a/salt/strelka/files/frontend/frontend.yaml +++ b/salt/strelka/files/frontend/frontend.yaml @@ -1,4 +1,4 @@ -{%- if grains.role == 'so-sensor' -%} +{%- if grains.role in ['so-sensor', 'so-heavynode'] -%} {%- set mainint = salt['pillar.get']('sensor:mainint') %} {%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %} {%- else %} diff --git a/setup/so-functions b/setup/so-functions index dc81ddafe..57f4e4504 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -710,10 +710,10 @@ detect_os() { disable_auto_start() { # Remove the automated setup script from crontab, if it exists - crontab -u $INSTALLUSERNAME -r + logCmd "crontab -u $INSTALLUSERNAME -r" # Truncate last line of the bash profile - sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile + logCmd "sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile" } From 232594078934e7d7f43ecba9e165c4e5da236a99 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 19 Aug 2020 16:24:25 -0400 Subject: [PATCH 343/376] Ensure strelka manager connects to local redis on heavy nodes --- salt/strelka/files/manager/manager.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/strelka/files/manager/manager.yaml b/salt/strelka/files/manager/manager.yaml index b4a73b1c0..466b94a8a 100644 --- a/salt/strelka/files/manager/manager.yaml +++ b/salt/strelka/files/manager/manager.yaml @@ -1,4 +1,4 @@ -{%- if grains.role == 'so-sensor' -%} +{%- if grains.role in ['so-sensor', 'so-heavynode'] -%} {%- set mainint = salt['pillar.get']('sensor:mainint') %} {%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %} {%- else %} From 0439cf320526c25391e91d2379ca0845c2b83904 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 18:47:36 -0400 Subject: [PATCH 344/376] Update soup --- salt/common/tools/sbin/soup | 29 
+++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 058a1d507..6b87538aa 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -153,8 +153,9 @@ pillar_changes() { while read p; do local NAME=$(echo $p | awk '{print $1}') local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}') + local IP=$(echo $p | awk '{print $2}') echo "Adding the new cross cluster config for $NAME" - curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}' + curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'/'$IP'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}' done Date: Wed, 19 Aug 2020 18:51:32 -0400 Subject: [PATCH 345/376] Update soup --- salt/common/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 6b87538aa..d9fadce29 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -155,7 +155,7 @@ pillar_changes() { local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}') local IP=$(echo $p | awk '{print $2}') echo "Adding the new cross cluster config for $NAME" - curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'/'$IP'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}' + curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}' done Date: Wed, 19 Aug 2020 19:51:57 -0400 Subject: [PATCH 346/376] fix filebeat certs --- salt/ssl/init.sls | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 8a0c1d536..df6951591 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -37,6 +37,16 @@ m2cryptopkgs: - python-m2crypto {% endif %} +removefbcertdir: + file.absent: + - name: /etc/pki/filebeat.crt + - onlyif: "[ -d /etc/pki/filebeat.crt ]" + +removefbp8dir: + file.absent: + - name: /etc/pki/filebeat.p8 + - onlyif: "[ -d /etc/pki/filebeat.p8 ]" + /etc/pki/influxdb.key: x509.private_key_managed: - CN: {{ manager }} @@ -370,18 +380,7 @@ fleetkeyperms: {% endif %} {% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-searchnode', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-import'] %} - -removefbcertdir: - file.absent: - - name: /etc/pki/filebeat.crt - - onlyif: "[ -d /etc/pki/filebeat.crt ]" - -removefbp8dir: - file.absent: - - name: /etc/pki/filebeat.p8 - - onlyif: "[ -d /etc/pki/filebeat.p8 ]" - - + fbcertdir: file.directory: - name: /opt/so/conf/filebeat/etc/pki From 507a3e852c3b137da7a395c94e07fea5cab0e7ce Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 20:02:38 -0400 Subject: [PATCH 347/376] Update init.sls --- salt/ssl/init.sls | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index df6951591..0f51ea4c0 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -46,6 +46,11 @@ removefbp8dir: file.absent: - name: /etc/pki/filebeat.p8 - onlyif: "[ -d /etc/pki/filebeat.p8 ]" + +removeesp12dir: + 
file.absent: + - name: /etc/pki/elasticsearch.p12 + - onlyif: "[ -d /etc/pki/elasticsearch.p12 ]" /etc/pki/influxdb.key: x509.private_key_managed: From d969b1e1b7ca5861ba4e6e40ea5d0df11a7e502b Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 20:56:08 -0400 Subject: [PATCH 348/376] Update init.sls --- salt/ssl/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 0f51ea4c0..23eafd14f 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -124,7 +124,7 @@ rediskeyperms: - group: 939 {% endif %} -{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import'] %} +{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %} /etc/pki/filebeat.key: x509.private_key_managed: - CN: {{ manager }} From f7d3dca322f9ef4782a30cfe17637655b0dd8b14 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 21:00:28 -0400 Subject: [PATCH 349/376] Fix duplicate state --- salt/ssl/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 23eafd14f..b8852d7a0 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -521,7 +521,7 @@ fleetkeyperms: {% endif %} -{% if grains['role'] in ['so-node', 'so-searchnode', 'so-heavynode'] %} +{% if grains['role'] in ['so-node', 'so-searchnode'] %} # Create a cert for elasticsearch /etc/pki/elasticsearch.key: x509.private_key_managed: From f9e5ea8ba7dedbd8ee0cf8d9387fbd637e51e4a4 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 21:12:41 -0400 Subject: [PATCH 350/376] Fix SSL for filebeat --- salt/filebeat/etc/filebeat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml index a4525b494..6849b1c08 100644 --- a/salt/filebeat/etc/filebeat.yml +++ b/salt/filebeat/etc/filebeat.yml @@ -1,5 +1,5 @@ {%- if grains.role == 'so-heavynode' %} -{%- set MANAGER = salt['pillar.get']('sensor:mainip' '') %} +{%- set MANAGER = salt['grains.get']('host' '') %} {%- else %} {%- set MANAGER = salt['grains.get']('master') %} {%- endif %} From ccc2ed4478cf287d2986d16fb1fa174f8045ee93 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 21:18:57 -0400 Subject: [PATCH 351/376] don't create symlinks if a heavy node --- salt/ssl/init.sls | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index b8852d7a0..0e430a3ce 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -176,7 +176,8 @@ chownilogstashfilebeatp8: - mode: 640 - user: 931 - group: 939 - + + {% if grains.role != 'so-heavynode' %} # Create Symlinks to the keys so I can distribute it to all the things filebeatdir: file.directory: @@ -266,7 +267,7 @@ miniokeyperms: - name: /etc/pki/minio.key - mode: 640 - group: 939 - + {% endif %} # Create a cert for elasticsearch /etc/pki/elasticsearch.key: x509.private_key_managed: From 2fce138d950aba1409734b7911661572d91d85ad Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 19 Aug 2020 21:26:27 -0400 Subject: [PATCH 352/376] Change it to grains.host instead of grains.id --- salt/ssl/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 0e430a3ce..70d4c4b6a 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -145,7 +145,7 @@ rediskeyperms: - signing_policy: filebeat - public_key: 
/etc/pki/filebeat.key {% if grains.role == 'so-heavynode' %} - - CN: {{grains.id}} + - CN: {{grains.host}} {% else %} - CN: {{manager}} {% endif %} From 43f4ebbcf1fccfb492fbc86b2cad3bf15933ab0b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 20 Aug 2020 09:05:38 -0400 Subject: [PATCH 353/376] remove monint from managersearch since they dont have a monint --- salt/grafana/init.sls | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/grafana/init.sls b/salt/grafana/init.sls index ce70a4a22..9fdd26b12 100644 --- a/salt/grafana/init.sls +++ b/salt/grafana/init.sls @@ -113,7 +113,6 @@ dashboard-managersearch: - defaults: SERVERNAME: {{ SN }} MANINT: {{ SNDATA.manint }} - MONINT: {{ SNDATA.monint }} CPUS: {{ SNDATA.totalcpus }} UID: so_overview ROOTFS: {{ SNDATA.rootfs }} From 22c918038615b082a61dcdac9b67af90e3f4738a Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 20 Aug 2020 10:03:49 -0400 Subject: [PATCH 354/376] Improve redirection of setup command output to log file, including stderr --- setup/so-functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 57f4e4504..8dd3c6bed 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -42,7 +42,8 @@ title() { logCmd() { cmd=$1 - info "Executing command: $cmd\n$($cmd)\n" + info "Executing command: $cmd" + $cmd >> "$setup_log" 2>&1 } analyze_system() { From 896bf6b78c2d7f8346e6f180c65efe00e63737ae Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 20 Aug 2020 10:08:10 -0400 Subject: [PATCH 355/376] Update doc links to 2.1 --- README.md | 12 ++++++------ setup/so-whiptail | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 912ac745a..f21ab6c97 100644 --- a/README.md +++ b/README.md @@ -14,24 +14,24 @@ Security Onion 2.0.2 RC1 is here! This version requires a fresh install, but the ### Release Notes -https://docs.securityonion.net/en/2.0/release-notes.html +https://docs.securityonion.net/en/2.1/release-notes.html ### Requirements -https://docs.securityonion.net/en/2.0/hardware.html +https://docs.securityonion.net/en/2.1/hardware.html ### Download -https://docs.securityonion.net/en/2.0/download.html +https://docs.securityonion.net/en/2.1/download.html ### Installation -https://docs.securityonion.net/en/2.0/installation.html +https://docs.securityonion.net/en/2.1/installation.html ### FAQ -https://docs.securityonion.net/en/2.0/faq.html +https://docs.securityonion.net/en/2.1/faq.html ### Feedback -https://docs.securityonion.net/en/2.0/community-support.html +https://docs.securityonion.net/en/2.1/community-support.html diff --git a/setup/so-whiptail b/setup/so-whiptail index a652c128b..7ed300939 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -339,7 +339,7 @@ whiptail_storage_requirements() { You need ${needed_val} to meet minimum requirements. - Visit https://docs.securityonion.net/en/2.0/hardware.html for more information. + Visit https://docs.securityonion.net/en/2.1/hardware.html for more information. Press YES to continue anyway, or press NO to cancel. 
EOM From 3f04e566f2bc2f3c08c94e6043397059946b9eb3 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Thu, 20 Aug 2020 14:16:05 +0000 Subject: [PATCH 356/376] Add defaults file for fwdnode --- setup/automation/aws_forwardnode_defaults | 78 +++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 setup/automation/aws_forwardnode_defaults diff --git a/setup/automation/aws_forwardnode_defaults b/setup/automation/aws_forwardnode_defaults new file mode 100644 index 000000000..99d8f21be --- /dev/null +++ b/setup/automation/aws_forwardnode_defaults @@ -0,0 +1,78 @@ +#!/bin/bash + +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +TESTING=true + +address_type=DHCP +ADMINUSER=onionuser +ADMINPASS1=onionuser +ADMINPASS2=onionuser +#ALLOW_CIDR=0.0.0.0/0 +#ALLOW_ROLE=a +BASICZEEK=1 +BASICSURI=1 +# BLOGS= +BNICS=ens6 +ZEEKVERSION=ZEEK +# CURCLOSEDAYS= +# EVALADVANCED=BASIC +#GRAFANA=1 +# HELIXAPIKEY= +HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 +HNSENSOR=inherit +HOSTNAME=forwardnode-aws +install_type=SENSOR +# LSINPUTBATCHCOUNT= +# LSINPUTTHREADS= +# LSPIPELINEBATCH= +# LSPIPELINEWORKERS= +#MANAGERADV=BASIC +MANAGERUPDATES=1 +# MDNS= +# MGATEWAY= +# MIP= +# MMASK= +MNIC=ens5 +# MSEARCH= +MSRV=manager-aws +MSRVIP=172.16.163.10 +# MTU= +#NIDS=Suricata +# NODE_ES_HEAP_SIZE= +# NODE_LS_HEAP_SIZE= +#NODESETUP=NODEBASIC +NSMSETUP=BASIC +NODEUPDATES=MANAGER +# OINKCODE= +#OSQUERY=1 +# PATCHSCHEDULEDAYS= +# PATCHSCHEDULEHOURS= +PATCHSCHEDULENAME=auto +#PLAYBOOK=1 +# REDIRECTHOST= +#REDIRECTINFO=HOSTNAME +#RULESETUP=ETOPEN +# SHARDCOUNT= +SKIP_REBOOT=0 +SOREMOTEPASS1=onionuser +SOREMOTEPASS2=onionuser +STRELKA=1 +#THEHIVE=1 +WAZUH=1 +WEBUSER=onionuser@somewhere.invalid +WEBPASSWD1=0n10nus3r +WEBPASSWD2=0n10nus3r From df95baa8354c3e9b306238e5c1646f0009045310 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 20 Aug 2020 10:45:48 -0400 Subject: [PATCH 357/376] Point logstash to use intca.crt --- salt/logstash/init.sls | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index c82383375..33fc496dc 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -168,7 +168,11 @@ so-logstash: - /sys/fs/cgroup:/sys/fs/cgroup:ro - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro + {% if grains['role'] == 'so-heavynode' %} + - /etc/ssl/certs/intca.crt:/usr/share/filebeat/ca.crt:ro + {% else %} - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro + {% endif %} - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro - /etc/pki/ca.cer:/ca/ca.crt:ro From 377c841c31e2c4882dfc3032cfefc48361cd80ff Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 20 Aug 2020 13:11:53 -0400 Subject: [PATCH 358/376] Switch back to direct command for removing setup 
from bash_profile due to how sed is interpreting the quoted expression --- setup/so-functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 8dd3c6bed..be450bad3 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -714,7 +714,8 @@ disable_auto_start() { logCmd "crontab -u $INSTALLUSERNAME -r" # Truncate last line of the bash profile - logCmd "sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile" + info "Removing auto-run of setup from bash profile" + sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile >> "$setup_log" 2>&1 } From 3eea2c6b103d38166aa27511320b0e7f5d37a8a0 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 20 Aug 2020 13:26:14 -0400 Subject: [PATCH 359/376] 2.1.0 Release notes in changes.json --- salt/soc/files/soc/changes.json | 59 +++++++++++---------------------- 1 file changed, 20 insertions(+), 39 deletions(-) diff --git a/salt/soc/files/soc/changes.json b/salt/soc/files/soc/changes.json index 4f359a996..eb381c815 100644 --- a/salt/soc/files/soc/changes.json +++ b/salt/soc/files/soc/changes.json @@ -1,43 +1,24 @@ { - "title": "Security Onion 2.0.3 RC1 is here!", + "title": "Security Onion 2.1.0 RC2 is here!", "changes": [ - { "summary": "Resolved an issue with large drives and the ISO install." }, - { "summary": "Modified ISO installation to use Logical Volume Management (LVM) for disk partitioning." }, - { "summary": "Updated Elastic Stack components to version 7.8.1." }, - { "summary": "Updated Zeek to version 3.0.8." }, - { "summary": "Fixed standalone pcap interval issue." }, - { "summary": "Security Fix 1067: variables.txt from ISO install stays on disk for 10 days." }, - { "summary": "Security Fix 1068: Remove user values from static.sls." }, - { "summary": "Issue 1059: Fix distributed deployment sensor interval issue allowing PCAP." }, - { "summary": "Issue 1058: Support for passwords that start with special characters." }, - { "summary": "Minor soup updates." }, - { "summary": "Re-branded 2.0 to give it a fresh look." }, - { "summary": "All documentation has moved to https://docs.securityonion.net/en/2.0" }, - { "summary": "soup is alive! Note: This tool only updates Security Onion components. Please use the built-in OS update process to keep the OS and other components up to date." }, - { "summary": "so-import-pcap is back! See the docs here: http://docs.securityonion.net/en/2.0/so-import-pcap." }, - { "summary": "Fixed issue with so-features-enable." }, - { "summary": "Users can now pivot to PCAP from Suricata alerts." }, - { "summary": "ISO install now prompts users to create an admin/sudo user instead of using a default account name." }, - { "summary": "The web email & password set during setup is now used to create the initial accounts for TheHive, Cortex, and Fleet." }, - { "summary": "Fixed issue with disk cleanup." }, - { "summary": "Changed the default permissions for /opt/so to keep non-priviledged users from accessing salt and related files." }, - { "summary": "Locked down access to certain SSL keys." }, - { "summary": "Suricata logs now compress after they roll over." }, - { "summary": "Users can now easily customize shard counts per index." }, - { "summary": "Improved Elastic ingest parsers including Windows event logs and Sysmon logs shipped with WinLogbeat and Osquery (ECS)." }, - { "summary": "Elastic nodes are now HOT by default, making it easier to add a warm node later." }, - { "summary": "so-allow now runs at the end of an install so users can enable access right away." 
}, - { "summary": "Alert severities across Wazuh, Suricata and Playbook (Sigma) have been standardized and copied to event.severity:
  • 1 = Low
  • 2 = Medium
  • 3 = High
  • 4 = Critical
" }, - { "summary": "Initial implementation of alerting queues:
  • Low & Medium alerts are accessible through Kibana & Hunt.
  • High & Critical alerts are accessible through Kibana, Hunt and TheHive for immediate analysis.
" }, - { "summary": "ATT&CK Navigator is now a statically-hosted site in the nginx container." }, - { "summary": "Playbook updates:
  • All Sigma rules in the community repo (500+) are now imported and kept up to date.
  • Initial implementation of automated testing when a Play's detection logic has been edited (i.e., Unit Testing).
  • Updated UI Theme.
  • Once authenticated through SOC, users can now access Playbook with analyst permissions without login.
" }, - { "summary": "Kolide Launcher has been updated to include the ability to pass arbitrary flags. This new functionality was sponsored by SOS." }, - { "summary": "Fixed issue with Wazuh authd registration service port not being correctly exposed." }, - { "summary": "Added option for exposure of Elasticsearch REST API (port 9200) to so-allow for easier external querying/integration with other tools." }, - { "summary": "Added option to so-allow for external Strelka file uploads (e.g., via strelka-fileshot)." }, - { "summary": "Added default YARA rules for Strelka. Default rules are maintained by Florian Roth and pulled from https://github.com/Neo23x0/signature-base." }, - { "summary": "Added the ability to use custom Zeek scripts." }, - { "summary": "Renamed master server to manager node." }, - { "summary": "Improved unification of Zeek and Strelka file data." } + { "summary": "Known Issue: Once you update your grid to RC2, any new nodes that join the grid must be RC2 so if you try to join a new RC1 node it will fail. For best results, use the latest RC2 ISO (or RC2 installer from github) when joining to an RC2 grid." }, + { "summary": "Known Issue: Shipping Windows Eventlogs with Osquery will fail intermittently with utf8 errors logged in the Application log. This is scheduled to be fixed in Osquery 4.5." }, + { "summary": "Known Issue: When running soup to upgrade from RC1 to RC2, there is a Salt error that occurs during the final highstate. This error is related to the patch_os_schedule and can be ignored as it will not occur again in subsequent highstates." }, + { "summary": "Known Issue: When Search Nodes are upgraded from RC1 to RC2, there is a chance of a race condition where certificates are missing. This will show errors in the manager log to the remote node. To fix this run the following on the search node that is having the issue:
  • Stop elasticsearch - sudo so-elasticsearch-stop
  • Run the SSL state - sudo salt-call state.apply ssl
  • Restart elasticsearch - sudo so-elasticsearch-restart
" }, + { "summary": "" }, + { "summary": "Fixed an issue where the console was timing out and making it appear that the installer was hung" }, + { "summary": "Introduced Import node type ideal for running so-import-pcap to import pcap files and view the resulting logs in Hunt or Kibana" }, + { "summary": "Moved static.sls to global.sls to align the name with the functionality" }, + { "summary": "Traffic between nodes in a distributed deployment is now fully encrypted" }, + { "summary": "Playbook
  • Elastalert now runs active Plays every 3 minutes
  • Changed default rule-update config to only import Windows rules from the Sigma Community repo
  • Lots of bug fixes & stability improvements
" }, + { "summary": "Ingest Node parsing updates for Osquery and Winlogbeat - implemented single pipeline for Windows eventlogs & sysmon logs" }, + { "summary": "Upgraded Osquery to 4.4 and re-enabled auto-updates" }, + { "summary": "Upgraded to Salt 3001.1" }, + { "summary": "Upgraded Wazuh to 3.13.1" }, + { "summary": "Hunt interface now shows the timezone being used for the selected date range" }, + { "summary": "Fixed Cortex initialization so that TheHive integration and initial user set is correctly configured" }, + { "summary": "Improved management of TheHive/Cortex credentials" }, + { "summary": "SOC now allows for arbitrary, time-bounded PCAP job creation, with optional filtering by host and port" }, + { "summary": "Historical release notes can be found on our docs site. https://docs.securityonion.net/en/2.1/release-notes.html." }, ] } From d1e5649a68dcc08c5ab7b415201194202e57f257 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 20 Aug 2020 13:46:20 -0400 Subject: [PATCH 360/376] Corrected JSON typo and improved formatting --- salt/soc/files/soc/changes.json | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/salt/soc/files/soc/changes.json b/salt/soc/files/soc/changes.json index eb381c815..2c6f51c29 100644 --- a/salt/soc/files/soc/changes.json +++ b/salt/soc/files/soc/changes.json @@ -1,24 +1,20 @@ { "title": "Security Onion 2.1.0 RC2 is here!", "changes": [ - { "summary": "Known Issue: Once you update your grid to RC2, any new nodes that join the grid must be RC2 so if you try to join a new RC1 node it will fail. For best results, use the latest RC2 ISO (or RC2 installer from github) when joining to an RC2 grid." }, - { "summary": "Known Issue: Shipping Windows Eventlogs with Osquery will fail intermittently with utf8 errors logged in the Application log. This is scheduled to be fixed in Osquery 4.5." }, - { "summary": "Known Issue: When running soup to upgrade from RC1 to RC2, there is a Salt error that occurs during the final highstate. This error is related to the patch_os_schedule and can be ignored as it will not occur again in subsequent highstates." }, - { "summary": "Known Issue: When Search Nodes are upgraded from RC1 to RC2, there is a chance of a race condition where certificates are missing. This will show errors in the manager log to the remote node. To fix this run the following on the search node that is having the issue:
  • Stop elasticsearch - sudo so-elasticsearch-stop
  • Run the SSL state - sudo salt-call state.apply ssl
  • Restart elasticsearch - sudo so-elasticsearch-restart
" }, - { "summary": "" }, - { "summary": "Fixed an issue where the console was timing out and making it appear that the installer was hung" }, - { "summary": "Introduced Import node type ideal for running so-import-pcap to import pcap files and view the resulting logs in Hunt or Kibana" }, - { "summary": "Moved static.sls to global.sls to align the name with the functionality" }, - { "summary": "Traffic between nodes in a distributed deployment is now fully encrypted" }, + { "summary": "Known Issues
  • Once you update your grid to RC2, any new nodes that join the grid must also be RC2; attempting to join a new RC1 node will fail. For best results, use the latest RC2 ISO (or the RC2 installer from GitHub) when joining to an RC2 grid.
  • Shipping Windows event logs with Osquery will fail intermittently, with UTF-8 encoding errors logged in the Application log. This is scheduled to be fixed in Osquery 4.5.
  • When running soup to upgrade from RC1 to RC2, there is a Salt error that occurs during the final highstate. This error is related to the patch_os_schedule and can be ignored as it will not occur again in subsequent highstates.
  • When Search Nodes are upgraded from RC1 to RC2, there is a chance of a race condition where certificates are missing. This will show errors in the manager log for the remote node. To fix this, run the following on the search node that is having the issue:
    1. Stop elasticsearch - sudo so-elasticsearch-stop
    2. Run the SSL state - sudo salt-call state.apply ssl
    3. Restart elasticsearch - sudo so-elasticsearch-restart
" }, + { "summary": "Fixed an issue where the console was timing out and making it appear that the installer was hung." }, + { "summary": "Introduced Import node, which is ideal for running so-import-pcap to import pcap files and view the resulting logs in Hunt or Kibana." }, + { "summary": "Moved static.sls to global.sls to align the name with the functionality." }, + { "summary": "Traffic between nodes in a distributed deployment is now fully encrypted." }, { "summary": "Playbook
  • Elastalert now runs active Plays every 3 minutes
  • Changed default rule-update config to only import Windows rules from the Sigma Community repo
  • Lots of bug fixes & stability improvements
" }, { "summary": "Ingest Node parsing updates for Osquery and Winlogbeat - implemented single pipeline for Windows eventlogs & sysmon logs" }, - { "summary": "Upgraded Osquery to 4.4 and re-enabled auto-updates" }, + { "summary": "Upgraded Osquery to 4.4 and re-enabled auto-updates." }, { "summary": "Upgraded to Salt 3001.1" }, { "summary": "Upgraded Wazuh to 3.13.1" }, - { "summary": "Hunt interface now shows the timezone being used for the selected date range" }, - { "summary": "Fixed Cortex initialization so that TheHive integration and initial user set is correctly configured" }, - { "summary": "Improved management of TheHive/Cortex credentials" }, - { "summary": "SOC now allows for arbitrary, time-bounded PCAP job creation, with optional filtering by host and port" }, - { "summary": "Historical release notes can be found on our docs site. https://docs.securityonion.net/en/2.1/release-notes.html." }, + { "summary": "Hunt interface now shows the timezone being used for the selected date range." }, + { "summary": "Fixed Cortex initialization so that TheHive integration and initial user set is correctly configured." }, + { "summary": "Improved management of TheHive/Cortex credentials." }, + { "summary": "SOC now allows for arbitrary, time-bounded PCAP job creation, with optional filtering by host and port." }, + { "summary": "Historical release notes can be found on our docs website: https://docs.securityonion.net/en/2.1/release-notes.html" } ] } From a7a0520cfeb6916c1f89ab3d1c878edaebb2da80 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 20 Aug 2020 14:20:09 -0400 Subject: [PATCH 361/376] remove bonding for import node --- salt/import/bond.sls | 6 ------ setup/so-setup | 5 ----- 2 files changed, 11 deletions(-) delete mode 100644 salt/import/bond.sls diff --git a/salt/import/bond.sls b/salt/import/bond.sls deleted file mode 100644 index 14de298ec..000000000 --- a/salt/import/bond.sls +++ /dev/null @@ -1,6 +0,0 @@ -configure_bond0: - network.managed: - - name: bond0 - - type: bond - - mode: '1' - - enabled: True \ No newline at end of file diff --git a/setup/so-setup b/setup/so-setup index 1f26f3a03..e89ed38d4 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -542,11 +542,6 @@ fi salt-call state.apply salt.minion -l info >> $setup_log 2>&1 fi - if [[ $is_import ]]; then - set_progress_str 22 'Configuring bond interface' - salt-call state.apply import.bond -l info >> $setup_log 2>&1 - fi - set_progress_str 23 'Generating CA and checking in' salt_checkin >> $setup_log 2>&1 From b7da768dc7bcd1e8560cc21c8f5504449f696777 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 20 Aug 2020 16:46:32 -0400 Subject: [PATCH 362/376] add logrotate --- salt/suricata/files/suri-rotate.conf | 12 ++++++++++++ salt/suricata/init.sls | 6 ++++++ 2 files changed, 18 insertions(+) create mode 100644 salt/suricata/files/suri-rotate.conf diff --git a/salt/suricata/files/suri-rotate.conf b/salt/suricata/files/suri-rotate.conf new file mode 100644 index 000000000..40232633f --- /dev/null +++ b/salt/suricata/files/suri-rotate.conf @@ -0,0 +1,12 @@ +/opt/so/log/suricata/stats.log +{ + daily + rotate 2 + missingok + nocompress + create + sharedscripts + postrotate + docker exec -d so-suricata sh -c 'kill -HUP 6' + endscript +} \ No newline at end of file diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index a15255af1..e49b59f4f 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -152,3 +152,9 @@ so-suricata: - file: surithresholding - file: /opt/so/conf/suricata/rules/ - file: 
/opt/so/conf/suricata/bpf + +surilogrotate: + file.managed: + - name: /etc/logrotate.d/suristats + - source: salt://suricata/files/suri-rotate.conf + - mode: 644 \ No newline at end of file From 2b88f22eb209732769a873ad7081082994b1cfab Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 20 Aug 2020 17:57:36 -0400 Subject: [PATCH 363/376] Make HUP for rotate more reliable --- salt/suricata/files/suri-rotate.conf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/suricata/files/suri-rotate.conf b/salt/suricata/files/suri-rotate.conf index 40232633f..e8461c48f 100644 --- a/salt/suricata/files/suri-rotate.conf +++ b/salt/suricata/files/suri-rotate.conf @@ -7,6 +7,6 @@ create sharedscripts postrotate - docker exec -d so-suricata sh -c 'kill -HUP 6' + docker exec -d so-suricata bash -c 'kill -HUP $(cat /var/run/suricata.pid)' endscript -} \ No newline at end of file +} From 05d727e599fe5eec193e1dedae6ffcc1b53a582c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 20 Aug 2020 19:18:39 -0400 Subject: [PATCH 364/376] Final changes.json update --- salt/soc/files/soc/changes.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/soc/files/soc/changes.json b/salt/soc/files/soc/changes.json index 2c6f51c29..5b0204ae0 100644 --- a/salt/soc/files/soc/changes.json +++ b/salt/soc/files/soc/changes.json @@ -4,6 +4,7 @@ { "summary": "Known Issues
  • Once you update your grid to RC2, any new nodes that join the grid must also be RC2; attempting to join a new RC1 node will fail. For best results, use the latest RC2 ISO (or the RC2 installer from GitHub) when joining to an RC2 grid.
  • Shipping Windows event logs with Osquery will fail intermittently, with UTF-8 encoding errors logged in the Application log. This is scheduled to be fixed in Osquery 4.5.
  • When running soup to upgrade from RC1 to RC2, there is a Salt error that occurs during the final highstate. This error is related to the patch_os_schedule and can be ignored as it will not occur again in subsequent highstates.
  • When Search Nodes are upgraded from RC1 to RC2, there is a chance of a race condition where certificates are missing. This will show errors in the manager log for the remote node. To fix this, run the following on the search node that is having the issue:
    1. Stop elasticsearch - sudo so-elasticsearch-stop
    2. Run the SSL state - sudo salt-call state.apply ssl
    3. Restart elasticsearch - sudo so-elasticsearch-restart
" }, { "summary": "Fixed an issue where the console was timing out and making it appear that the installer was hung." }, { "summary": "Introduced Import node, which is ideal for running so-import-pcap to import pcap files and view the resulting logs in Hunt or Kibana." }, + { "summary": "Suricata stats.log now rotates once a day. If you have a bunch of suriloss defunct processes on nodes that have it, do the following:
  • Stop suricata - sudo so-suricata-stop
  • Remove the current stats.log - sudo rm /opt/so/log/suricata/stats.log
  • Reboot the machine - shutdown -r now
" }, { "summary": "Moved static.sls to global.sls to align the name with the functionality." }, { "summary": "Traffic between nodes in a distributed deployment is now fully encrypted." }, { "summary": "Playbook
  • Elastalert now runs active Plays every 3 minutes
  • Changed default rule-update config to only import Windows rules from the Sigma Community repo
  • Lots of bug fixes & stability improvements
" }, @@ -17,4 +18,4 @@ { "summary": "SOC now allows for arbitrary, time-bounded PCAP job creation, with optional filtering by host and port." }, { "summary": "Historical release notes can be found on our docs website: https://docs.securityonion.net/en/2.1/release-notes.html" } ] -} +} \ No newline at end of file From d0eae47047fd25ff892b34064590d8b646164842 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 20 Aug 2020 21:08:17 -0400 Subject: [PATCH 365/376] Update ISO download details and signature --- VERIFY_ISO.md | 24 ++++++++++++------------ sigs/securityonion-2.1.0-rc2.iso.sig | Bin 0 -> 543 bytes 2 files changed, 12 insertions(+), 12 deletions(-) create mode 100644 sigs/securityonion-2.1.0-rc2.iso.sig diff --git a/VERIFY_ISO.md b/VERIFY_ISO.md index 7ff0536b9..a864aa1bf 100644 --- a/VERIFY_ISO.md +++ b/VERIFY_ISO.md @@ -1,16 +1,16 @@ -### 2.0.2-rc1 ISO image built on 2020/07/23 +### 2.1.0-rc2 ISO image built on 2020/08/20 ### Download and Verify -2.0.2-rc1 ISO image: -https://download.securityonion.net/file/securityonion/securityonion-2.0.2-rc1.iso +2.1.0-rc1 ISO image: +https://download.securityonion.net/file/securityonion/securityonion-2.1.0-rc2.iso -MD5: DC991385818DB7A4242F4BF7045D1250 -SHA1: 0BD458F01F10B324DF90F95201CC33B9DEBEAFA3 -SHA256: BE851E5FB1952942A9C10F6563DF6EF93381D734FDFD7E05FFAC77A5064F781A +MD5: 29356D26D96C8CD714B6847821FD7E5D +SHA1: B716910E02EBF331DFA51E6130DF6382A8D8B756 +SHA256: 655A28107B11A2FAB2D5D1028777BB4731F6E8562A3CE75D18CA378086135811 Signature for ISO image: -https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.0.2-rc1.iso.sig +https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.2.0-rc2.iso.sig Signing key: https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS @@ -24,22 +24,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma Download the signature file for the ISO: ``` -wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.0.2-rc1.iso.sig +wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.1.0-rc2.iso.sig ``` Download the ISO image: ``` -wget https://download.securityonion.net/file/securityonion/securityonion-2.0.2-rc1.iso +wget https://download.securityonion.net/file/securityonion/securityonion-2.1.0-rc2.iso ``` Verify the downloaded ISO image using the signature file: ``` -gpg --verify securityonion-2.0.2-rc1.iso.sig securityonion-2.0.2-rc1.iso +gpg --verify securityonion-2.1.0-rc2.iso.sig securityonion-2.1.0-rc2.iso ``` The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Thu 23 Jul 2020 10:38:04 PM EDT using RSA key ID FE507013 +gpg: Signature made Thu 20 Aug 2020 07:41:48 PM EDT using RSA key ID FE507013 gpg: Good signature from "Security Onion Solutions, LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. 
@@ -47,4 +47,4 @@ Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013 ``` Once you've verified the ISO image, you're ready to proceed to our Installation guide: -https://docs.securityonion.net/en/2.0/installation.html +https://docs.securityonion.net/en/2.1/installation.html diff --git a/sigs/securityonion-2.1.0-rc2.iso.sig b/sigs/securityonion-2.1.0-rc2.iso.sig new file mode 100644 index 0000000000000000000000000000000000000000..b8a9bb057246b025352da5ecd893fda710d231e2 GIT binary patch literal 543 zcmV+)0^t3L0vrSY0RjL91p;3`3OoP`2@re`V7LBIa1$VN5C3mbeUQ~-V6C`}XQ)Dr z-P=rg&g71gtRBQ6RJ32A+@GyuvT;+Vn_R_FWEC=lRoe<6Vx8z@;`3L` zwInl?AgB0+hJW`~>qi`6@6suW1H%u8r2~n&GLnJlnD0GKa_*@U3USBCCX?KT#T{&SrfaLw^ z-tEPW#iTem1}Fw4BK^ivcR)0WiB#$U&2%A;7hR=YQn0zhXt_8(ZlG^$w1v(`{cxqLH z#Aj6r;y)y+4}QX^(mh^z$A5I&V8UF2vV(;f7RKi;W9ZjDT)Lm?CzuHa(N+vquvr*> z;`zmUdCS@!{M^T$b9c9cxq4_>phpLDjmEB&1p`7TE08XB~(y_seS2ipJu literal 0 HcmV?d00001 From bdb8f616e492ba77d73e34e06cf1d88b5b873fc8 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 21 Aug 2020 09:08:44 -0400 Subject: [PATCH 366/376] Update VERIFY_ISO.md --- VERIFY_ISO.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERIFY_ISO.md b/VERIFY_ISO.md index a864aa1bf..b77362eb7 100644 --- a/VERIFY_ISO.md +++ b/VERIFY_ISO.md @@ -10,7 +10,7 @@ SHA1: B716910E02EBF331DFA51E6130DF6382A8D8B756 SHA256: 655A28107B11A2FAB2D5D1028777BB4731F6E8562A3CE75D18CA378086135811 Signature for ISO image: -https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.2.0-rc2.iso.sig +https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.1.0-rc2.iso.sig Signing key: https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS From 9c6cc81f704ea03105636dc38472f4f4f1f78bc1 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 21 Aug 2020 12:44:24 -0400 Subject: [PATCH 367/376] Remove improper suricata logging filter - this re-enables logging output for the suricata process itself --- salt/suricata/defaults.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index 3945573a2..a9dccdf46 100644 --- a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -174,7 +174,6 @@ suricata: logging: default-log-level: notice #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- " - default-output-filter: outputs: - console: enabled: "yes" From 64d34e46bfeafaa7c1ed4de12718743ad6b571f4 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 21 Aug 2020 14:31:04 -0400 Subject: [PATCH 368/376] Update ISO signature --- sigs/securityonion-2.1.0-rc2.iso.sig | Bin 543 -> 543 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/sigs/securityonion-2.1.0-rc2.iso.sig b/sigs/securityonion-2.1.0-rc2.iso.sig index b8a9bb057246b025352da5ecd893fda710d231e2..04a7abbab63d2f3e326d2478598b20e696e0b0d1 100644 GIT binary patch literal 543 zcmV+)0^t3L0vrSY0RjL91p;3`|H1$Y2@re`V7LBIa1+S)5B@heAB>i<78)+z*p84R z^N%A);!s8)_!PiXKG+*sK-N6}PJu^CahEFal2+fHJ$7zY<>vY9JwGOLxG3=b8sOQF zikui4RGBdu@AGcoDYY6QcM_|utV@LlJwUbUKLO0ewjGx%PTzbadK4XW@L1-(tBBG? 
ziN;k_OOcWGzp-2*3{LmZ22FXfH0einzea`(PD)c~fLbx?Ep$67O%`((vdbYi3S33nAc@9mY;+A|yeQb1CH)F*aI zOL(PncGmAQyYGS0albThv|K$d7W7=MoB?+5w54CqI3kl4!l6;kfn!_ZVPiQGztPW5 zR)<28q^zxaj5b*=2Gl^SSof7K@`prU5C@;D7PRyr)Ynm*d5m zP*3VQR{wCQ)+FL%Oji098d4sJG(O;{%?_H2GnB%`i5Lj$6{%iVx8z@;`3L` zwInl?AgB0+hJW`~>qi`6@6suW1H%u8r2~n&GLnJlnD0GKa_*@U3USBCCX?KT#T{&SrfaLw^ z-tEPW#iTem1}Fw4BK^ivcR)0WiB#$U&2%A;7hR=YQn0zhXt_8(ZlG^$w1v(`{cxqLH z#Aj6r;y)y+4}QX^(mh^z$A5I&V8UF2vV(;f7RKi;W9ZjDT)Lm?CzuHa(N+vquvr*> z;`zmUdCS@!{M^T$b9c9cxq4_>phpLDjmEB&1p`7TE08XB~(y_seS2ipJu From 3ea5bd0c536eb0f60bc82361938bf695a8ff463b Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 21 Aug 2020 14:44:12 -0400 Subject: [PATCH 369/376] Update MD5 and gpg info for new iso --- VERIFY_ISO.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/VERIFY_ISO.md b/VERIFY_ISO.md index b77362eb7..e3f0b4839 100644 --- a/VERIFY_ISO.md +++ b/VERIFY_ISO.md @@ -5,9 +5,9 @@ 2.1.0-rc1 ISO image: https://download.securityonion.net/file/securityonion/securityonion-2.1.0-rc2.iso -MD5: 29356D26D96C8CD714B6847821FD7E5D -SHA1: B716910E02EBF331DFA51E6130DF6382A8D8B756 -SHA256: 655A28107B11A2FAB2D5D1028777BB4731F6E8562A3CE75D18CA378086135811 +MD5: C0E543D57D720BB6DF9432C5EE7F8C74 +SHA1: 79E72A7312ADEB41EC8F580A5F5FFC9C7E7E08BE +SHA256: 4268AE058235BA945151A01F981BE439768750A86515B9204BFF5BC634793374 Signature for ISO image: https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.1.0-rc2.iso.sig @@ -39,7 +39,7 @@ gpg --verify securityonion-2.1.0-rc2.iso.sig securityonion-2.1.0-rc2.iso The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Thu 20 Aug 2020 07:41:48 PM EDT using RSA key ID FE507013 +gpg: Signature made Fri 21 Aug 2020 01:09:22 PM EDT using RSA key ID FE507013 gpg: Good signature from "Security Onion Solutions, LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. From daaa2d357914bd47a8173b86960f0a27decd150b Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 21 Aug 2020 16:24:09 -0400 Subject: [PATCH 370/376] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1e4ae7de7..d4e4e0a2b 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -## Security Onion 2.0.3.rc1 +## Security Onion 2.1.0.rc2 -Security Onion 2.0.3 RC1 is here! This version requires a fresh install, but there is good news - we have brought back soup! From now on, you should be able to run soup on the manager to upgrade your environment to RC2 and beyond! +Security Onion 2.1.0 RC2 is here! 
### Warnings and Disclaimers From ebd8105cb5504ac3803b5eaeeb9b710f888bfbb7 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sun, 23 Aug 2020 16:03:37 -0400 Subject: [PATCH 371/376] Rotate suri stats log hourly --- salt/suricata/cron/surirotate | 6 ++++++ salt/suricata/init.sls | 19 +++++++++++++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) create mode 100644 salt/suricata/cron/surirotate diff --git a/salt/suricata/cron/surirotate b/salt/suricata/cron/surirotate new file mode 100644 index 000000000..b77c4d635 --- /dev/null +++ b/salt/suricata/cron/surirotate @@ -0,0 +1,6 @@ +#!/bin/bash + +# Gzip the eve logs +find /nsm/suricata/eve*.json -type f -printf '%T@\t%p\n' | sort -t $'\t' -g | head -n -1 | cut -d $'\t' -f 2- | xargs nice gzip + +# TODO Add stats log \ No newline at end of file diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index e49b59f4f..f8fd13067 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -79,6 +79,12 @@ surilogscript: - source: salt://suricata/cron/surilogcompress - mode: 755 +surirotatescript: + file.managed: + - name: /usr/local/bin/surirotate + - source: salt://suricata/cron/surirotate + - mode: 755 + /usr/local/bin/surilogcompress: cron.present: - user: suricata @@ -155,6 +161,15 @@ so-suricata: surilogrotate: file.managed: - - name: /etc/logrotate.d/suristats + - name: /opt/so/conf/suricata/suri-rotate.conf - source: salt://suricata/files/suri-rotate.conf - - mode: 644 \ No newline at end of file + - mode: 644 + +/usr/local/bin/surirotate: + cron.present: + - user: root + - minute: '6' + - hour: '*' + - daymonth: '*' + - month: '*' + - dayweek: '*' From a97ca94354887fc33f3b4cf55e369805e926f419 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sun, 23 Aug 2020 16:08:17 -0400 Subject: [PATCH 372/376] Rotate suri stats log hourly --- salt/suricata/cron/surirotate | 4 +--- salt/suricata/init.sls | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/salt/suricata/cron/surirotate b/salt/suricata/cron/surirotate index b77c4d635..4da651d0e 100644 --- a/salt/suricata/cron/surirotate +++ b/salt/suricata/cron/surirotate @@ -1,6 +1,4 @@ #!/bin/bash # Gzip the eve logs -find /nsm/suricata/eve*.json -type f -printf '%T@\t%p\n' | sort -t $'\t' -g | head -n -1 | cut -d $'\t' -f 2- | xargs nice gzip - -# TODO Add stats log \ No newline at end of file +/usr/sbin/logrotate -f /opt/so/conf/suricata/suri-rotate.conf > /dev/null 2>&1 diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index f8fd13067..79e06db66 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -168,7 +168,7 @@ surilogrotate: /usr/local/bin/surirotate: cron.present: - user: root - - minute: '6' + - minute: '11' - hour: '*' - daymonth: '*' - month: '*' From e8568dbeb0943754c6ed693b4f62d0e71a1ce5d7 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sun, 23 Aug 2020 20:23:49 -0400 Subject: [PATCH 373/376] Update VERIFY_ISO.md --- VERIFY_ISO.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/VERIFY_ISO.md b/VERIFY_ISO.md index e3f0b4839..036ff23fd 100644 --- a/VERIFY_ISO.md +++ b/VERIFY_ISO.md @@ -5,9 +5,9 @@ 2.1.0-rc1 ISO image: https://download.securityonion.net/file/securityonion/securityonion-2.1.0-rc2.iso -MD5: C0E543D57D720BB6DF9432C5EE7F8C74 -SHA1: 79E72A7312ADEB41EC8F580A5F5FFC9C7E7E08BE -SHA256: 4268AE058235BA945151A01F981BE439768750A86515B9204BFF5BC634793374 +MD5: 9EAE772B64F5B3934C0DB7913E38D6D4 +SHA1: D0D347AE30564871DE81203C0CE53B950F8732CE +SHA256: 
888AC7758C975FAA0A7267E5EFCB082164AC7AC8DCB3B370C06BA0B8493DAC44 Signature for ISO image: https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.1.0-rc2.iso.sig @@ -39,7 +39,7 @@ gpg --verify securityonion-2.1.0-rc2.iso.sig securityonion-2.1.0-rc2.iso The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Fri 21 Aug 2020 01:09:22 PM EDT using RSA key ID FE507013 +gpg: Signature made Sun 23 Aug 2020 04:37:00 PM EDT using RSA key ID FE507013 gpg: Good signature from "Security Onion Solutions, LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. From aa3e3c3cec4872cdac2637ddfde45398831fdfeb Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sun, 23 Aug 2020 20:25:06 -0400 Subject: [PATCH 374/376] Update Sig --- sigs/securityonion-2.1.0-rc2.iso.sig | Bin 543 -> 543 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/sigs/securityonion-2.1.0-rc2.iso.sig b/sigs/securityonion-2.1.0-rc2.iso.sig index 04a7abbab63d2f3e326d2478598b20e696e0b0d1..cc03c894d3f4f660af2eb28f7f557fc9a8722ce1 100644 GIT binary patch literal 543 zcmV+)0^t3L0vrSY0RjL91p;3}(`*0=2@re`V7LBIa1&`65B(GwnjzxKHz1aZDu$rq z?eT?eZ&Im4!5#n}rgn(a<7RX!O-gBAyM}mpvEx)qh_?gd(geh0%k?@T<3oC*QT#}t zPUsVsKSRjpd^Y)@L@BvMZP_;mS`pLJVeEb|nW--r#-jT~#j}wr=s>U&=}u9-CMC!r2n7bvQGF=L23RlV89Dao z*fU0!!Req6xJisI1k0As#q4-WG(Ik>*dFG&qrkN-AvA-=@R+&HN6W@m)Gc;?%{oTw zNLUOwLK{>>tBOrn37*qTz%Q=$Ld+#r118MkOD3#z)=dYx?S3?KTw+~@q#Zr36_#(O zSOz0v6y+NaZ~sCs=SkX*at9WeRu)!=FR;}XDMy9Z^h63A654sHI^`y|=k*KaJsXDI zxg>57fUo#}ABGuqgRq9QDJ-a=j)x5{>23IDn&0K=kygffcwHfu?j`&nEVP68_}$j` zh~B8))00 h3&(x_8Mwt8K03P$N5ybIZ3DH*1&e?{$^}Zb!;?|Y0ek=e literal 543 zcmV+)0^t3L0vrSY0RjL91p;3`|H1$Y2@re`V7LBIa1+S)5B@heAB>i<78)+z*p84R z^N%A);!s8)_!PiXKG+*sK-N6}PJu^CahEFal2+fHJ$7zY<>vY9JwGOLxG3=b8sOQF zikui4RGBdu@AGcoDYY6QcM_|utV@LlJwUbUKLO0ewjGx%PTzbadK4XW@L1-(tBBG? ziN;k_OOcWGzp-2*3{LmZ22FXfH0einzea`(PD)c~fLbx?Ep$67O%`((vdbYi3S33nAc@9mY;+A|yeQb1CH)F*aI zOL(PncGmAQyYGS0albThv|K$d7W7=MoB?+5w54CqI3kl4!l6;kfn!_ZVPiQGztPW5 zR)<28q^zxaj5b*=2Gl^SSof7K@`prU5C@;D7PRyr)Ynm*d5m zP*3VQR{wCQ)+FL%Oji098d4sJG(O;{%?_H2GnB%`i5Lj$6{%i Date: Mon, 24 Aug 2020 06:09:30 -0400 Subject: [PATCH 375/376] Update VERIFY_ISO.md --- VERIFY_ISO.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERIFY_ISO.md b/VERIFY_ISO.md index 036ff23fd..76898f341 100644 --- a/VERIFY_ISO.md +++ b/VERIFY_ISO.md @@ -2,7 +2,7 @@ ### Download and Verify -2.1.0-rc1 ISO image: +2.1.0-rc2 ISO image: https://download.securityonion.net/file/securityonion/securityonion-2.1.0-rc2.iso MD5: 9EAE772B64F5B3934C0DB7913E38D6D4 From b627f565c9a8ef6c4ae00ff18645378513f2aa5e Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 24 Aug 2020 10:03:28 -0400 Subject: [PATCH 376/376] Update VERIFY_ISO.md --- VERIFY_ISO.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERIFY_ISO.md b/VERIFY_ISO.md index 76898f341..35cb1b4fd 100644 --- a/VERIFY_ISO.md +++ b/VERIFY_ISO.md @@ -1,4 +1,4 @@ -### 2.1.0-rc2 ISO image built on 2020/08/20 +### 2.1.0-rc2 ISO image built on 2020/08/23 ### Download and Verify
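
A note on the cross-cluster settings calls in soup: the pillar_changes() loop at the top of this section registers each search node as a remote cluster through the Elasticsearch _cluster/settings API, and PATCH 345 reverts the remote name from '$NAME'/'$IP' back to '$NAME' alone, presumably because a slash is not a valid character in a remote cluster key. A minimal sketch of the resulting API interaction, with searchnode_1 and its seed hostname used purely as placeholder values rather than names from a real grid:

```
#!/bin/bash
# Register a remote cluster for cross-cluster search; skip_unavailable
# lets searches proceed even when this remote is unreachable.
curl -s -XPUT http://localhost:9200/_cluster/settings \
  -H 'Content-Type: application/json' \
  -d '{"persistent": {"search": {"remote": {"searchnode_1": {"skip_unavailable": "true", "seeds": ["searchnode_1:9300"]}}}}}'

# Confirm the persistent setting was stored.
curl -s 'http://localhost:9200/_cluster/settings?pretty'
```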
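
A note on the logCmd() change in PATCH 354: replacing the command substitution with a direct append means stderr now reaches the setup log, and output is written as the command runs instead of being captured (stdout only) and logged after the fact. A rough sketch of the pattern; the setup_log path and the simplified info() helper are illustrative stand-ins for what so-functions actually defines:

```
#!/bin/bash
setup_log=/tmp/sosetup.log   # placeholder path for illustration

info() {
  echo "$1" >> "$setup_log"
}

logCmd() {
  cmd=$1
  info "Executing command: $cmd"
  # Append stdout and stderr to the log; the old $($cmd) form captured
  # stdout only, so error output never reached the log.
  $cmd >> "$setup_log" 2>&1
}

logCmd "ls -l /nonexistent"   # error text now lands in the setup log
```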
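
A note on the Suricata stats.log rotation in PATCH 362/363 and 371/372: rotation moves from /etc/logrotate.d to an hourly cron entry that forces logrotate against a dedicated config, with a postrotate step that HUPs Suricata so it reopens its log files. The pieces fit together roughly as sketched below; the PID lookup assumes Suricata writes /var/run/suricata.pid inside the so-suricata container, as the PATCH 363 postrotate script expects:

```
#!/bin/bash
# What the hourly surirotate cron entry runs (PATCH 372):
/usr/sbin/logrotate -f /opt/so/conf/suricata/suri-rotate.conf > /dev/null 2>&1

# The postrotate step inside suri-rotate.conf signals Suricata in its
# container; reading the pidfile is more reliable than the original
# hard-coded 'kill -HUP 6' (PATCH 363).
docker exec -d so-suricata bash -c 'kill -HUP $(cat /var/run/suricata.pid)'
```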