diff --git a/README.md b/README.md index 4c76caa1b..d4e4e0a2b 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -## Security Onion 2.0.3.rc1 +## Security Onion 2.1.0.rc2 -Security Onion 2.0.3 RC1 is here! This version requires a fresh install, but there is good news - we have brought back soup! From now on, you should be able to run soup on the manager to upgrade your environment to RC2 and beyond! +Security Onion 2.1.0 RC2 is here! ### Warnings and Disclaimers @@ -14,24 +14,24 @@ Security Onion 2.0.3 RC1 is here! This version requires a fresh install, but the ### Release Notes -https://docs.securityonion.net/en/2.0/release-notes.html +https://docs.securityonion.net/en/2.1/release-notes.html ### Requirements -https://docs.securityonion.net/en/2.0/hardware.html +https://docs.securityonion.net/en/2.1/hardware.html ### Download -https://docs.securityonion.net/en/2.0/download.html +https://docs.securityonion.net/en/2.1/download.html ### Installation -https://docs.securityonion.net/en/2.0/installation.html +https://docs.securityonion.net/en/2.1/installation.html ### FAQ -https://docs.securityonion.net/en/2.0/faq.html +https://docs.securityonion.net/en/2.1/faq.html ### Feedback -https://docs.securityonion.net/en/2.0/community-support.html +https://docs.securityonion.net/en/2.1/community-support.html diff --git a/VERIFY_ISO.md b/VERIFY_ISO.md index a2b10bfab..35cb1b4fd 100644 --- a/VERIFY_ISO.md +++ b/VERIFY_ISO.md @@ -1,16 +1,16 @@ -### 2.0.3-rc1 ISO image built on 2020/07/28 +### 2.1.0-rc2 ISO image built on 2020/08/23 ### Download and Verify -2.0.3-rc1 ISO image: -https://download.securityonion.net/file/securityonion/securityonion-2.0.3-rc1.iso +2.1.0-rc2 ISO image: +https://download.securityonion.net/file/securityonion/securityonion-2.1.0-rc2.iso -MD5: 126EDE15589BCB44A64F51637E6BF720 -SHA1: 5804EB797C655177533C55BB34569E1E2E0B2685 -SHA256: CDB9EEFEA965BD70ACC2FC64981A52BD83B85B47812261F79EC3930BB1924463 +MD5: 9EAE772B64F5B3934C0DB7913E38D6D4 +SHA1: D0D347AE30564871DE81203C0CE53B950F8732CE +SHA256: 888AC7758C975FAA0A7267E5EFCB082164AC7AC8DCB3B370C06BA0B8493DAC44 Signature for ISO image: -https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.0.3-rc1.iso.sig +https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.1.0-rc2.iso.sig Signing key: https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS @@ -24,22 +24,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma Download the signature file for the ISO: ``` -wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.0.3-rc1.iso.sig +wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.1.0-rc2.iso.sig ``` Download the ISO image: ``` -wget https://download.securityonion.net/file/securityonion/securityonion-2.0.3-rc1.iso +wget https://download.securityonion.net/file/securityonion/securityonion-2.1.0-rc2.iso ``` Verify the downloaded ISO image using the signature file: ``` -gpg --verify securityonion-2.0.3-rc1.iso.sig securityonion-2.0.3-rc1.iso +gpg --verify securityonion-2.1.0-rc2.iso.sig securityonion-2.1.0-rc2.iso ``` The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Tue 28 Jul 2020 10:36:55 PM EDT using RSA key ID FE507013 +gpg: Signature made Sun 23 Aug 2020 04:37:00 PM EDT using RSA key ID FE507013 gpg: Good signature from "Security Onion Solutions, 
LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. @@ -47,4 +47,4 @@ Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013 ``` Once you've verified the ISO image, you're ready to proceed to our Installation guide: -https://docs.securityonion.net/en/2.0/installation.html +https://docs.securityonion.net/en/2.1/installation.html diff --git a/VERSION b/VERSION index fbb4866b2..9212e0f1f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0.3-rc.1 +2.1.0-rc.2 diff --git a/files/firewall/assigned_hostgroups.local.map.yaml b/files/firewall/assigned_hostgroups.local.map.yaml index 5d9b662b6..50ef751a4 100644 --- a/files/firewall/assigned_hostgroups.local.map.yaml +++ b/files/firewall/assigned_hostgroups.local.map.yaml @@ -13,6 +13,7 @@ role: fleet: heavynode: helixsensor: + import: manager: managersearch: standalone: diff --git a/pillar/data/addtotab.sh b/pillar/data/addtotab.sh index 696ec171e..ac3d913a5 100644 --- a/pillar/data/addtotab.sh +++ b/pillar/data/addtotab.sh @@ -44,11 +44,11 @@ echo " guid: $GUID" >> $local_salt_dir/pillar/data/$TYPE.sls echo " rootfs: $ROOTFS" >> $local_salt_dir/pillar/data/$TYPE.sls echo " nsmfs: $NSM" >> $local_salt_dir/pillar/data/$TYPE.sls if [ $TYPE == 'sensorstab' ]; then - echo " monint: $MONINT" >> $local_salt_dir/pillar/data/$TYPE.sls + echo " monint: bond0" >> $local_salt_dir/pillar/data/$TYPE.sls salt-call state.apply grafana queue=True fi if [ $TYPE == 'evaltab' ] || [ $TYPE == 'standalonetab' ]; then - echo " monint: $MONINT" >> $local_salt_dir/pillar/data/$TYPE.sls + echo " monint: bond0" >> $local_salt_dir/pillar/data/$TYPE.sls if [ ! $10 ]; then salt-call state.apply grafana queue=True salt-call state.apply utility queue=True diff --git a/pillar/docker/config.sls b/pillar/docker/config.sls index 4d70fd517..647151eef 100644 --- a/pillar/docker/config.sls +++ b/pillar/docker/config.sls @@ -1,11 +1,11 @@ -{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%} -{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%} +{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%} +{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%} {% set WAZUH = salt['pillar.get']('manager:wazuh', '0') %} {% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %} {% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %} {% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %} {% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %} -{% set ZEEKVER = salt['pillar.get']('static:zeekversion', 'COMMUNITY') %} +{% set ZEEKVER = salt['pillar.get']('global:zeekversion', 'COMMUNITY') %} {% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %} eval: diff --git a/pillar/firewall/ports.sls b/pillar/firewall/ports.sls index 4f7c06bec..1e0be460b 100644 --- a/pillar/firewall/ports.sls +++ b/pillar/firewall/ports.sls @@ -33,6 +33,8 @@ firewall: - 9300 - 9400 - 9500 + - 9595 + - 9696 udp: - 1514 minions: diff --git a/pillar/logstash/init.sls b/pillar/logstash/init.sls index 6d51d0471..c2dfd9cfd 100644 --- a/pillar/logstash/init.sls +++ b/pillar/logstash/init.sls @@ -1,7 +1,6 @@ logstash: docker_options: port_bindings: - - 0.0.0.0:514:514 - 0.0.0.0:5044:5044 - 0.0.0.0:5644:5644 - 0.0.0.0:6050:6050 diff --git a/pillar/logstash/manager.sls b/pillar/logstash/manager.sls index 9c16d2625..6f3ba495b 100644 --- a/pillar/logstash/manager.sls +++ b/pillar/logstash/manager.sls @@ -1,3 
+1,4 @@ +{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'redis') %} logstash: pipelines: manager: @@ -5,3 +6,4 @@ logstash: - so/0009_input_beats.conf - so/0010_input_hhbeats.conf - so/9999_output_redis.conf.jinja + \ No newline at end of file diff --git a/pillar/logstash/search.sls b/pillar/logstash/search.sls index 486deb408..7a5aeec39 100644 --- a/pillar/logstash/search.sls +++ b/pillar/logstash/search.sls @@ -1,3 +1,4 @@ +{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'minio') %} logstash: pipelines: search: diff --git a/pillar/top.sls b/pillar/top.sls index 889f0b63f..73d66ef2a 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -2,7 +2,7 @@ base: '*': - patch.needs_restarting - '*_eval or *_helix or *_heavynode or *_sensor or *_standalone': + '*_eval or *_helix or *_heavynode or *_sensor or *_standalone or *_import': - match: compound - zeek @@ -14,14 +14,14 @@ base: - elasticsearch.search '*_sensor': - - static + - global - zeeklogs - healthcheck.sensor - minions.{{ grains.id }} '*_manager or *_managersearch': - match: compound - - static + - global - data.* - secrets - minions.{{ grains.id }} @@ -36,7 +36,7 @@ base: - secrets - healthcheck.eval - elasticsearch.eval - - static + - global - minions.{{ grains.id }} '*_standalone': @@ -48,20 +48,20 @@ base: - zeeklogs - secrets - healthcheck.standalone - - static + - global - minions.{{ grains.id }} '*_node': - - static + - global - minions.{{ grains.id }} '*_heavynode': - - static + - global - zeeklogs - minions.{{ grains.id }} '*_helix': - - static + - global - fireeye - zeeklogs - logstash @@ -69,14 +69,21 @@ base: - minions.{{ grains.id }} '*_fleet': - - static + - global - data.* - secrets - minions.{{ grains.id }} '*_searchnode': - - static + - global - logstash - logstash.search - elasticsearch.search - minions.{{ grains.id }} + + '*_import': + - zeeklogs + - secrets + - elasticsearch.eval + - global + - minions.{{ grains.id }} \ No newline at end of file diff --git a/salt/ca/init.sls b/salt/ca/init.sls index da442cc2a..62b89d351 100644 --- a/salt/ca/init.sls +++ b/salt/ca/init.sls @@ -10,12 +10,16 @@ file.directory: [] pki_private_key: - x509.private_key_managed: - - name: /etc/pki/ca.key - - bits: 4096 - - passphrase: - - cipher: aes_256_cbc - - backup: True + x509.private_key_managed: + - name: /etc/pki/ca.key + - bits: 4096 + - passphrase: + - cipher: aes_256_cbc + - backup: True + {% if salt['file.file_exists']('/etc/pki/ca.key') -%} + - prereq: + - x509: /etc/pki/ca.crt + {%- endif %} /etc/pki/ca.crt: x509.certificate_managed: @@ -32,22 +36,19 @@ pki_private_key: - days_valid: 3650 - days_remaining: 0 - backup: True - - managed_private_key: - name: /etc/pki/ca.key - bits: 4096 - backup: True + - replace: False - require: - file: /etc/pki -send_x509_pem_entries_to_mine: +x509_pem_entries: module.run: - mine.send: - - func: x509.get_pem_entries - - glob_path: /etc/pki/ca.crt + - name: x509.get_pem_entries + - glob_path: /etc/pki/ca.crt cakeyperms: file.managed: - replace: False - name: /etc/pki/ca.key - mode: 640 - - group: 939 + - group: 939 \ No newline at end of file diff --git a/salt/common/maps/import.map.jinja b/salt/common/maps/import.map.jinja new file mode 100644 index 000000000..324536d11 --- /dev/null +++ b/salt/common/maps/import.map.jinja @@ -0,0 +1,10 @@ +{% set docker = { + 'containers': [ + 'so-filebeat', + 'so-nginx', + 'so-soc', + 'so-kratos', + 'so-elasticsearch', + 'so-kibana' + ] +} %} \ No newline at end of file diff --git a/salt/common/maps/so-status.map.jinja 
b/salt/common/maps/so-status.map.jinja index 93f5f3d13..21dd14ec9 100644 --- a/salt/common/maps/so-status.map.jinja +++ b/salt/common/maps/so-status.map.jinja @@ -20,7 +20,7 @@ {% if role in ['eval', 'managersearch', 'manager', 'standalone'] %} {{ append_containers('manager', 'grafana', 0) }} - {{ append_containers('static', 'fleet_manager', 0) }} + {{ append_containers('global', 'fleet_manager', 0) }} {{ append_containers('manager', 'wazuh', 0) }} {{ append_containers('manager', 'thehive', 0) }} {{ append_containers('manager', 'playbook', 0) }} @@ -29,11 +29,11 @@ {% endif %} {% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %} - {{ append_containers('static', 'strelka', 0) }} + {{ append_containers('global', 'strelka', 0) }} {% endif %} {% if role in ['heavynode', 'standalone'] %} - {{ append_containers('static', 'zeekversion', 'SURICATA') }} + {{ append_containers('global', 'zeekversion', 'SURICATA') }} {% endif %} {% if role == 'searchnode' %} @@ -41,5 +41,5 @@ {% endif %} {% if role == 'sensor' %} - {{ append_containers('static', 'zeekversion', 'SURICATA') }} + {{ append_containers('global', 'zeekversion', 'SURICATA') }} {% endif %} \ No newline at end of file diff --git a/salt/common/tools/sbin/so-allow b/salt/common/tools/sbin/so-allow index c6d3d6bf0..a49a694a6 100755 --- a/salt/common/tools/sbin/so-allow +++ b/salt/common/tools/sbin/so-allow @@ -21,6 +21,30 @@ local_salt_dir=/opt/so/saltstack/local SKIP=0 +function usage { + +cat << EOF + +Usage: $0 [-abefhoprsw] [ -i IP ] + +This program allows you to add a firewall rule to allow connections from a new IP address or CIDR range. + +If you run this program with no arguments, it will present a menu for you to choose your options. + +If you want to automate and skip the menu, you can pass the desired options as command line arguments. + +EXAMPLES + +To add 10.1.2.3 to the analyst role: +so-allow -a -i 10.1.2.3 + +To add 10.1.2.0/24 to the osquery role: +so-allow -o -i 10.1.2.0/24 + +EOF + +} + while getopts "ahfesprbowi:" OPTION do case $OPTION in @@ -36,7 +60,7 @@ do FULLROLE="beats_endpoint" SKIP=1 ;; - e) + e) FULLROLE="elasticsearch_rest" SKIP=1 ;; @@ -127,7 +151,7 @@ salt-call state.apply firewall queue=True if grep -q -R "wazuh: 1" $local_salt_dir/pillar/*; then # If analyst, add to Wazuh AR whitelist if [ "$FULLROLE" == "analyst" ]; then - WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf" + WAZUH_MGR_CFG="/nsm/wazuh/etc/ossec.conf" if ! grep -q "$IP" $WAZUH_MGR_CFG ; then DATE=$(date) sed -i 's/<\/ossec_config>//' $WAZUH_MGR_CFG diff --git a/salt/common/tools/sbin/so-docker-refresh b/salt/common/tools/sbin/so-docker-refresh index ace1e9554..770d9f241 100755 --- a/salt/common/tools/sbin/so-docker-refresh +++ b/salt/common/tools/sbin/so-docker-refresh @@ -76,6 +76,7 @@ if [ $MANAGERCHECK != 'so-helix' ]; then "so-kibana:$VERSION" \ "so-kratos:$VERSION" \ "so-logstash:$VERSION" \ + "so-minio:$VERSION" \ "so-mysql:$VERSION" \ "so-nginx:$VERSION" \ "so-pcaptools:$VERSION" \ diff --git a/salt/common/tools/sbin/so-elastic-clear b/salt/common/tools/sbin/so-elastic-clear index 04c153f85..15b1041e1 100755 --- a/salt/common/tools/sbin/so-elastic-clear +++ b/salt/common/tools/sbin/so-elastic-clear @@ -14,7 +14,7 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') -%} . 
/usr/sbin/so-common SKIP=0 diff --git a/salt/common/tools/sbin/so-features-enable b/salt/common/tools/sbin/so-features-enable index c94aebcba..070ecedc0 100755 --- a/salt/common/tools/sbin/so-features-enable +++ b/salt/common/tools/sbin/so-features-enable @@ -29,9 +29,9 @@ manager_check() { } manager_check -VERSION=$(grep soversion $local_salt_dir/pillar/static.sls | cut -d':' -f2|sed 's/ //g') -# Modify static.sls to enable Features -sed -i 's/features: False/features: True/' $local_salt_dir/pillar/static.sls +VERSION=$(grep soversion $local_salt_dir/pillar/global.sls | cut -d':' -f2|sed 's/ //g') +# Modify global.sls to enable Features +sed -i 's/features: False/features: True/' $local_salt_dir/pillar/global.sls SUFFIX="-features" TRUSTED_CONTAINERS=( \ "so-elasticsearch:$VERSION$SUFFIX" \ diff --git a/salt/common/tools/sbin/so-import-pcap b/salt/common/tools/sbin/so-import-pcap index a45fe6777..f10f5fad9 100755 --- a/salt/common/tools/sbin/so-import-pcap +++ b/salt/common/tools/sbin/so-import-pcap @@ -15,10 +15,13 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set MANAGER = salt['grains.get']('master') %} -{% set VERSION = salt['pillar.get']('static:soversion') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} -{%- set MANAGERIP = salt['pillar.get']('static:managerip') -%} +{%- set MANAGER = salt['grains.get']('master') %} +{%- set VERSION = salt['pillar.get']('global:soversion') %} +{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip') -%} +{%- set URLBASE = salt['pillar.get']('global:url_base') %} + +. /usr/sbin/so-common function usage { cat << EOF @@ -32,13 +35,13 @@ EOF function pcapinfo() { PCAP=$1 ARGS=$2 - docker run --rm -v $PCAP:/input.pcap --entrypoint capinfos {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap $ARGS + docker run --rm -v "$PCAP:/input.pcap" --entrypoint capinfos {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap $ARGS } function pcapfix() { PCAP=$1 PCAP_OUT=$2 - docker run --rm -v $PCAP:/input.pcap -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1 + docker run --rm -v "$PCAP:/input.pcap" -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1 } function suricata() { @@ -57,7 +60,7 @@ function suricata() { -v /opt/so/conf/suricata/rules:/etc/suricata/rules:ro \ -v ${LOG_PATH}:/var/log/suricata/:rw \ -v ${NSM_PATH}/:/nsm/:rw \ - -v $PCAP:/input.pcap:ro \ + -v "$PCAP:/input.pcap:ro" \ -v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \ {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \ --runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1 @@ -76,7 +79,7 @@ function zeek() { -v $NSM_PATH/logs:/nsm/zeek/logs:rw \ -v $NSM_PATH/spool:/nsm/zeek/spool:rw \ -v $NSM_PATH/extracted:/nsm/zeek/extracted:rw \ - -v $PCAP:/input.pcap:ro \ + -v "$PCAP:/input.pcap:ro" \ -v /opt/so/conf/zeek/local.zeek:/opt/zeek/share/zeek/site/local.zeek:ro \ -v /opt/so/conf/zeek/node.cfg:/opt/zeek/etc/node.cfg:ro \ -v /opt/so/conf/zeek/zeekctl.cfg:/opt/zeek/etc/zeekctl.cfg:ro \ @@ -210,9 +213,9 @@ cat << EOF Import complete! You can use the following hyperlink to view data in the time range of your import. 
You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser: -https://{{ MANAGERIP }}/#/hunt?q=%2a%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM +https://{{ URLBASE }}/#/hunt?q=import.id:${HASH}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC -or you can manually set your Time Range to be: +or you can manually set your Time Range to be (in UTC): From: $START_OLDEST To: $END_NEWEST Please note that it may take 30 seconds or more for events to appear in Onion Hunt. diff --git a/salt/common/tools/sbin/so-kibana-config-export b/salt/common/tools/sbin/so-kibana-config-export index 8ee3f59b5..7f578a3ba 100755 --- a/salt/common/tools/sbin/so-kibana-config-export +++ b/salt/common/tools/sbin/so-kibana-config-export @@ -1,9 +1,9 @@ #!/bin/bash # -# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%} -# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%} -# {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', '') %} -# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %} +# {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager', False) -%} +# {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node', False) -%} +# {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', '') %} +# {%- set MANAGER = salt['pillar.get']('global:url_base', '') %} # # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC # diff --git a/salt/common/tools/sbin/so-playbook-sync b/salt/common/tools/sbin/so-playbook-sync index 8b2817eaa..f4c2c456e 100755 --- a/salt/common/tools/sbin/so-playbook-sync +++ b/salt/common/tools/sbin/so-playbook-sync @@ -17,4 +17,4 @@ . /usr/sbin/so-common -docker exec so-soctopus python3 playbook_play-sync.py >> /opt/so/log/soctopus/so-playbook-sync.log 2>&1 +docker exec so-soctopus python3 playbook_play-sync.py diff --git a/salt/common/tools/sbin/so-rule-update b/salt/common/tools/sbin/so-rule-update index f50d49322..19466c2b3 100755 --- a/salt/common/tools/sbin/so-rule-update +++ b/salt/common/tools/sbin/so-rule-update @@ -10,4 +10,4 @@ got_root() { } got_root -docker exec -it so-idstools /bin/bash -c 'cd /opt/so/idstools/etc && idstools-rulecat' +docker exec -d so-idstools /bin/bash -c 'cd /opt/so/idstools/etc && idstools-rulecat' diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index b2b8cacc4..d9fadce29 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -18,13 +18,18 @@ . /usr/sbin/so-common UPDATE_DIR=/tmp/sogh/securityonion INSTALLEDVERSION=$(cat /etc/soversion) -default_salt_dir=/opt/so/saltstack/default +INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'}) +DEFAULT_SALT_DIR=/opt/so/saltstack/default +BATCHSIZE=5 +SOUP_LOG=/root/soup.log +exec 3>&1 1>${SOUP_LOG} 2>&1 manager_check() { # Check to see if this is a manager MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}') - if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch')$ ]]; then - echo "This is a manager. We can proceed" + if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch'|'so-import')$ ]]; then + echo "This is a manager. We can proceed." + MINIONID=$(salt-call grains.get id --out=txt|awk -F: {'print $2'}|tr -d ' ') else echo "Please run soup on the manager. 
The manager controls all updates." exit 0 @@ -58,28 +63,122 @@ clone_to_tmp() { copy_new_files() { # Copy new files over to the salt dir cd /tmp/sogh/securityonion - rsync -a salt $default_salt_dir/ - rsync -a pillar $default_salt_dir/ - chown -R socore:socore $default_salt_dir/ - chmod 755 $default_salt_dir/pillar/firewall/addfirewall.sh + rsync -a salt $DEFAULT_SALT_DIR/ + rsync -a pillar $DEFAULT_SALT_DIR/ + chown -R socore:socore $DEFAULT_SALT_DIR/ + chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh cd /tmp } +detect_os() { + # Detect Base OS + echo "Determining Base OS." >> "$SOUP_LOG" 2>&1 + if [ -f /etc/redhat-release ]; then + OS="centos" + elif [ -f /etc/os-release ]; then + OS="ubuntu" + fi + echo "Found OS: $OS" >> "$SOUP_LOG" 2>&1 +} + highstate() { # Run a highstate but first cancel a running one. salt-call saltutil.kill_all_jobs - salt-call state.highstate + salt-call state.highstate -l info +} + +masterlock() { + echo "Locking Salt Master" + if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then + TOPFILE=/opt/so/saltstack/default/salt/top.sls + BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup + mv -v $TOPFILE $BACKUPTOPFILE + echo "base:" > $TOPFILE + echo " $MINIONID:" >> $TOPFILE + echo " - ca" >> $TOPFILE + echo " - ssl" >> $TOPFILE + echo " - elasticsearch" >> $TOPFILE + fi +} + +masterunlock() { + echo "Unlocking Salt Master" + if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then + mv -v $BACKUPTOPFILE $TOPFILE + fi +} + +playbook() { + echo "Applying playbook settings" + if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then + salt-call state.apply playbook.db_init + rm -f /opt/so/rules/elastalert/playbook/*.yaml + so-playbook-ruleupdate >> /root/soup_playbook_rule_update.log 2>&1 & + fi } pillar_changes() { # This function is to add any new pillar items if needed. - echo "Checking to see if pillar changes are needed" + echo "Checking to see if pillar changes are needed." 
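The masterlock function above pins the mid-upgrade highstate to the manager itself by swapping in a minimal top file keyed to the manager's minion ID. A sketch of the file those echo lines produce, assuming a hypothetical minion ID of example_manager:

```bash
# Hypothetical contents of /opt/so/saltstack/default/salt/top.sls while the
# master is locked; only the ca, ssl, and elasticsearch states are applied
# to the manager until masterunlock restores the backup.
cat /opt/so/saltstack/default/salt/top.sls
# base:
#   example_manager:
#     - ca
#     - ssl
#     - elasticsearch
```
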
+ + # Move baseurl in global.sls + if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then + # Move the static file to global.sls + echo "Migrating static.sls to global.sls" + mv -v /opt/so/saltstack/local/pillar/static.sls /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1 + sed -i '1c\global:' /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1 + # Moving baseurl from minion sls file to inside global.sls + local line=$(grep '^ url_base:' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls) + sed -i '/^ url_base:/d' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls; + sed -i "/^global:/a \\$line" /opt/so/saltstack/local/pillar/global.sls; + + # Adding play values to the global.sls + local HIVEPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + local CORTEXPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + sed -i "/^global:/a \\ hiveplaysecret: $HIVEPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls; + sed -i "/^global:/a \\ cortexplaysecret: $CORTEXPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls; + + # Move storage nodes to hostname for SSL + # Get a list we can use: + grep -A1 searchnode /opt/so/saltstack/local/pillar/data/nodestab.sls | grep -v '\-\-' | sed '$!N;s/\n/ /' | awk '{print $1,$3}' | awk '/_searchnode:/{gsub(/\_searchnode:/, "_searchnode"); print}' >/tmp/nodes.txt + # Remove the nodes from cluster settings + while read p; do + local NAME=$(echo $p | awk '{print $1}') + local IP=$(echo $p | awk '{print $2}') + echo "Removing the old cross cluster config for $NAME" + curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_cluster/settings -d '{"persistent":{"cluster":{"remote":{"'$NAME'":{"skip_unavailable":null,"seeds":null}}}}}' + done /etc/soversion - sed -i "s/$INSTALLEDVERSION/$NEWVERSION/g" /opt/so/saltstack/local/pillar/static.sls + sed -i "s/$INSTALLEDVERSION/$NEWVERSION/g" /opt/so/saltstack/local/pillar/global.sls } upgrade_check() { @@ -154,8 +254,44 @@ upgrade_check() { if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then echo "You are already running the latest version of Security Onion." exit 0 + fi +} + +upgrade_check_salt() { + NEWSALTVERSION=$(grep version: $UPDATE_DIR/salt/salt/master.defaults.yaml | awk {'print $2'}) + if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then + echo "You are already running the correct version of Salt for Security Onion." else - echo "Performing Upgrade from $INSTALLEDVERSION to $NEWVERSION" + SALTUPGRADED=True + echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION." + echo "" + # If CentOS + if [ "$OS" == "centos" ]; then + echo "Removing yum versionlock for Salt." + echo "" + yum versionlock delete "salt-*" + echo "Updating Salt packages and restarting services." + echo "" + sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION" + echo "Applying yum versionlock for Salt." + echo "" + yum versionlock add "salt-*" + # Else do Ubuntu things + elif [ "$OS" == "ubuntu" ]; then + echo "Removing apt hold for Salt." + echo "" + apt-mark unhold "salt-common" + apt-mark unhold "salt-master" + apt-mark unhold "salt-minion" + echo "Updating Salt packages and restarting services." + echo "" + sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION" + echo "Applying apt hold for Salt." 
+ echo "" + apt-mark hold "salt-common" + apt-mark hold "salt-master" + apt-mark hold "salt-minion" + fi fi } @@ -167,41 +303,111 @@ verify_latest_update_script() { echo "This version of the soup script is up to date. Proceeding." else echo "You are not running the latest soup version. Updating soup." - cp $UPDATE_DIR/salt/common/tools/sbin/soup $default_salt_dir/salt/common/tools/sbin/ + cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/ salt-call state.apply common queue=True echo "" - echo "soup has been updated. Please run soup again" + echo "soup has been updated. Please run soup again." exit 0 fi } -echo "Checking to see if this is a manager" +main () { +while getopts ":b" opt; do + case "$opt" in + b ) # process option b + shift + BATCHSIZE=$1 + if ! [[ "$BATCHSIZE" =~ ^[0-9]+$ ]]; then + echo "Batch size must be a number greater than 0." + exit 1 + fi + ;; + \? ) echo "Usage: cmd [-b]" + ;; + esac +done + +echo "Checking to see if this is a manager." +echo "" manager_check -echo "Cloning latest code to a temporary location" +echo "Found that Security Onion $INSTALLEDVERSION is currently installed." +echo "" +detect_os +echo "" +echo "Cloning Security Onion github repo into $UPDATE_DIR." clone_to_tmp echo "" -echo "Verifying we have the latest script" +echo "Verifying we have the latest soup script." verify_latest_update_script echo "" -echo "Let's see if we need to update" + +echo "Let's see if we need to update Security Onion." upgrade_check + + echo "" -echo "Making pillar changes" +echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION." +echo "" +echo "Stopping Salt Minion service." +systemctl stop salt-minion +echo "" +echo "Stopping Salt Master service." +systemctl stop salt-master +echo "" +echo "Checking for Salt Master and Minion updates." +upgrade_check_salt + + +echo "Making pillar changes." pillar_changes echo "" -echo "Cleaning up old dockers" + +echo "Cleaning up old dockers." clean_dockers echo "" -echo "Updating docker to $NEWVERSION" +echo "Updating dockers to $NEWVERSION." update_dockers + echo "" -echo "Copying new code" +echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR." copy_new_files echo "" -echo "Updating version" update_version + echo "" -echo "Running a highstate to complete upgrade" +echo "Locking down Salt Master for upgrade" +masterlock + +echo "" +echo "Starting Salt Master service." +systemctl start salt-master + +echo "" +echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." highstate echo "" echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." + +echo "" +echo "Stopping Salt Master to remove ACL" +systemctl stop salt-master + +masterunlock + +echo "" +echo "Starting Salt Master service." +systemctl start salt-master +highstate +playbook + +SALTUPGRADED="True" +if [[ "$SALTUPGRADED" == "True" ]]; then + echo "" + echo "Upgrading Salt on the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION." 
+ salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion + echo "" +fi + +} + +main "$@" | tee /dev/fd/3 diff --git a/salt/curator/init.sls b/salt/curator/init.sls index 8873f401a..b98eaf6cb 100644 --- a/salt/curator/init.sls +++ b/salt/curator/init.sls @@ -1,5 +1,5 @@ -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone'] %} # Curator diff --git a/salt/deprecated-launcher/init.sls b/salt/deprecated-launcher/init.sls index 3ba9ad3a6..3805be5d7 100644 --- a/salt/deprecated-launcher/init.sls +++ b/salt/deprecated-launcher/init.sls @@ -1,4 +1,4 @@ -{%- set FLEETSETUP = salt['pillar.get']('static:fleetsetup', '0') -%} +{%- set FLEETSETUP = salt['pillar.get']('global:fleetsetup', '0') -%} {%- if FLEETSETUP != 0 %} launcherpkg: diff --git a/salt/domainstats/init.sls b/salt/domainstats/init.sls index 8d329c785..764435e5f 100644 --- a/salt/domainstats/init.sls +++ b/salt/domainstats/init.sls @@ -13,7 +13,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} # Create the group dstatsgroup: diff --git a/salt/elastalert/files/elastalert_config.yaml b/salt/elastalert/files/elastalert_config.yaml index 7646e8221..28d26bac0 100644 --- a/salt/elastalert/files/elastalert_config.yaml +++ b/salt/elastalert/files/elastalert_config.yaml @@ -16,12 +16,12 @@ disable_rules_on_error: false # How often ElastAlert will query Elasticsearch # The unit can be anything from weeks to seconds run_every: - minutes: 1 + minutes: 3 # ElastAlert will buffer results from the most recent # period of time, in case some log sources are not in real time buffer_time: - minutes: 1 + minutes: 10 # The maximum time between queries for ElastAlert to start at the most recently # run query. When ElastAlert starts, for each rule, it will search elastalert_metadata @@ -38,7 +38,7 @@ es_host: {{ esip }} es_port: {{ esport }} # Sets timeout for connecting to and reading from es_host -es_conn_timeout: 60 +es_conn_timeout: 55 # The maximum number of documents that will be downloaded from Elasticsearch in # a single query. 
The default is 10,000, and if you expect to get near this number, diff --git a/salt/elastalert/files/modules/so/playbook-es.py b/salt/elastalert/files/modules/so/playbook-es.py index c794bdf12..46d6c8f45 100644 --- a/salt/elastalert/files/modules/so/playbook-es.py +++ b/salt/elastalert/files/modules/so/playbook-es.py @@ -16,7 +16,7 @@ class PlaybookESAlerter(Alerter): today = strftime("%Y.%m.%d", gmtime()) timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime()) headers = {"Content-Type": "application/json"} - payload = {"rule.name": self.rule['play_title'],"event.severity": self.rule['event.severity'],"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"event.module": self.rule['event.module'],"event.dataset": self.rule['event.dataset'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"rule.category": self.rule['rule.category'],"data": match, "@timestamp": timestamp} + payload = {"rule.name": self.rule['play_title'],"event.severity": self.rule['event.severity'],"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"event.module": self.rule['event.module'],"event.dataset": self.rule['event.dataset'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"rule.category": self.rule['rule.category'],"event_data": match, "@timestamp": timestamp} url = f"http://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/" requests.post(url, data=json.dumps(payload), headers=headers, verify=False) diff --git a/salt/elastalert/files/rules/so/suricata_thehive.yaml b/salt/elastalert/files/rules/so/suricata_thehive.yaml index cd887c9f9..714d63d21 100644 --- a/salt/elastalert/files/rules/so/suricata_thehive.yaml +++ b/salt/elastalert/files/rules/so/suricata_thehive.yaml @@ -1,21 +1,17 @@ -{% set es = salt['pillar.get']('static:managerip', '') %} -{% set hivehost = salt['pillar.get']('static:managerip', '') %} -{% set hivekey = salt['pillar.get']('static:hivekey', '') %} -{% set MANAGER = salt['pillar.get']('manager:url_base', '') %} +{% set es = salt['pillar.get']('global:managerip', '') %} +{% set hivehost = salt['pillar.get']('global:managerip', '') %} +{% set hivekey = salt['pillar.get']('global:hivekey', '') %} +{% set MANAGER = salt['pillar.get']('global:url_base', '') %} # Elastalert rule to forward Suricata alerts from Security Onion to a specified TheHive instance. 
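With the elastalert_config.yaml change above, each rule now runs every 3 minutes over a 10-minute buffer, so consecutive query windows overlap rather than racing slow log sources. One way to spot-check the new schedule, assuming ElastAlert's standard elastalert_status writeback index and jq on the manager (neither is configured by this patch):

```bash
# Inspect the most recent rule runs; starttime/endtime should span ~10 minutes
# and successive runs for a given rule should be ~3 minutes apart.
curl -s 'localhost:9200/elastalert_status/_search?size=5&sort=@timestamp:desc' \
  | jq '.hits.hits[]._source | {rule_name, starttime, endtime}'
```
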
# es_host: {{es}} es_port: 9200 name: Suricata-Alert -type: frequency -index: "so-ids-*" -num_events: 1 -timeframe: - minutes: 10 +type: any +index: "*:so-ids-*" buffer_time: - minutes: 10 -allow_buffer_time_overlap: true + minutes: 5 query_key: ["rule.uuid","source.ip","destination.ip"] realert: days: 1 diff --git a/salt/elastalert/files/rules/so/wazuh_thehive.yaml b/salt/elastalert/files/rules/so/wazuh_thehive.yaml index ccb79e1e5..7e5c6e7c0 100644 --- a/salt/elastalert/files/rules/so/wazuh_thehive.yaml +++ b/salt/elastalert/files/rules/so/wazuh_thehive.yaml @@ -1,21 +1,17 @@ -{% set es = salt['pillar.get']('static:managerip', '') %} -{% set hivehost = salt['pillar.get']('static:managerip', '') %} -{% set hivekey = salt['pillar.get']('static:hivekey', '') %} -{% set MANAGER = salt['pillar.get']('manager:url_base', '') %} +{% set es = salt['pillar.get']('global:managerip', '') %} +{% set hivehost = salt['pillar.get']('global:managerip', '') %} +{% set hivekey = salt['pillar.get']('global:hivekey', '') %} +{% set MANAGER = salt['pillar.get']('global:url_base', '') %} # Elastalert rule to forward high level Wazuh alerts from Security Onion to a specified TheHive instance. # es_host: {{es}} es_port: 9200 name: Wazuh-Alert -type: frequency -index: "so-ossec-*" -num_events: 1 -timeframe: - minutes: 10 +type: any +index: "*:so-ossec-*" buffer_time: - minutes: 10 -allow_buffer_time_overlap: true + minutes: 5 realert: days: 1 filter: diff --git a/salt/elastalert/init.sls b/salt/elastalert/init.sls index 5703b8717..c6c3afb2f 100644 --- a/salt/elastalert/init.sls +++ b/salt/elastalert/init.sls @@ -12,8 +12,8 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %} diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index 4d5d5b2e4..acad465d1 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -5,6 +5,7 @@ {%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:esclustername', '') %} {%- endif %} {%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} cluster.name: "{{ ESCLUSTERNAME }}" network.host: 0.0.0.0 @@ -16,12 +17,30 @@ discovery.zen.minimum_master_nodes: 1 path.logs: /var/log/elasticsearch action.destructive_requires_name: true transport.bind_host: 0.0.0.0 -transport.publish_host: {{ NODEIP }} +transport.publish_host: {{ grains.host }} transport.publish_port: 9300 cluster.routing.allocation.disk.threshold_enabled: true cluster.routing.allocation.disk.watermark.low: 95% cluster.routing.allocation.disk.watermark.high: 98% cluster.routing.allocation.disk.watermark.flood_stage: 98% +{%- if FEATURES is sameas true %} +#xpack.security.enabled: false +#xpack.security.http.ssl.enabled: false +#xpack.security.transport.ssl.enabled: false +#xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key +#xpack.security.http.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt +#xpack.security.http.ssl.certificate_authorities: 
/usr/share/elasticsearch/config/ca.crt +#xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key +#xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt +#xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt +#xpack.security.transport.ssl.verification_mode: none +#xpack.security.http.ssl.client_authentication: none +#xpack.security.authc: +# anonymous: +# username: anonymous_user +# roles: superuser +# authz_exception: true +{%- endif %} node.attr.box_type: {{ NODE_ROUTE_TYPE }} node.name: {{ ESCLUSTERNAME }} script.max_compilations_rate: 1000/1m diff --git a/salt/elasticsearch/files/ingest/beats.common b/salt/elasticsearch/files/ingest/beats.common index cafbc9e94..4e358582e 100644 --- a/salt/elasticsearch/files/ingest/beats.common +++ b/salt/elasticsearch/files/ingest/beats.common @@ -1,53 +1,8 @@ { "description" : "beats.common", "processors" : [ - {"community_id": {"if": "ctx.winlog.event_data?.Protocol != null", "field":["winlog.event_data.SourceIp","winlog.event_data.SourcePort","winlog.event_data.DestinationIp","winlog.event_data.DestinationPort","winlog.event_data.Protocol"],"target_field":"network.community_id"}}, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "field": "event.module", "value": "sysmon", "override": true } }, - { "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } }, - { "set": { "if": "ctx.agent?.type != null", "field": "module", "value": "{{agent.type}}", "override": true } }, - { "set": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.category", "value": "host,process,network", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.category", "value": "host,process", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.dataset", "value": "process_creation", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 2", "field": "event.dataset", "value": "process_changed_file", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.dataset", "value": "network_connection", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 5", "field": "event.dataset", "value": "process_terminated", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 6", "field": "event.dataset", "value": "driver_loaded", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 7", "field": "event.dataset", "value": "image_loaded", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 8", "field": "event.dataset", "value": "create_remote_thread", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 
'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 9", "field": "event.dataset", "value": "raw_file_access_read", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 10", "field": "event.dataset", "value": "process_access", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 11", "field": "event.dataset", "value": "file_create", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 12", "field": "event.dataset", "value": "registry_create_delete", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 13", "field": "event.dataset", "value": "registry_value_set", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 14", "field": "event.dataset", "value": "registry_key_value_rename", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 15", "field": "event.dataset", "value": "file_create_stream_hash", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 16", "field": "event.dataset", "value": "config_change", "override": true } }, - { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 22", "field": "event.dataset", "value": "dns_query", "override": true } }, - { "rename": { "field": "agent.hostname", "target_field": "agent.name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.DestinationHostname", "target_field": "destination.hostname", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.DestinationIp", "target_field": "destination.ip", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.DestinationPort", "target_field": "destination.port", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.Image", "target_field": "process.executable", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.ProcessID", "target_field": "process.pid", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.CommandLine", "target_field": "process.command_line", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } }, - { "rename": { "field": 
"winlog.event_data.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.SourceHostname", "target_field": "source.hostname", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.SourceIp", "target_field": "source.ip", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.SourcePort", "target_field": "source.port", "ignore_missing": true } }, - { "rename": { "field": "winlog.event_data.targetFilename", "target_field": "file.target", "ignore_missing": true } }, + { "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } }, + { "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } }, { "pipeline": { "name": "common" } } ] } \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/common b/salt/elasticsearch/files/ingest/common index 9db5a039b..01d18529b 100644 --- a/salt/elasticsearch/files/ingest/common +++ b/salt/elasticsearch/files/ingest/common @@ -42,13 +42,14 @@ { "set": { "if": "ctx.event?.severity == 3", "field": "event.severity_label", "value": "high", "override": true } }, { "set": { "if": "ctx.event?.severity == 4", "field": "event.severity_label", "value": "critical", "override": true } }, { "rename": { "field": "module", "target_field": "event.module", "ignore_failure": true, "ignore_missing": true } }, - { "rename": { "field": "dataset", "target_field": "event.dataset", "ignore_failure": true, "ignore_missing": true } }, - { "rename": { "field": "category", "target_field": "event.category", "ignore_missing": true } }, + { "rename": { "field": "dataset", "target_field": "event.dataset", "ignore_failure": true, "ignore_missing": true } }, + { "rename": { "field": "category", "target_field": "event.category", "ignore_failure": true, "ignore_missing": true } }, { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_failure": true, "ignore_missing": true } }, { "lowercase": { "field": "event.dataset", "ignore_failure": true, "ignore_missing": true } }, { "convert": { "field": "destination.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } }, { "convert": { "field": "source.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } }, { "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } }, + { "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } }, { "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } }, { "remove": { diff --git a/salt/elasticsearch/files/ingest/osquery.query_result b/salt/elasticsearch/files/ingest/osquery.query_result index 80ed32d73..3a6ed15a3 100644 --- a/salt/elasticsearch/files/ingest/osquery.query_result +++ b/salt/elasticsearch/files/ingest/osquery.query_result @@ -2,78 +2,24 @@ "description" : "osquery", "processors" : [ 
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } }, + { "gsub": { "field": "message2.columns.data", "pattern": "\\\\xC2\\\\xAE", "replacement": "", "ignore_missing": true } }, - { "json": { "field": "message2.columns.data", "target_field": "message2.columns.winlog", "ignore_failure": true } }, + { "rename": { "if": "ctx.message2.columns?.eventid != null", "field": "message2.columns", "target_field": "winlog", "ignore_missing": true } }, + { "json": { "field": "winlog.data", "target_field": "temp", "ignore_failure": true } }, + { "rename": { "field": "temp.Data", "target_field": "winlog.event_data", "ignore_missing": true } }, + { "rename": { "field": "winlog.source", "target_field": "winlog.channel", "ignore_missing": true } }, + { "rename": { "field": "winlog.eventid", "target_field": "winlog.event_id", "ignore_missing": true } }, + { "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } }, + { "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } }, + { "script": { "lang": "painless", "source": "def dict = ['result': new HashMap()]; for (entry in ctx['message2'].entrySet()) { dict['result'][entry.getKey()] = entry.getValue(); } ctx['osquery'] = dict; " } }, - { "rename": { "field": "osquery.result.hostIdentifier", "target_field": "osquery.result.host_identifier", "ignore_missing": true } }, - { "rename": { "field": "osquery.result.calendarTime", "target_field": "osquery.result.calendar_time", "ignore_missing": true } }, - { "rename": { "field": "osquery.result.unixTime", "target_field": "osquery.result.unix_time", "ignore_missing": true } }, - { "json": { "field": "message", "target_field": "message3", "ignore_failure": true } }, - { "gsub": { "field": "message3.columns.data", "pattern": "\\\\xC2\\\\xAE", "replacement": "", "ignore_missing": true } }, - { "json": { "field": "message3.columns.data", "target_field": "message3.columns.winlog", "ignore_failure": true } }, - { "rename": { "field": "message3.columns.username", "target_field": "user.name", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.uid", "target_field": "user.uid", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.gid", "target_field": "user.gid", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.shell", "target_field": "user.shell", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.cmdline", "target_field": "process.command_line", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.pid", "target_field": "process.pid", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.parent", "target_field": "process.ppid", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.cwd", "target_field": "process.working_directory", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.community_id", "target_field": "network.community_id", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.local_address", "target_field": "local.ip", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.local_port", "target_field": "local.port", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.remote_address", "target_field": "remote.ip", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.remote_port", "target_field": "remote.port", "ignore_missing": true } }, - { "rename": { "field": 
"message3.columns.process_name", "target_field": "process.name", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.eventid", "target_field": "event.code", "ignore_missing": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational'", "field": "event.module", "value": "sysmon", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } }, - { "set": { "if": "ctx.message3.columns?.source != 'Microsoft-Windows-Sysmon/Operational'", "field": "event.dataset", "value": "{{message3.columns.source}}", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.dataset", "value": "process_creation", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 2", "field": "event.dataset", "value": "process_changed_file", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.dataset", "value": "network_connection", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 5", "field": "event.dataset", "value": "process_terminated", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 6", "field": "event.dataset", "value": "driver_loaded", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 7", "field": "event.dataset", "value": "image_loaded", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 8", "field": "event.dataset", "value": "create_remote_thread", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 9", "field": "event.dataset", "value": "raw_file_access_read", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 10", "field": "event.dataset", "value": "process_access", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 11", "field": "event.dataset", "value": "file_create", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 12", "field": "event.dataset", "value": "registry_create_delete", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 13", "field": "event.dataset", "value": "registry_value_set", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 14", "field": "event.dataset", "value": "registry_key_value_rename", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 15", "field": "event.dataset", "value": "file_create_stream_hash", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && 
ctx.event?.code == 16", "field": "event.dataset", "value": "config_change", "override": true } }, - { "set": { "if": "ctx.message3.columns?.source == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 22", "field": "event.dataset", "value": "dns_query", "override": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.destinationHostname", "target_field": "destination.hostname", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.destinationIp", "target_field": "destination.ip", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.destinationPort", "target_field": "destination.port", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.Image", "target_field": "process.executable", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.ProcessID", "target_field": "process.pid", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.CommandLine", "target_field": "process.command_line", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.Description", "target_field": "process.pe.description", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.Product", "target_field": "process.pe.product", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.User", "target_field": "user.name", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.parentImage", "target_field": "parent_image_path", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.sourceHostname", "target_field": "source.hostname", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.sourceIp", "target_field": "source_ip", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.sourcePort", "target_field": "source.port", "ignore_missing": true } }, - { "rename": { "field": "message3.columns.winlog.EventData.targetFilename", "target_field": "file.target", "ignore_missing": true } }, - { "remove": { "field": [ 
"message3"], "ignore_failure": false } }, + { "set": { "field": "event.module", "value": "osquery", "override": false } }, + { "set": { "field": "event.dataset", "value": "{{osquery.result.name}}", "override": false} }, { "pipeline": { "name": "common" } } ] } \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/sysmon b/salt/elasticsearch/files/ingest/sysmon new file mode 100644 index 000000000..5fe46b3a5 --- /dev/null +++ b/salt/elasticsearch/files/ingest/sysmon @@ -0,0 +1,54 @@ +{ + "description" : "sysmon", + "processors" : [ + {"community_id": {"if": "ctx.winlog.event_data?.Protocol != null", "field":["winlog.event_data.SourceIp","winlog.event_data.SourcePort","winlog.event_data.DestinationIp","winlog.event_data.DestinationPort","winlog.event_data.Protocol"],"target_field":"network.community_id"}}, + { "set": { "field": "event.code", "value": "{{winlog.event_id}}", "override": true } }, + { "set": { "field": "event.module", "value": "sysmon", "override": true } }, + { "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } }, + { "set": { "if": "ctx.event?.code == '3'", "field": "event.category", "value": "host,process,network", "override": true } }, + { "set": { "if": "ctx.event?.code == '1'", "field": "event.category", "value": "host,process", "override": true } }, + { "set": { "if": "ctx.event?.code == '5'", "field": "event.category", "value": "host,process", "override": true } }, + { "set": { "if": "ctx.event?.code == '6'", "field": "event.category", "value": "host,driver", "override": true } }, + { "set": { "if": "ctx.event?.code == '22'", "field": "event.category", "value": "network", "override": true } }, + { "set": { "if": "ctx.event?.code == '1'", "field": "event.dataset", "value": "process_creation", "override": true } }, + { "set": { "if": "ctx.event?.code == '2'", "field": "event.dataset", "value": "process_changed_file", "override": true } }, + { "set": { "if": "ctx.event?.code == '3'", "field": "event.dataset", "value": "network_connection", "override": true } }, + { "set": { "if": "ctx.event?.code == '5'", "field": "event.dataset", "value": "process_terminated", "override": true } }, + { "set": { "if": "ctx.event?.code == '6'", "field": "event.dataset", "value": "driver_loaded", "override": true } }, + { "set": { "if": "ctx.event?.code == '7'", "field": "event.dataset", "value": "image_loaded", "override": true } }, + { "set": { "if": "ctx.event?.code == '8'", "field": "event.dataset", "value": "create_remote_thread", "override": true } }, + { "set": { "if": "ctx.event?.code == '9'", "field": "event.dataset", "value": "raw_file_access_read", "override": true } }, + { "set": { "if": "ctx.event?.code == '10'", "field": "event.dataset", "value": "process_access", "override": true } }, + { "set": { "if": "ctx.event?.code == '11'", "field": "event.dataset", "value": "file_create", "override": true } }, + { "set": { "if": "ctx.event?.code == '12'", "field": "event.dataset", "value": "registry_create_delete", "override": true } }, + { "set": { "if": "ctx.event?.code == '13'", "field": "event.dataset", "value": "registry_value_set", "override": true } }, + { "set": { "if": "ctx.event?.code == '14'", "field": "event.dataset", "value": "registry_key_value_rename", "override": true } }, + { "set": { "if": "ctx.event?.code == '15'", "field": "event.dataset", "value": "file_create_stream_hash", "override": true } }, + { "set": { "if": "ctx.event?.code == '16'", "field": "event.dataset", 
"value": "config_change", "override": true } }, + { "set": { "if": "ctx.event?.code == '22'", "field": "event.dataset", "value": "dns_query", "override": true } }, + { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.DestinationHostname", "target_field": "destination.hostname", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.DestinationIp", "target_field": "destination.ip", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.DestinationPort", "target_field": "destination.port", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.Image", "target_field": "process.executable", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ProcessID", "target_field": "process.pid", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.CommandLine", "target_field": "process.command_line", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.Company", "target_field": "process.pe.company", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.SourceHostname", "target_field": "source.hostname", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.SourceIp", "target_field": "source.ip", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.SourcePort", "target_field": "source.port", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.targetFilename", "target_field": "file.target", "ignore_missing": true } } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/win.eventlogs b/salt/elasticsearch/files/ingest/win.eventlogs new file mode 100644 index 000000000..0a128aae9 --- /dev/null +++ b/salt/elasticsearch/files/ingest/win.eventlogs @@ -0,0 +1,10 @@ +{ + "description" : "win.eventlogs", + "processors" : [ + { "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", 
"value": "windows_eventlog", "override": false, "ignore_failure": true } }, + { "set": { "if": "ctx.winlog?.channel != null", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } }, + { "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } }, + { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, + { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/scripts/so-catrust b/salt/elasticsearch/files/scripts/so-catrust new file mode 100644 index 000000000..aee83a379 --- /dev/null +++ b/salt/elasticsearch/files/scripts/so-catrust @@ -0,0 +1,32 @@ +#!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +{%- set VERSION = salt['pillar.get']('global:soversion', '') %} +{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} +{%- set MANAGER = salt['grains.get']('master') %} +. /usr/sbin/so-common +# Check to see if we have extracted the ca cert. +if [ ! -f /opt/so/saltstack/local/salt/common/cacerts ]; then + docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }} -keystore /etc/pki/ca-trust/extracted/java/cacerts -alias SOSCA -import -file /etc/pki/ca.crt -storepass changeit -noprompt + docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/java/cacerts /opt/so/saltstack/local/salt/common/cacerts + docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem + docker rm so-elasticsearchca + echo "" >> /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem + echo "sosca" >> /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem + cat /etc/pki/ca.crt >> /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem +else + exit 0 +fi \ No newline at end of file diff --git a/salt/elasticsearch/files/sotls.yml b/salt/elasticsearch/files/sotls.yml new file mode 100644 index 000000000..c676f4a56 --- /dev/null +++ b/salt/elasticsearch/files/sotls.yml @@ -0,0 +1,12 @@ +keystore.path: /usr/share/elasticsearch/config/sokeys +keystore.password: changeit +keystore.algorithm: SunX509 +truststore.path: /etc/pki/java/cacerts +truststore.password: changeit +truststore.algorithm: PKIX +protocols: +- TLSv1.2 +ciphers: +- TLS_RSA_WITH_AES_128_CBC_SHA256 +transport.encrypted: true +http.encrypted: false diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 909d30152..cc2d91537 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -12,23 +12,27 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} +{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%} -{% if FEATURES %} - {% set FEATURES = "-features" %} + +{%- if FEATURES is sameas true %} + {% set FEATUREZ = "-features" %} {% else %} - {% set FEATURES = '' %} + {% set FEATUREZ = '' %} {% endif %} -{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %} +{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-import'] %} {% set esclustername = salt['pillar.get']('manager:esclustername', '') %} {% set esheap = salt['pillar.get']('manager:esheap', '') %} + {% set ismanager = True %} {% elif grains['role'] in ['so-node','so-heavynode'] %} {% set esclustername = salt['pillar.get']('elasticsearch:esclustername', '') %} {% set esheap = salt['pillar.get']('elasticsearch:esheap', '') %} + {% set ismanager = False %} {% endif %} {% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %} @@ -37,6 +41,46 @@ vm.max_map_count: sysctl.present: - value: 262144 +{% if ismanager %} +# We have to add the Manager CA to the CA list +cascriptsync: + file.managed: + - name: /usr/sbin/so-catrust + - source: salt://elasticsearch/files/scripts/so-catrust + - user: 939 + - group: 939 + - mode: 750 + - template: jinja + +# Run the CA magic +cascriptfun: + cmd.run: + - name: /usr/sbin/so-catrust + +{% endif %} + +# Move our new CA over so Elastic and Logstash can use SSL with the internal CA +catrustdir: + file.directory: + - name: /opt/so/conf/ca + - user: 939 + - group: 939 + - makedirs: True + +cacertz: + file.managed: + - name: /opt/so/conf/ca/cacerts + - source: salt://common/cacerts + - user: 939 + - group: 939 + +capemz: + file.managed: + - name: /opt/so/conf/ca/tls-ca-bundle.pem + - source: salt://common/tls-ca-bundle.pem + - user: 939 + - group: 939 + # Add ES Group elasticsearchgroup: group.present: @@ -95,6 +139,13 @@ esyml: - group: 939 - template: jinja +sotls: + file.managed: + - name: /opt/so/conf/elasticsearch/sotls.yml + - source: salt://elasticsearch/files/sotls.yml + - user: 930 + - group: 939 + #sync templates to /opt/so/conf/elasticsearch/templates {% for TEMPLATE in TEMPLATES %} es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}: @@ -126,18 +177,23 @@ eslogdir: so-elasticsearch: docker_container.running: - - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}{{ FEATURES }} + - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}{{ FEATUREZ }} - hostname: elasticsearch - name: so-elasticsearch - user: elasticsearch + - extra_hosts: + - {{ grains.host }}:{{ NODEIP }} + {%- if ismanager %} + {%- if salt['pillar.get']('nodestab', {}) %} + {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} + - {{ SN.split('_')|first }}:{{ SNDATA.ip }} + {%- endfor %} + {%- endif %} + {%- endif %} - environment: - discovery.type=single-node - #- bootstrap.memory_lock=true - #- cluster.name={{ esclustername }} - ES_JAVA_OPTS=-Xms{{ esheap }} -Xmx{{ esheap }} - #- http.host=0.0.0.0 - #- transport.host=127.0.0.1 - - ulimits: + ulimits: - memlock=-1:-1 - nofile=65536:65536 - nproc=4096 @@ -149,6 +205,16 @@ so-elasticsearch: - 
/opt/so/conf/elasticsearch/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw + - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro + - /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro + - /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro + - /opt/so/conf/elasticsearch/sotls.yml:/usr/share/elasticsearch/config/sotls.yml:ro + + - watch: + - file: cacertz + - file: esyml + - file: esingestconf + - file: so-elasticsearch-pipelines-file so-elasticsearch-pipelines-file: file.managed: diff --git a/salt/elasticsearch/templates/so/so-common-template.json b/salt/elasticsearch/templates/so/so-common-template.json index c0167198d..745abbc28 100644 --- a/salt/elasticsearch/templates/so/so-common-template.json +++ b/salt/elasticsearch/templates/so/so-common-template.json @@ -18,7 +18,7 @@ "@version":{ "type":"keyword" }, - "osquery":{ + "osquery":{ "type":"object", "dynamic":true }, @@ -85,7 +85,7 @@ "type":"object", "dynamic": true }, - "client":{ + "client":{ "type":"object", "dynamic": true }, @@ -177,6 +177,10 @@ "type":"object", "dynamic": true }, + "import":{ + "type":"object", + "dynamic": true + }, "ingest":{ "type":"object", "dynamic": true @@ -185,7 +189,7 @@ "type":"object", "dynamic": true }, - "irc":{ + "irc":{ "type":"object", "dynamic": true }, @@ -201,7 +205,7 @@ "type":"object", "dynamic": true }, - "message":{ + "message":{ "type":"text", "fields":{ "keyword":{ @@ -213,7 +217,7 @@ "type":"object", "dynamic": true }, - "mysql":{ + "mysql":{ "type":"object", "dynamic": true }, @@ -221,7 +225,7 @@ "type":"object", "dynamic": true }, - "notice":{ + "notice":{ "type":"object", "dynamic": true }, @@ -269,7 +273,7 @@ "type":"object", "dynamic": true }, - "request":{ + "request":{ "type":"object", "dynamic": true }, @@ -281,7 +285,7 @@ "type":"object", "dynamic": true }, - "scan":{ + "scan":{ "type":"object", "dynamic": true }, @@ -317,7 +321,7 @@ "type":"object", "dynamic": true }, - "source":{ + "source":{ "type":"object", "dynamic": true }, @@ -329,7 +333,7 @@ "type":"object", "dynamic": true }, - "syslog":{ + "syslog":{ "type":"object", "dynamic": true }, @@ -383,8 +387,16 @@ }, "winlog":{ "type":"object", - "dynamic": true - }, + "dynamic": true, + "properties":{ + "event_id":{ + "type":"long" + }, + "event_data":{ + "type":"object" + } + } + }, "x509":{ "type":"object", "dynamic": true diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml index 825ffaf64..6849b1c08 100644 --- a/salt/filebeat/etc/filebeat.yml +++ b/salt/filebeat/etc/filebeat.yml @@ -1,16 +1,16 @@ {%- if grains.role == 'so-heavynode' %} -{%- set MANAGER = salt['pillar.get']('sensor:mainip' '') %} +{%- set MANAGER = salt['grains.get']('host' '') %} {%- else %} {%- set MANAGER = salt['grains.get']('master') %} {%- endif %} {%- set HOSTNAME = salt['grains.get']('host', '') %} -{%- set ZEEKVER = salt['pillar.get']('static:zeekversion', 'COMMUNITY') %} -{%- set WAZUHENABLED = salt['pillar.get']('static:wazuh', '0') %} +{%- set ZEEKVER = salt['pillar.get']('global:zeekversion', 'COMMUNITY') %} +{%- set WAZUHENABLED = salt['pillar.get']('global:wazuh', '0') %} {%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %} -{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%} -{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%} +{%- set FLEETMANAGER = 
salt['pillar.get']('global:fleet_manager', False) -%} +{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%} name: {{ HOSTNAME }} @@ -74,7 +74,7 @@ filebeat.modules: # List of prospectors to fetch data. filebeat.inputs: #------------------------------ Log prospector -------------------------------- -{%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" or grains['role'] == "so-heavynode" or grains['role'] == "so-standalone" %} +{%- if grains['role'] in ['so-sensor', "so-eval", "so-helix", "so-heavynode", "so-standalone", "so-import"] %} - type: udp enabled: true host: "0.0.0.0:514" @@ -253,7 +253,7 @@ output.{{ type }}: {%- endfor %} {%- else %} #----------------------------- Elasticsearch/Logstash output --------------------------------- - {%- if grains['role'] == "so-eval" %} + {%- if grains['role'] in ["so-eval", "so-import"] %} output.elasticsearch: enabled: true hosts: ["{{ MANAGER }}:9200"] diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls index 6889b892f..ee7c5ae10 100644 --- a/salt/filebeat/init.sls +++ b/salt/filebeat/init.sls @@ -11,12 +11,12 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} -{% set MANAGERIP = salt['pillar.get']('static:managerip', '') %} +{% set MANAGERIP = salt['pillar.get']('global:managerip', '') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} -{% if FEATURES %} +{%- if FEATURES is sameas true %} {% set FEATURES = "-features" %} {% else %} {% set FEATURES = '' %} @@ -60,8 +60,8 @@ so-filebeat: - /nsm:/nsm:ro - /opt/so/log/filebeat:/usr/share/filebeat/logs:rw - /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro - - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro - - /opt/so/wazuh/logs/archives:/wazuh/archives:ro + - /nsm/wazuh/logs/alerts:/wazuh/alerts:ro + - /nsm/wazuh/logs/archives:/wazuh/archives:ro - /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro - /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro - /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro diff --git a/salt/firewall/assigned_hostgroups.map.yaml b/salt/firewall/assigned_hostgroups.map.yaml index 2500c604a..f7f87eb5f 100644 --- a/salt/firewall/assigned_hostgroups.map.yaml +++ b/salt/firewall/assigned_hostgroups.map.yaml @@ -15,6 +15,7 @@ role: - {{ portgroups.mysql }} - {{ portgroups.kibana }} - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.influxdb }} - {{ portgroups.fleet_api }} - {{ portgroups.cortex }} @@ -38,6 +39,7 @@ role: search_node: portgroups: - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.elasticsearch_node }} self: portgroups: @@ -99,6 +101,7 @@ role: - {{ portgroups.mysql }} - {{ portgroups.kibana }} - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.influxdb }} - {{ portgroups.fleet_api }} - {{ portgroups.cortex }} @@ -122,6 +125,7 @@ role: search_node: portgroups: - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.elasticsearch_node }} self: portgroups: @@ -180,6 +184,7 @@ role: - {{ portgroups.mysql }} - {{ portgroups.kibana }} - {{ portgroups.redis }} + 
- {{ portgroups.minio }} - {{ portgroups.influxdb }} - {{ portgroups.fleet_api }} - {{ portgroups.cortex }} @@ -203,6 +208,7 @@ role: search_node: portgroups: - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.elasticsearch_node }} self: portgroups: @@ -261,6 +267,7 @@ role: - {{ portgroups.mysql }} - {{ portgroups.kibana }} - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.influxdb }} - {{ portgroups.fleet_api }} - {{ portgroups.cortex }} @@ -284,6 +291,7 @@ role: search_node: portgroups: - {{ portgroups.redis }} + - {{ portgroups.minio }} - {{ portgroups.elasticsearch_node }} self: portgroups: @@ -434,16 +442,24 @@ role: chain: DOCKER-USER: hostgroups: - self: + manager: portgroups: - - {{ portgroups.redis }} - - {{ portgroups.beats_5044 }} - - {{ portgroups.beats_5644 }} + - {{ portgroups.elasticsearch_node }} + dockernet: + portgroups: + - {{ portgroups.elasticsearch_node }} + - {{ portgroups.elasticsearch_rest }} + elasticsearch_rest: + portgroups: + - {{ portgroups.elasticsearch_rest }} INPUT: hostgroups: anywhere: portgroups: - {{ portgroups.ssh }} + dockernet: + portgroups: + - {{ portgroups.all }} localhost: portgroups: - {{ portgroups.all }} @@ -480,3 +496,55 @@ role: localhost: portgroups: - {{ portgroups.all }} + import: + chain: + DOCKER-USER: + hostgroups: + manager: + portgroups: + - {{ portgroups.kibana }} + - {{ portgroups.redis }} + - {{ portgroups.influxdb }} + - {{ portgroups.elasticsearch_rest }} + - {{ portgroups.elasticsearch_node }} + minion: + portgroups: + - {{ portgroups.docker_registry }} + sensor: + portgroups: + - {{ portgroups.beats_5044 }} + - {{ portgroups.beats_5644 }} + - {{ portgroups.sensoroni }} + search_node: + portgroups: + - {{ portgroups.redis }} + - {{ portgroups.elasticsearch_node }} + self: + portgroups: + - {{ portgroups.syslog}} + beats_endpoint: + portgroups: + - {{ portgroups.beats_5044 }} + beats_endpoint_ssl: + portgroups: + - {{ portgroups.beats_5644 }} + elasticsearch_rest: + portgroups: + - {{ portgroups.elasticsearch_rest }} + analyst: + portgroups: + - {{ portgroups.nginx }} + INPUT: + hostgroups: + anywhere: + portgroups: + - {{ portgroups.ssh }} + dockernet: + portgroups: + - {{ portgroups.all }} + localhost: + portgroups: + - {{ portgroups.all }} + minion: + portgroups: + - {{ portgroups.salt_manager }} \ No newline at end of file diff --git a/salt/firewall/portgroups.yaml b/salt/firewall/portgroups.yaml index b8d86f253..8771df8ef 100644 --- a/salt/firewall/portgroups.yaml +++ b/salt/firewall/portgroups.yaml @@ -45,6 +45,9 @@ firewall: kibana: tcp: - 5601 + minio: + tcp: + - 9595 mysql: tcp: - 3306 @@ -61,6 +64,7 @@ firewall: redis: tcp: - 6379 + - 9696 salt_manager: tcp: - 4505 diff --git a/salt/fleet/event_gen-packages.sls b/salt/fleet/event_gen-packages.sls index 24b013704..7506763dd 100644 --- a/salt/fleet/event_gen-packages.sls +++ b/salt/fleet/event_gen-packages.sls @@ -1,17 +1,17 @@ {% set MANAGER = salt['grains.get']('master') %} {% set ENROLLSECRET = salt['pillar.get']('secrets:fleet_enroll-secret') %} -{% set CURRENTPACKAGEVERSION = salt['pillar.get']('static:fleet_packages-version') %} -{% set VERSION = salt['pillar.get']('static:soversion') %} -{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} -{%- set FLEETNODE = salt['pillar.get']('static:fleet_node') -%} +{% set CURRENTPACKAGEVERSION = salt['pillar.get']('global:fleet_packages-version') %} +{% set VERSION = 
salt['pillar.get']('global:soversion') %} +{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} +{%- set FLEETNODE = salt['pillar.get']('global:fleet_node') -%} {% if CUSTOM_FLEET_HOSTNAME != None and CUSTOM_FLEET_HOSTNAME != '' %} {% set HOSTNAME = CUSTOM_FLEET_HOSTNAME %} {% elif FLEETNODE %} {% set HOSTNAME = grains.host %} {% else %} - {% set HOSTNAME = salt['pillar.get']('manager:url_base') %} + {% set HOSTNAME = salt['pillar.get']('global:url_base') %} {% endif %} so/fleet: diff --git a/salt/fleet/event_update-custom-hostname.sls b/salt/fleet/event_update-custom-hostname.sls index 9278862ed..b404b2828 100644 --- a/salt/fleet/event_update-custom-hostname.sls +++ b/salt/fleet/event_update-custom-hostname.sls @@ -1,4 +1,4 @@ -{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %} +{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %} so/fleet: event.send: diff --git a/salt/fleet/files/packs/osquery-config.conf b/salt/fleet/files/packs/osquery-config.conf index 2558efd88..4ce82cb8d 100644 --- a/salt/fleet/files/packs/osquery-config.conf +++ b/salt/fleet/files/packs/osquery-config.conf @@ -22,6 +22,8 @@ spec: distributed_tls_max_attempts: 3 distributed_tls_read_endpoint: /api/v1/osquery/distributed/read distributed_tls_write_endpoint: /api/v1/osquery/distributed/write + enable_windows_events_publisher: true + enable_windows_events_subscriber: true logger_plugin: tls logger_tls_endpoint: /api/v1/osquery/log logger_tls_period: 10 diff --git a/salt/fleet/init.sls b/salt/fleet/init.sls index 0b402a54b..b2a3bb516 100644 --- a/salt/fleet/init.sls +++ b/salt/fleet/init.sls @@ -1,8 +1,8 @@ {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%} {%- set FLEETPASS = salt['pillar.get']('secrets:fleet', None) -%} {%- set FLEETJWT = salt['pillar.get']('secrets:fleet_jwt', None) -%} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set FLEETARCH = salt['grains.get']('role') %} @@ -10,7 +10,7 @@ {% set MAININT = salt['pillar.get']('host:mainint') %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} {% else %} - {% set MAINIP = salt['pillar.get']('static:managerip') %} + {% set MAINIP = salt['pillar.get']('global:managerip') %} {% endif %} include: diff --git a/salt/fleet/install_package.sls b/salt/fleet/install_package.sls index d09de540c..9063464d8 100644 --- a/salt/fleet/install_package.sls +++ b/salt/fleet/install_package.sls @@ -1,8 +1,8 @@ -{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%} -{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%} -{%- set FLEETHOSTNAME = salt['pillar.get']('static:fleet_hostname', False) -%} -{%- set FLEETIP = salt['pillar.get']('static:fleet_ip', False) -%} -{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %} +{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%} +{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%} +{%- set FLEETHOSTNAME = salt['pillar.get']('global:fleet_hostname', False) -%} +{%- set FLEETIP = salt['pillar.get']('global:fleet_ip', False) -%} +{% set 
CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %} {% if CUSTOM_FLEET_HOSTNAME != (None and '') %} diff --git a/salt/freqserver/init.sls b/salt/freqserver/init.sls index 08661f3da..f48b66cff 100644 --- a/salt/freqserver/init.sls +++ b/salt/freqserver/init.sls @@ -13,7 +13,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} # Create the user fservergroup: diff --git a/salt/grafana/etc/datasources/influxdb.yaml b/salt/grafana/etc/datasources/influxdb.yaml index c70fd7137..a10bed981 100644 --- a/salt/grafana/etc/datasources/influxdb.yaml +++ b/salt/grafana/etc/datasources/influxdb.yaml @@ -1,4 +1,4 @@ -{%- set MANAGER = salt['pillar.get']('static:managerip', '') %} +{%- set MANAGER = salt['pillar.get']('global:managerip', '') %} apiVersion: 1 deleteDatasources: diff --git a/salt/grafana/init.sls b/salt/grafana/init.sls index e63c9a9c4..9fdd26b12 100644 --- a/salt/grafana/init.sls +++ b/salt/grafana/init.sls @@ -1,7 +1,7 @@ {% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %} {% set MANAGER = salt['grains.get']('master') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %} @@ -91,7 +91,6 @@ dashboard-manager: - defaults: SERVERNAME: {{ SN }} MANINT: {{ SNDATA.manint }} - MONINT: {{ SNDATA.manint }} CPUS: {{ SNDATA.totalcpus }} UID: so_overview ROOTFS: {{ SNDATA.rootfs }} @@ -114,7 +113,6 @@ dashboard-managersearch: - defaults: SERVERNAME: {{ SN }} MANINT: {{ SNDATA.manint }} - MONINT: {{ SNDATA.manint }} CPUS: {{ SNDATA.totalcpus }} UID: so_overview ROOTFS: {{ SNDATA.rootfs }} @@ -137,7 +135,7 @@ dashboard-standalone: - defaults: SERVERNAME: {{ SN }} MANINT: {{ SNDATA.manint }} - MONINT: {{ SNDATA.manint }} + MONINT: {{ SNDATA.monint }} CPUS: {{ SNDATA.totalcpus }} UID: so_overview ROOTFS: {{ SNDATA.rootfs }} @@ -159,8 +157,8 @@ dashboard-{{ SN }}: - source: salt://grafana/dashboards/sensor_nodes/sensor.json - defaults: SERVERNAME: {{ SN }} - MONINT: {{ SNDATA.monint }} MANINT: {{ SNDATA.manint }} + MONINT: {{ SNDATA.monint }} CPUS: {{ SNDATA.totalcpus }} UID: {{ SNDATA.guid }} ROOTFS: {{ SNDATA.rootfs }} @@ -183,7 +181,6 @@ dashboardsearch-{{ SN }}: - defaults: SERVERNAME: {{ SN }} MANINT: {{ SNDATA.manint }} - MONINT: {{ SNDATA.manint }} CPUS: {{ SNDATA.totalcpus }} UID: {{ SNDATA.guid }} ROOTFS: {{ SNDATA.rootfs }} diff --git a/salt/idstools/init.sls b/salt/idstools/init.sls index 3313fa901..93db83759 100644 --- a/salt/idstools/init.sls +++ b/salt/idstools/init.sls @@ -12,8 +12,8 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
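Most of the hunks in this stretch are the same mechanical rename: pillar keys move from the `static:` namespace to `global:` (and `manager:url_base` becomes `global:url_base`). If you maintain local states on top of these files, a quick audit can catch any leftover readers of the old keys; the path below assumes the stock Security Onion salt tree:

```bash
# Flag any state, pillar, or script still reading the retired 'static:' keys.
grep -rn "'static:" /opt/so/saltstack/local/

# Spot-check that a migrated key resolves on the minion.
salt-call pillar.get global:soversion
```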
-{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} # IDSTools Setup idstoolsdir: diff --git a/salt/influxdb/init.sls b/salt/influxdb/init.sls index 6d8ba4566..d35ab6cae 100644 --- a/salt/influxdb/init.sls +++ b/salt/influxdb/init.sls @@ -1,7 +1,7 @@ {% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %} {% set MANAGER = salt['grains.get']('master') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %} diff --git a/salt/kibana/bin/so-kibana-config-load b/salt/kibana/bin/so-kibana-config-load index 451e848a1..9d970b1e3 100644 --- a/salt/kibana/bin/so-kibana-config-load +++ b/salt/kibana/bin/so-kibana-config-load @@ -1,7 +1,7 @@ #!/bin/bash -# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%} -# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%} -# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %} +# {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager', False) -%} +# {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node', False) -%} +# {%- set MANAGER = salt['pillar.get']('global:url_base', '') %} KIBANA_VERSION="7.6.1" diff --git a/salt/kibana/etc/kibana.yml b/salt/kibana/etc/kibana.yml index 4d19b251b..4bcc22016 100644 --- a/salt/kibana/etc/kibana.yml +++ b/salt/kibana/etc/kibana.yml @@ -1,6 +1,7 @@ --- # Default Kibana configuration from kibana-docker. {%- set ES = salt['pillar.get']('manager:mainip', '') -%} +{%- set FEATURES = salt['pillar.get']('elastic:features', False) %} server.name: kibana server.host: "0" server.basePath: /kibana diff --git a/salt/kibana/init.sls b/salt/kibana/init.sls index 9521c5bb1..8711d47d1 100644 --- a/salt/kibana/init.sls +++ b/salt/kibana/init.sls @@ -1,8 +1,8 @@ -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} -{% if FEATURES %} +{%- if FEATURES is sameas true %} {% set FEATURES = "-features" %} {% else %} {% set FEATURES = '' %} diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 61d6aecc1..33fc496dc 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -12,12 +12,13 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
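The other recurring change is the feature-flag test: `{% if FEATURES %}` becomes `{%- if FEATURES is sameas true %}`. In Jinja any non-empty string is truthy, including `'False'`, so a string-typed pillar value could wrongly append the `-features` suffix to the image tag; `is sameas true` matches only the actual boolean. To confirm which image the rendered state ends up selecting, something along these lines works on a node where the state tree renders:

```bash
# Render the logstash state locally and inspect the image tag the Jinja
# logic chose (state.show_sls dumps the compiled state data).
salt-call state.show_sls logstash | grep -A 1 'image:'
```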
-{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} +{% set MANAGERIP = salt['pillar.get']('global:managerip') %} {% set FEATURES = salt['pillar.get']('elastic:features', False) %} -{% if FEATURES %} +{%- if FEATURES is sameas true %} {% set FEATURES = "-features" %} {% else %} {% set FEATURES = '' %} @@ -127,7 +128,7 @@ importdir: # Create the logstash data directory nsmlsdir: file.directory: - - name: /nsm/logstash + - name: /nsm/logstash/tmp - user: 931 - group: 939 - makedirs: True @@ -146,6 +147,8 @@ so-logstash: - hostname: so-logstash - name: so-logstash - user: logstash + - extra_hosts: + - {{ MANAGER }}:{{ MANAGERIP }} - environment: - LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }} - port_bindings: @@ -165,12 +168,19 @@ so-logstash: - /sys/fs/cgroup:/sys/fs/cgroup:ro - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro + {% if grains['role'] == 'so-heavynode' %} + - /etc/ssl/certs/intca.crt:/usr/share/filebeat/ca.crt:ro + {% else %} - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro + {% endif %} + - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro + - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro + - /etc/pki/ca.cer:/ca/ca.crt:ro {%- if grains['role'] == 'so-eval' %} - /nsm/zeek:/nsm/zeek:ro - /nsm/suricata:/suricata:ro - - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro - - /opt/so/wazuh/logs/archives:/wazuh/archives:ro + - /nsm/wazuh/logs/alerts:/wazuh/alerts:ro + - /nsm/wazuh/logs/archives:/wazuh/archives:ro - /opt/so/log/fleet/:/osquery/logs:ro - /opt/so/log/strelka:/strelka:ro {%- endif %} @@ -184,4 +194,4 @@ so-logstash: {% endfor %} {% for TEMPLATE in TEMPLATES %} - file: es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }} -{% endfor %} \ No newline at end of file +{% endfor %} diff --git a/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja new file mode 100644 index 000000000..59e457115 --- /dev/null +++ b/salt/logstash/pipelines/config/so/0899_input_minio.conf.jinja @@ -0,0 +1,23 @@ +{%- if grains.role == 'so-heavynode' %} +{%- set MANAGER = salt['grains.get']('host') %} +{%- else %} +{%- set MANAGER = salt['grains.get']('master') %} +{% endif -%} +{%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %} +{%- set access_key = salt['pillar.get']('minio:access_key', '') %} +{%- set access_secret = salt['pillar.get']('minio:access_secret', '') %} +{%- set INTERVAL = salt['pillar.get']('s3_settings:interval', 5) %} +input { + s3 { + access_key_id => "{{ access_key }}" + secret_access_key => "{{ access_secret }}" + endpoint => "https://{{ MANAGER }}:9595" + bucket => "logstash" + delete => true + interval => {{ INTERVAL }} + codec => json + additional_settings => { + "force_path_style" => true + } + } +} diff --git a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja index 2ce204875..c98a2a388 100644 --- a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja +++ b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja @@ -1,13 +1,11 @@ -{%- if grains.role == 'so-heavynode' %} -{%- set MANAGER = 
salt['pillar.get']('elasticsearch:mainip', '') %} -{%- else %} -{%- set MANAGER = salt['pillar.get']('static:managerip', '') %} -{% endif -%} +{%- set MANAGER = salt['grains.get']('master') %} {%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %} input { redis { host => '{{ MANAGER }}' + port => 9696 + ssl => true data_type => 'list' key => 'logstash:unparsed' type => 'redis-input' diff --git a/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja index f86bf946c..98a842b2d 100644 --- a/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja +++ b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja @@ -3,6 +3,7 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [module] =~ "zeek" and "import" not in [tags] { elasticsearch { diff --git a/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja b/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja index 52c9f034a..315c892e2 100644 --- a/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja +++ b/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja @@ -3,6 +3,7 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if "import" in [tags] { elasticsearch { diff --git a/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja b/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja index 740676367..889a3567f 100644 --- a/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja +++ b/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja @@ -3,6 +3,7 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [event_type] == "sflow" { elasticsearch { diff --git a/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja b/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja index fed1ffdf5..96d2ae5ba 100644 --- a/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja +++ b/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja @@ -3,6 +3,7 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [event_type] == "ids" and "import" not in [tags] { elasticsearch { diff --git a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja index 5087f41da..ee5c57c5a 100644 --- a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja +++ b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja @@ -3,6 +3,7 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [module] =~ "syslog" { elasticsearch { diff --git a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja index 01436cf5f..a9e5ac64d 100644 --- a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja +++ b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja @@ -3,6 +3,7 @@ {%- else %} {%- set ES = 
salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [module] =~ "osquery" { elasticsearch { diff --git a/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja b/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja index a295b5f7a..f8aa07b1b 100644 --- a/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja +++ b/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja @@ -3,6 +3,7 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if "firewall" in [tags] { elasticsearch { diff --git a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja index ace7cccf1..e65952cca 100644 --- a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja +++ b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja @@ -3,6 +3,7 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [module] =~ "suricata" and "import" not in [tags] { elasticsearch { diff --git a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja index ed513f597..10700733e 100644 --- a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja +++ b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja @@ -3,6 +3,7 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if "beat-ext" in [tags] and "import" not in [tags] { elasticsearch { diff --git a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja index 14a9bc1d1..89d1a9466 100644 --- a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja +++ b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja @@ -3,6 +3,7 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [module] =~ "ossec" { elasticsearch { diff --git a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja index 0e6977e29..cdc340b39 100644 --- a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja +++ b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja @@ -3,6 +3,7 @@ {%- else %} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- endif %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} output { if [module] =~ "strelka" { elasticsearch { diff --git a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja new file mode 100644 index 000000000..a38d2cd44 --- /dev/null +++ b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja @@ -0,0 +1,25 @@ +{%- set MANAGER = salt['grains.get']('master') %} +{%- set access_key = salt['pillar.get']('minio:access_key', '') %} +{%- set access_secret = salt['pillar.get']('minio:access_secret', '') %} +{%- set SIZE_FILE = salt['pillar.get']('s3_settings:size_file', 2048) %} +{%- set TIME_FILE = 
salt['pillar.get']('s3_settings:time_file', 1) %} +{%- set UPLOAD_QUEUE_SIZE = salt['pillar.get']('s3_settings:upload_queue_size', 4) %} +{%- set ENCODING = salt['pillar.get']('s3_settings:encoding', 'gzip') %} +output { + s3 { + access_key_id => "{{ access_key }}" + secret_access_key => "{{ access_secret }}" + endpoint => "https://{{ MANAGER }}:9595" + bucket => "logstash" + size_file => {{ SIZE_FILE }} + time_file => {{ TIME_FILE }} + codec => json + encoding => "{{ ENCODING }}" + upload_queue_size => {{ UPLOAD_QUEUE_SIZE }} + temporary_directory => "/usr/share/logstash/data/tmp" + validate_credentials_on_root_bucket => false + additional_settings => { + "force_path_style" => true + } + } +} diff --git a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja index 71ec9f639..626ed62c3 100644 --- a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja +++ b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja @@ -1,8 +1,9 @@ -{% set MANAGER = salt['pillar.get']('static:managerip', '') %} +{%- set MANAGER = salt['grains.get']('master') %} {% set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) %} output { redis { host => '{{ MANAGER }}' + port => 6379 data_type => 'list' key => 'logstash:unparsed' congestion_interval => 1 diff --git a/salt/manager/init.sls b/salt/manager/init.sls index aef705724..3b4852542 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -12,10 +12,10 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} -{% set managerproxy = salt['pillar.get']('static:managerupdate', '0') %} +{% set managerproxy = salt['pillar.get']('global:managerupdate', '0') %} socore_own_saltstack: file.directory: diff --git a/salt/minio/init.sls b/salt/minio/init.sls index 2d5941301..ece8673bd 100644 --- a/salt/minio/init.sls +++ b/salt/minio/init.sls @@ -13,47 +13,47 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see .
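The `0899_input_minio` / `9998_output_minio` pair above relays events through an S3 bucket named `logstash` on the manager's MinIO service (TCP 9595, path-style addressing, TLS from the internal CA). Any S3-compatible client can verify the service and bucket independently of Logstash; this sketch assumes the AWS CLI is installed, and the placeholders for the manager hostname and the `minio:` pillar credentials must be filled in:

```bash
# MinIO's built-in liveness endpoint; -k because the certificate is signed
# by the internal Security Onion CA.
curl -sk -o /dev/null -w '%{http_code}\n' "https://<manager>:9595/minio/health/live"

# List the objects Logstash has shipped into the bucket so far.
export AWS_ACCESS_KEY_ID='<minio:access_key>'
export AWS_SECRET_ACCESS_KEY='<minio:access_secret>'
aws --endpoint-url "https://<manager>:9595" s3 ls s3://logstash/
```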
-{% set access_key = salt['pillar.get']('manager:access_key', '') %} -{% set access_secret = salt['pillar.get']('manager:access_secret', '') %} +{% set access_key = salt['pillar.get']('minio:access_key', '') %} +{% set access_secret = salt['pillar.get']('minio:access_secret', '') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} +{% set MANAGER = salt['grains.get']('master') %} # Minio Setup minioconfdir: file.directory: - - name: /opt/so/conf/minio/etc + - name: /opt/so/conf/minio/etc/certs - user: 939 - group: 939 - makedirs: True miniodatadir: file.directory: - - name: /nsm/minio/data + - name: /nsm/minio/data/ - user: 939 - group: 939 - makedirs: True -#redisconfsync: -# file.recurse: -# - name: /opt/so/conf/redis/etc -# - source: salt://redis/etc -# - user: 939 -# - group: 939 -# - template: jinja +logstashbucket: + file.directory: + - name: /nsm/minio/data/logstash + - user: 939 + - group: 939 + - makedirs: True -minio/minio: - docker_image.present - -minio: +so-minio: docker_container.running: - - image: minio/minio + - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-minio:{{ VERSION }} - hostname: so-minio - user: socore - port_bindings: - - 0.0.0.0:9000:9000 + - 0.0.0.0:9595:9595 - environment: - MINIO_ACCESS_KEY: {{ access_key }} - MINIO_SECRET_KEY: {{ access_secret }} - binds: - /nsm/minio/data:/data:rw - - /opt/so/conf/minio/etc:/root/.minio:rw - - entrypoint: "/usr/bin/docker-entrypoint.sh server /data" - - network_mode: so-elastic-net + - /opt/so/conf/minio/etc:/.minio:rw + - /etc/pki/minio.key:/.minio/certs/private.key:ro + - /etc/pki/minio.crt:/.minio/certs/public.crt:ro + - entrypoint: "/usr/bin/docker-entrypoint.sh server --certs-dir /.minio/certs --address :9595 /data" \ No newline at end of file diff --git a/salt/motd/files/so_motd.jinja b/salt/motd/files/so_motd.jinja index 43ad3b4de..1efb77254 100644 --- a/salt/motd/files/so_motd.jinja +++ b/salt/motd/files/so_motd.jinja @@ -1,6 +1,6 @@ {% set needs_restarting_check = salt['mine.get']('*', 'needs_restarting.check', tgt_type='glob') -%} {% set role = grains.id.split('_') | last -%} -{% set url = salt['pillar.get']('manager:url_base') -%} +{% set url = salt['pillar.get']('global:url_base') -%} {% if role in ['eval', 'managersearch', 'manager', 'standalone'] %} Access the Security Onion web interface at https://{{ url }} diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls index c4caa5fcd..c9c6fde41 100644 --- a/salt/mysql/init.sls +++ b/salt/mysql/init.sls @@ -1,7 +1,7 @@ {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) %} -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set MAINIP = salt['pillar.get']('elasticsearch:mainip') %} {% set FLEETARCH = salt['grains.get']('role') %} @@ -10,7 +10,7 @@ {% set MAININT = salt['pillar.get']('host:mainint') %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} {% else %} - {% set MAINIP = salt['pillar.get']('static:managerip') %} + {% set MAINIP = salt['pillar.get']('global:managerip') %} {% endif %} # MySQL Setup @@ -89,7 +89,7 @@ so-mysql: - /opt/so/conf/mysql/etc 
cmd.run: - name: until nc -z {{ MAINIP }} 3306; do sleep 1; done - - timeout: 120 + - timeout: 900 - onchanges: - docker_container: so-mysql -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/nginx/etc/nginx.conf.so-eval b/salt/nginx/etc/nginx.conf.so-eval index 2998a5bf2..8032ed0ce 100644 --- a/salt/nginx/etc/nginx.conf.so-eval +++ b/salt/nginx/etc/nginx.conf.so-eval @@ -1,7 +1,7 @@ {%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %} -{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %} +{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} +{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} +{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} # For more information on configuration, see: # * Official English Documentation: http://nginx.org/en/docs/ # * Official Russian Documentation: http://nginx.org/ru/docs/ @@ -297,6 +297,9 @@ http { } location /sensoroniagents/ { + if ($http_authorization = "") { + return 403; + } proxy_pass http://{{ managerip }}:9822/; proxy_read_timeout 90; proxy_connect_timeout 90; diff --git a/salt/nginx/etc/nginx.conf.so-import b/salt/nginx/etc/nginx.conf.so-import new file mode 100644 index 000000000..9c919c764 --- /dev/null +++ b/salt/nginx/etc/nginx.conf.so-import @@ -0,0 +1,326 @@ +{%- set managerip = salt['pillar.get']('manager:mainip', '') %} +{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} +{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} +{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} +# For more information on configuration, see: +# * Official English Documentation: http://nginx.org/en/docs/ +# * Official Russian Documentation: http://nginx.org/ru/docs/ + +worker_processes auto; +error_log /var/log/nginx/error.log; +pid /run/nginx.pid; + +# Load dynamic modules. See /usr/share/nginx/README.dynamic. +include /usr/share/nginx/modules/*.conf; + +events { + worker_connections 1024; +} + +http { + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + client_max_body_size 1024M; + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + # Load modular configuration files from the /etc/nginx/conf.d directory. + # See http://nginx.org/en/docs/ngx_core_module.html#include + # for more information. + include /etc/nginx/conf.d/*.conf; + + #server { + # listen 80 default_server; + # listen [::]:80 default_server; + # server_name _; + # root /opt/socore/html; + # index index.html; + + # Load configuration files for the default server block. 
+ #include /etc/nginx/default.d/*.conf; + + # location / { + # } + + # error_page 404 /404.html; + # location = /40x.html { + # } + + # error_page 500 502 503 504 /50x.html; + # location = /50x.html { + # } + #} + server { + listen 80 default_server; + server_name _; + return 301 https://$host$request_uri; + } + +{% if FLEET_MANAGER %} + server { + listen 8090 ssl http2 default_server; + server_name _; + root /opt/socore/html; + index blank.html; + + ssl_certificate "/etc/pki/nginx/server.crt"; + ssl_certificate_key "/etc/pki/nginx/server.key"; + ssl_session_cache shared:SSL:1m; + ssl_session_timeout 10m; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_prefer_server_ciphers on; + + location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ { + grpc_pass grpcs://{{ managerip }}:8080; + grpc_set_header Host $host; + grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_buffering off; + } + + } +{% endif %} + +# Settings for a TLS enabled server. + + server { + listen 443 ssl http2 default_server; + #listen [::]:443 ssl http2 default_server; + server_name _; + root /opt/socore/html; + index index.html; + + ssl_certificate "/etc/pki/nginx/server.crt"; + ssl_certificate_key "/etc/pki/nginx/server.key"; + ssl_session_cache shared:SSL:1m; + ssl_session_timeout 10m; + ssl_ciphers HIGH:!aNULL:!MD5; + ssl_prefer_server_ciphers on; + + # Load configuration files for the default server block. + #include /etc/nginx/default.d/*.conf; + + location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) { + proxy_pass http://{{ managerip }}:9822; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location / { + auth_request /auth/sessions/whoami; + proxy_pass http://{{ managerip }}:9822/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location ~ ^/auth/.*?(whoami|login|logout|settings) { + rewrite /auth/(.*) /$1 break; + proxy_pass http://{{ managerip }}:4433; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /cyberchef/ { + auth_request /auth/sessions/whoami; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /navigator/ { + auth_request /auth/sessions/whoami; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /packages/ { + try_files $uri =206; + auth_request 
/auth/sessions/whoami; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /grafana/ { + auth_request /auth/sessions/whoami; + rewrite /grafana/(.*) /$1 break; + proxy_pass http://{{ managerip }}:3000/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /kibana/ { + auth_request /auth/sessions/whoami; + rewrite /kibana/(.*) /$1 break; + proxy_pass http://{{ managerip }}:5601/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /nodered/ { + proxy_pass http://{{ managerip }}:1880/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /playbook/ { + proxy_pass http://{{ managerip }}:3200/playbook/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + {%- if FLEET_NODE %} + location /fleet/ { + return 301 https://{{ FLEET_IP }}/fleet; + } + {%- else %} + location /fleet/ { + proxy_pass https://{{ managerip }}:8080; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + {%- endif %} + + location /thehive/ { + proxy_pass http://{{ managerip }}:9000/thehive/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_http_version 1.1; # this is essential for chunked responses to work + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /cortex/ { + proxy_pass http://{{ managerip }}:9001/cortex/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_http_version 1.1; # this is essential for chunked responses to work + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /soctopus/ { + proxy_pass http://{{ managerip }}:7000/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /kibana/app/soc/ { + rewrite 
^/kibana/app/soc/(.*) /soc/$1 permanent; + } + + location /kibana/app/fleet/ { + rewrite ^/kibana/app/fleet/(.*) /fleet/$1 permanent; + } + + location /kibana/app/soctopus/ { + rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent; + } + + location /sensoroniagents/ { + proxy_pass http://{{ managerip }}:9822/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + + error_page 401 = @error401; + + location @error401 { + add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400"; + return 302 /auth/self-service/browser/flows/login; + } + + #error_page 404 /404.html; + # location = /usr/share/nginx/html/40x.html { + #} + + error_page 500 502 503 504 /50x.html; + location = /usr/share/nginx/html/50x.html { + } + } + +} diff --git a/salt/nginx/etc/nginx.conf.so-manager b/salt/nginx/etc/nginx.conf.so-manager index bdb342cac..42caa7841 100644 --- a/salt/nginx/etc/nginx.conf.so-manager +++ b/salt/nginx/etc/nginx.conf.so-manager @@ -1,7 +1,7 @@ {%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %} -{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %} +{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} +{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} +{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} # For more information on configuration, see: # * Official English Documentation: http://nginx.org/en/docs/ # * Official Russian Documentation: http://nginx.org/ru/docs/ @@ -297,6 +297,9 @@ http { } location /sensoroniagents/ { + if ($http_authorization = "") { + return 403; + } proxy_pass http://{{ managerip }}:9822/; proxy_read_timeout 90; proxy_connect_timeout 90; diff --git a/salt/nginx/etc/nginx.conf.so-managersearch b/salt/nginx/etc/nginx.conf.so-managersearch index cb7576923..0f0e052c8 100644 --- a/salt/nginx/etc/nginx.conf.so-managersearch +++ b/salt/nginx/etc/nginx.conf.so-managersearch @@ -1,7 +1,7 @@ {%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %} -{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %} +{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} +{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} +{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} # For more information on configuration, see: # * Official English Documentation: http://nginx.org/en/docs/ # * Official Russian Documentation: http://nginx.org/ru/docs/ @@ -296,6 +296,9 @@ http { } location /sensoroniagents/ { + if ($http_authorization = "") { + return 403; + } proxy_pass http://{{ managerip }}:9822/; proxy_read_timeout 90; proxy_connect_timeout 90; diff --git a/salt/nginx/etc/nginx.conf.so-standalone b/salt/nginx/etc/nginx.conf.so-standalone index bdb342cac..42caa7841 100644 --- a/salt/nginx/etc/nginx.conf.so-standalone +++ b/salt/nginx/etc/nginx.conf.so-standalone @@ -1,7 +1,7 @@ {%- set managerip = salt['pillar.get']('manager:mainip', '') %} -{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %} -{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %} -{%- 
set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %} +{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %} +{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %} +{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %} # For more information on configuration, see: # * Official English Documentation: http://nginx.org/en/docs/ # * Official Russian Documentation: http://nginx.org/ru/docs/ @@ -297,6 +297,9 @@ http { } location /sensoroniagents/ { + if ($http_authorization = "") { + return 403; + } proxy_pass http://{{ managerip }}:9822/; proxy_read_timeout 90; proxy_connect_timeout 90; diff --git a/salt/nginx/files/navigator_config.json b/salt/nginx/files/navigator_config.json index bd40e09ef..d4f6e0908 100644 --- a/salt/nginx/files/navigator_config.json +++ b/salt/nginx/files/navigator_config.json @@ -1,4 +1,4 @@ -{%- set ip = salt['pillar.get']('static:managerip', '') %} +{%- set URL_BASE = salt['pillar.get']('global:url_base', '') %} { "enterprise_attack_url": "https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json", @@ -16,7 +16,7 @@ "domain": "mitre-enterprise", - "custom_context_menu_items": [ {"label": "view related plays","url": " https://{{ip}}/playbook/projects/detection-playbooks/issues?utf8=%E2%9C%93&set_filter=1&sort=id%3Adesc&f%5B%5D=cf_15&op%5Bcf_15%5D=%3D&f%5B%5D=&c%5B%5D=status&c%5B%5D=cf_10&c%5B%5D=cf_13&c%5B%5D=cf_18&c%5B%5D=cf_19&c%5B%5D=cf_1&c%5B%5D=updated_on&v%5Bcf_15%5D%5B%5D=~Technique_ID~"}], + "custom_context_menu_items": [ {"label": "view related plays","url": " https://{{URL_BASE}}/playbook/projects/detection-playbooks/issues?utf8=%E2%9C%93&set_filter=1&sort=id%3Adesc&f%5B%5D=cf_15&op%5Bcf_15%5D=%3D&f%5B%5D=&c%5B%5D=status&c%5B%5D=cf_10&c%5B%5D=cf_13&c%5B%5D=cf_18&c%5B%5D=cf_19&c%5B%5D=cf_1&c%5B%5D=updated_on&v%5Bcf_15%5D%5B%5D=~Technique_ID~"}], "default_layers": { "enabled": true, diff --git a/salt/nginx/init.sls b/salt/nginx/init.sls index 53bb13eec..2e67a6b2c 100644 --- a/salt/nginx/init.sls +++ b/salt/nginx/init.sls @@ -1,8 +1,8 @@ -{% set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) %} -{% set FLEETNODE = salt['pillar.get']('static:fleet_node', False) %} +{% set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) %} +{% set FLEETNODE = salt['pillar.get']('global:fleet_node', False) %} {% set MANAGER = salt['grains.get']('master') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} # Drop the correct nginx config based on role nginxconfdir: diff --git a/salt/nodered/files/nodered_load_flows b/salt/nodered/files/nodered_load_flows index 985c1c49a..78bab818a 100644 --- a/salt/nodered/files/nodered_load_flows +++ b/salt/nodered/files/nodered_load_flows @@ -1,4 +1,4 @@ -{%- set ip = salt['pillar.get']('static:managerip', '') -%} +{%- set ip = salt['pillar.get']('global:managerip', '') -%} #!/bin/bash default_salt_dir=/opt/so/saltstack/default diff --git a/salt/nodered/files/so_flows.json b/salt/nodered/files/so_flows.json index ad780ceb9..a8a6e2c69 100644 --- a/salt/nodered/files/so_flows.json +++ b/salt/nodered/files/so_flows.json @@ -1,4 +1,4 @@ -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%} -{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') -%} -{%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', 
'') -%} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') -%} +{%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') -%} +{%- set CORTEXKEY = salt['pillar.get']('global:cortexkey', '') -%} [{"id":"dca608c3.7d8af8","type":"tab","label":"TheHive - Webhook Events","disabled":false,"info":""},{"id":"4db74fa6.2556d","type":"tls-config","z":"","name":"","cert":"","key":"","ca":"","certname":"","keyname":"","caname":"","servername":"","verifyservercert":false},{"id":"aa6cf50d.a02fc8","type":"http in","z":"dca608c3.7d8af8","name":"TheHive Listener","url":"/thehive","method":"post","upload":false,"swaggerDoc":"","x":120,"y":780,"wires":[["2b92aebb.853dc2","2fce29bb.1b1376","82ad0f08.7a53f"]]},{"id":"2b92aebb.853dc2","type":"debug","z":"dca608c3.7d8af8","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"payload","targetType":"msg","x":470,"y":940,"wires":[]},{"id":"a4ecb84a.805958","type":"switch","z":"dca608c3.7d8af8","name":"Operation","property":"payload.operation","propertyType":"msg","rules":[{"t":"eq","v":"Creation","vt":"str"},{"t":"eq","v":"Update","vt":"str"},{"t":"eq","v":"Delete","vt":"str"}],"checkall":"false","repair":false,"outputs":3,"x":580,"y":780,"wires":[["f1e954fd.3c21d8"],["65928861.c90a48"],["a259a26c.a21"]],"outputLabels":["Creation","Update","Delete"]},{"id":"f1e954fd.3c21d8","type":"switch","z":"dca608c3.7d8af8","name":"Creation","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_task","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"},{"t":"eq","v":"case_artifact_job","vt":"str"},{"t":"eq","v":"alert","vt":"str"},{"t":"eq","v":"user","vt":"str"}],"checkall":"false","repair":false,"outputs":7,"x":900,"y":480,"wires":[["e88b4cc2.f6afe"],["8c54e39.a1b4f2"],["64203fe8.e0ad5"],["3511de51.889a02"],["14544a8b.b6b2f5"],["44c595a4.45d45c"],["3eb4bedf.6e20a2"]],"inputLabels":["Operation"],"outputLabels":["case","case_artifact","case_task","case_task_log","action","alert","user"],"info":"No webhook data is received for the following events:\n\n- Creation of Dashboard\n- Creation of Case Templates\n"},{"id":"65928861.c90a48","type":"switch","z":"dca608c3.7d8af8","name":"Update","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_artifact_job","vt":"str"},{"t":"eq","v":"case_task","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"},{"t":"eq","v":"alert","vt":"str"},{"t":"eq","v":"user","vt":"str"}],"checkall":"false","repair":false,"outputs":7,"x":900,"y":860,"wires":[["eebe1748.1cd348"],["d703adc0.12fd1"],["2b738415.408d4c"],["6d97371a.406348"],["4ae621e1.9ae6"],["5786cee2.98109"],["54077728.447648"]],"inputLabels":["Operation"],"outputLabels":["case","case_artifact",null,"case_task","case_task_log","alert","user"]},{"id":"a259a26c.a21","type":"switch","z":"dca608c3.7d8af8","name":"Delete","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"}],"checkall":"false","repair":false,"outputs":3,"x":890,"y":1200,"wires":[["60c8bcfb.eff1f4"],["df708bab.348308"],["e9a8650c.e20cc8"]],"outputLabels":["case","case_artifact",""],"info":"Deleting a case task doesnt actually trigger a delete event. 
It triggers an `update` event where the status = cancelled"},{"id":"54077728.447648","type":"switch","z":"dca608c3.7d8af8","name":"User","property":"payload.object.status","propertyType":"msg","rules":[{"t":"eq","v":"Locked","vt":"str"},{"t":"eq","v":"Ok","vt":"str"}],"checkall":"false","repair":false,"outputs":2,"x":1130,"y":980,"wires":[["9429d6c5.5ac788"],["4e3e091c.d35388"]]},{"id":"9429d6c5.5ac788","type":"function","z":"dca608c3.7d8af8","name":"status: Locked","func":"msg.topic = \"[The Hive] A user account was locked\";\nmsg.from = \"from@example.com\";\nmsg.to = \"to@example.com\";\nreturn msg;","outputs":1,"noerr":0,"x":1380,"y":972,"wires":[[]],"info":"- User account was locked"},{"id":"4e3e091c.d35388","type":"function","z":"dca608c3.7d8af8","name":"status: Ok","func":"msg.topic = \"[The Hive] A user account was changed\";\nmsg.from = \"from@example.com\";\nmsg.to = \"to@example.com\";\nreturn msg;","outputs":1,"noerr":0,"x":1360,"y":1020,"wires":[[]],"info":"- User account was unlocked\n- User description was changed\n- User role was changed\n- User API key was added\n- User API key was revoked\n"},{"id":"485f3be.1ffcfc4","type":"function","z":"dca608c3.7d8af8","name":"status: Open","func":"// Fires when a Case is updated AND status = open\n// This can include things like TLP/PAP changes\n\nreturn msg;","outputs":1,"noerr":0,"x":1370,"y":660,"wires":[[]]},{"id":"eebe1748.1cd348","type":"switch","z":"dca608c3.7d8af8","name":"case","property":"payload.object.status","propertyType":"msg","rules":[{"t":"eq","v":"Open","vt":"str"}],"checkall":"true","repair":false,"outputs":1,"x":1130,"y":740,"wires":[["485f3be.1ffcfc4","e4b7b4bf.2fb828"]],"info":"- A case was modified"},{"id":"8c54e39.a1b4f2","type":"switch","z":"dca608c3.7d8af8","name":"case_artifact: Run Analyzer","property":"payload.object.dataType","propertyType":"msg","rules":[{"t":"eq","v":"ip","vt":"str"},{"t":"eq","v":"domain","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":1600,"y":340,"wires":[["eb8cfeb7.a7118","a5dd8a8a.065b88"],["eb8cfeb7.a7118","a5dd8a8a.065b88"]],"info":"# References\n\n\n"},{"id":"2fce29bb.1b1376","type":"function","z":"dca608c3.7d8af8","name":"Add headers","func":"msg.thehive_url = 'https://{{ MANAGERIP }}/thehive';\nmsg.cortex_url = 'https://{{ MANAGERIP }}/cortex';\nmsg.cortex_id = 'CORTEX-SERVER-ID';\nreturn msg;","outputs":1,"noerr":0,"x":350,"y":780,"wires":[["a4ecb84a.805958"]]},{"id":"e4b7b4bf.2fb828","type":"function","z":"dca608c3.7d8af8","name":"status: Resolved","func":"// Fires when a case is closed (resolved)\n\nreturn msg;","outputs":1,"noerr":0,"x":1390,"y":720,"wires":[[]]},{"id":"e88b4cc2.f6afe","type":"function","z":"dca608c3.7d8af8","name":"case","func":"// Fires when a case is created\n// or when a responder is generated against a case\n\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":320,"wires":[[]]},{"id":"64203fe8.e0ad5","type":"function","z":"dca608c3.7d8af8","name":"case_task","func":"// Fires when a case task is created\nreturn msg;","outputs":1,"noerr":0,"x":1140,"y":400,"wires":[[]]},{"id":"3511de51.889a02","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"// Fires when a case task log is created\n\nreturn msg;","outputs":1,"noerr":0,"x":1163,"y":440,"wires":[[]]},{"id":"14544a8b.b6b2f5","type":"function","z":"dca608c3.7d8af8","name":"case_artifact_job","func":"// Fires when a Responder or Analyzser is Run on an existing observable\n\nreturn 
msg;","outputs":1,"noerr":0,"x":1173,"y":480,"wires":[[]]},{"id":"2b738415.408d4c","type":"function","z":"dca608c3.7d8af8","name":"case_artifact_job","func":"\nreturn msg;","outputs":1,"noerr":0,"x":1170,"y":820,"wires":[[]]},{"id":"3eb4bedf.6e20a2","type":"function","z":"dca608c3.7d8af8","name":"user","func":"// Fires when a user is created\n\nreturn msg;","outputs":1,"noerr":0,"x":1133,"y":560,"wires":[[]]},{"id":"d703adc0.12fd1","type":"function","z":"dca608c3.7d8af8","name":"case_artifact","func":"// Fires when an artifact is updated\nreturn msg;","outputs":1,"noerr":0,"x":1150,"y":780,"wires":[[]]},{"id":"6d97371a.406348","type":"function","z":"dca608c3.7d8af8","name":"case_task","func":"// Fires when a case task is updated\nreturn msg;","outputs":1,"noerr":0,"x":1140,"y":860,"wires":[[]]},{"id":"4ae621e1.9ae6","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"//Fires when a case_task_log is updated\n\nreturn msg;","outputs":1,"noerr":0,"x":1160,"y":900,"wires":[[]]},{"id":"60c8bcfb.eff1f4","type":"function","z":"dca608c3.7d8af8","name":"case","func":"//Fires when a case is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":1160,"wires":[[]]},{"id":"df708bab.348308","type":"function","z":"dca608c3.7d8af8","name":"case_artifact","func":"//Fires when a case_artifact is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1150,"y":1200,"wires":[[]]},{"id":"e9a8650c.e20cc8","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"//Fires when a case_task_log is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1160,"y":1240,"wires":[[]]},{"id":"5786cee2.98109","type":"function","z":"dca608c3.7d8af8","name":"alert","func":"//Fires when an alert is updated\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":940,"wires":[[]]},{"id":"44c595a4.45d45c","type":"change","z":"dca608c3.7d8af8","d":true,"name":"Convert Alert Msg to Artifacts","rules":[{"t":"move","p":"payload.object.artifacts","pt":"msg","to":"payload","tot":"msg"}],"action":"","property":"","from":"","to":"","reg":false,"x":1200,"y":520,"wires":[["6dcca25e.04bd2c"]]},{"id":"6dcca25e.04bd2c","type":"split","z":"dca608c3.7d8af8","name":"Split Artifacts","splt":"\\n","spltType":"str","arraySplt":1,"arraySpltType":"len","stream":false,"addname":"","x":1430,"y":520,"wires":[["767c84f2.c9ba2c"]]},{"id":"767c84f2.c9ba2c","type":"switch","z":"dca608c3.7d8af8","name":"alert: Run Analyzer","property":"payload.dataType","propertyType":"msg","rules":[{"t":"eq","v":"ip","vt":"str"},{"t":"eq","v":"domain","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":1630,"y":400,"wires":[["eb8cfeb7.a7118","a5dd8a8a.065b88"],["a5dd8a8a.065b88","eb8cfeb7.a7118"]],"info":"# References\n\n\n"},{"id":"82ad0f08.7a53f","type":"http response","z":"dca608c3.7d8af8","name":"Ack Event Receipt","statusCode":"200","headers":{},"x":250,"y":940,"wires":[]},{"id":"a5dd8a8a.065b88","type":"function","z":"dca608c3.7d8af8","name":"Run Analyzer: CERT DNS","func":"msg.analyzer_id = \"4f28afc20d78f98df425e36e561af33f\";\n\nif (msg.payload.objectId) {\n msg.tag = \"case_artifact\"\n msg.artifact_id = msg.payload.objectId\n msg.url = msg.thehive_url + '/api/connector/cortex/job';\n msg.payload = {\n 'cortexId' : msg.cortex_id,\n 'artifactId': msg.artifact_id,\n 'analyzerId': msg.analyzer_id\n };\n}\nelse {\n msg.tag = \"observable\"\n msg.observable = msg.payload.data\n msg.dataType = msg.payload.dataType\n\n msg.url = msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run';\n msg.payload = {\n 'data' : msg.observable,\n 'dataType': 
msg.dataType \n };\n}\nreturn msg;","outputs":1,"noerr":0,"x":1930,"y":420,"wires":[["f050a09f.b2201"]]},{"id":"eb8cfeb7.a7118","type":"function","z":"dca608c3.7d8af8","name":"Run Analyzer: Urlscan","func":"msg.analyzer_id = \"54e51b62c6c8ddc3cbc3cbdd889a0557\";\n\nif (msg.payload.objectId) {\n msg.tag = \"case_artifact\"\n msg.artifact_id = msg.payload.objectId\n msg.url = msg.thehive_url + '/api/connector/cortex/job';\n msg.payload = {\n 'cortexId' : msg.cortex_id,\n 'artifactId': msg.artifact_id,\n 'analyzerId': msg.analyzer_id\n };\n}\nelse {\n msg.tag = \"observable\"\n msg.observable = msg.payload.data\n msg.dataType = msg.payload.dataType\n\n msg.url = msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run';\n msg.payload = {\n 'data' : msg.observable,\n 'dataType': msg.dataType \n };\n}\nreturn msg;","outputs":1,"noerr":0,"x":1920,"y":320,"wires":[["f050a09f.b2201"]]},{"id":"1c448528.3032fb","type":"http request","z":"dca608c3.7d8af8","name":"Submit to Cortex","method":"POST","ret":"obj","paytoqs":false,"url":"","tls":"4db74fa6.2556d","persist":false,"proxy":"","authType":"bearer","credentials": {"user": "", "password": "{{ CORTEXKEY }}"},"x":2450,"y":420,"wires":[["ea6614fb.752a78"]]},{"id":"ea6614fb.752a78","type":"debug","z":"dca608c3.7d8af8","name":"Debug","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"true","targetType":"full","x":2670,"y":360,"wires":[]},{"id":"f050a09f.b2201","type":"switch","z":"dca608c3.7d8af8","name":"Cases vs Alerts","property":"tag","propertyType":"msg","rules":[{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"observable","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":2200,"y":360,"wires":[["f7fca977.a73b28"],["1c448528.3032fb"]],"inputLabels":["Data"],"outputLabels":["Cases","Alerts"]},{"id":"f7fca977.a73b28","type":"http request","z":"dca608c3.7d8af8","name":"Submit to TheHive","method":"POST","ret":"obj","paytoqs":false,"url":"","tls":"4db74fa6.2556d","persist":false,"proxy":"","authType":"bearer","credentials": {"user": "", "password": "{{ HIVEKEY }}"},"x":2450,"y":280,"wires":[["ea6614fb.752a78"]]}] diff --git a/salt/nodered/init.sls b/salt/nodered/init.sls index bec8f266a..34aacbd81 100644 --- a/salt/nodered/init.sls +++ b/salt/nodered/init.sls @@ -13,7 +13,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>.
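The observable branch of the two Run Analyzer function nodes in so_flows.json above reduces to a single authenticated POST against the Cortex analyzer API. A rough curl equivalent of the Submit to Cortex node, assuming MANAGERIP and CORTEX_KEY stand in for the templated {{ MANAGERIP }} and {{ CORTEXKEY }} pillar values, and reusing the CERT DNS analyzer id hard-coded in the flow:

```
# Submit a sample observable (8.8.8.8, dataType ip) directly to Cortex; this is the
# same URL the flow builds as msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run'
curl -sk -X POST "https://MANAGERIP/cortex/api/analyzer/4f28afc20d78f98df425e36e561af33f/run" \
  -H "Authorization: Bearer CORTEX_KEY" \
  -H "Content-Type: application/json" \
  -d '{"data":"8.8.8.8","dataType":"ip"}'
```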
-{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} # Create the nodered group noderedgroup: diff --git a/salt/patch/needs_restarting.sls b/salt/patch/needs_restarting.sls index f60909d22..40280d6e2 100644 --- a/salt/patch/needs_restarting.sls +++ b/salt/patch/needs_restarting.sls @@ -1,5 +1,5 @@ needs_restarting: module.run: - mine.send: - - func: needs_restarting.check + - name: needs_restarting.check - order: last diff --git a/salt/pcap/files/sensoroni.json b/salt/pcap/files/sensoroni.json index ab99c175c..79e97a75b 100644 --- a/salt/pcap/files/sensoroni.json +++ b/salt/pcap/files/sensoroni.json @@ -1,5 +1,5 @@ {%- set MANAGER = salt['grains.get']('master') -%} -{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%} +{%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') -%} {%- set CHECKININTERVALMS = salt['pillar.get']('pcap:sensor_checkin_interval_ms', 10000) -%} { "logFilename": "/opt/sensoroni/logs/sensoroni.log", diff --git a/salt/pcap/init.sls b/salt/pcap/init.sls index 1a9de6611..135b49334 100644 --- a/salt/pcap/init.sls +++ b/salt/pcap/init.sls @@ -12,12 +12,13 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %} {% set BPF_STENO = salt['pillar.get']('steno:bpf', None) %} {% set BPF_COMPILED = "" %} +{% from "pcap/map.jinja" import START with context %} # PCAP Section @@ -131,6 +132,7 @@ sensoronilog: so-steno: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-steno:{{ VERSION }} + - start: {{ START }} - network_mode: host - privileged: True - port_bindings: diff --git a/salt/pcap/map.jinja b/salt/pcap/map.jinja new file mode 100644 index 000000000..ad4d70e80 --- /dev/null +++ b/salt/pcap/map.jinja @@ -0,0 +1,6 @@ +# don't start the docker container if it is an import node +{% if grains.id.split('_')|last == 'import' %} + {% set START = False %} +{% else %} + {% set START = True %} +{% endif %} \ No newline at end of file diff --git a/salt/playbook/files/playbook_db_init.sql b/salt/playbook/files/playbook_db_init.sql index 1b1535fe3..83e5d6f54 100644 --- a/salt/playbook/files/playbook_db_init.sql +++ b/salt/playbook/files/playbook_db_init.sql @@ -455,7 +455,7 @@ CREATE TABLE `custom_values` ( PRIMARY KEY (`id`), KEY `custom_values_customized` (`customized_type`,`customized_id`), KEY `index_custom_values_on_custom_field_id` (`custom_field_id`) -) ENGINE=InnoDB AUTO_INCREMENT=134139 DEFAULT CHARSET=latin1; +) ENGINE=InnoDB AUTO_INCREMENT=145325 DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; -- @@ -825,7 +825,7 @@ CREATE TABLE `journal_details` ( `value` longtext, PRIMARY KEY (`id`), KEY `journal_details_journal_id` (`journal_id`) -) ENGINE=InnoDB AUTO_INCREMENT=9 DEFAULT CHARSET=latin1; +) ENGINE=InnoDB AUTO_INCREMENT=792 DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; -- @@ -857,7 +857,7 @@ CREATE TABLE `journals` ( KEY `index_journals_on_user_id` (`user_id`), KEY `index_journals_on_journalized_id` (`journalized_id`), KEY 
`index_journals_on_created_on` (`created_on`) -) ENGINE=InnoDB AUTO_INCREMENT=8218 DEFAULT CHARSET=latin1; +) ENGINE=InnoDB AUTO_INCREMENT=9502 DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; -- @@ -1146,7 +1146,7 @@ CREATE TABLE `queries` ( LOCK TABLES `queries` WRITE; /*!40000 ALTER TABLE `queries` DISABLE KEYS */; -INSERT INTO `queries` VALUES (3,1,'All Plays','---\ntracker_id:\n :operator: \"=\"\n :values:\n - \'1\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(4,NULL,'Disabled Plays','---\nstatus_id:\n :operator: \"=\"\n :values:\n - \'6\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(5,NULL,'Draft Plays','---\nstatus_id:\n :operator: \"=\"\n :values:\n - \'2\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(6,NULL,'Playbook - Community Sigma','---\ncf_13:\n :operator: \"=\"\n :values:\n - community\n',1,'---\n- :status\n- :cf_10\n- :cf_18\n- :cf_19\n- :cf_20\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(8,NULL,'Playbook - Internal','---\ncf_13:\n :operator: \"=\"\n :values:\n - Internal\n',1,'---\n- :status\n- :cf_10\n- :cf_14\n- :cf_16\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(9,NULL,'Active Plays','---\ntracker_id:\n :operator: \"=\"\n :values:\n - \'1\'\nstatus_id:\n :operator: \"=\"\n :values:\n - \'3\'\n',1,'---\n- :status\n- :cf_10\n- :cf_13\n- :cf_18\n- :cf_19\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'); +INSERT INTO `queries` VALUES (3,1,'All Plays','---\ntracker_id:\n :operator: \"=\"\n :values:\n - \'1\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(4,NULL,'Inactive Plays','---\nstatus_id:\n :operator: \"=\"\n :values:\n - \'4\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(5,NULL,'Draft Plays','---\nstatus_id:\n :operator: \"=\"\n :values:\n - \'2\'\n',1,NULL,'---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(6,NULL,'Playbook - Community Sigma','---\ncf_13:\n :operator: \"=\"\n :values:\n - community\n',1,'---\n- :status\n- :cf_10\n- :cf_18\n- :cf_19\n- :cf_20\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(8,NULL,'Playbook - Internal','---\ncf_13:\n :operator: \"=\"\n :values:\n - Internal\n',1,'---\n- :status\n- :cf_10\n- :cf_14\n- :cf_16\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: 
[]\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'),(9,NULL,'Active Plays','---\ntracker_id:\n :operator: \"=\"\n :values:\n - \'1\'\nstatus_id:\n :operator: \"=\"\n :values:\n - \'3\'\n',1,'---\n- :status\n- :cf_10\n- :cf_13\n- :cf_18\n- :cf_19\n- :cf_1\n- :updated_on\n','---\n- - id\n - desc\n','','IssueQuery',2,'---\n:totalable_names: []\n:display_type: list\n:draw_relations: \n:draw_progress_line: \n:draw_selected_columns: \n'); /*!40000 ALTER TABLE `queries` ENABLE KEYS */; UNLOCK TABLES; @@ -1310,7 +1310,7 @@ CREATE TABLE `settings` ( LOCK TABLES `settings` WRITE; /*!40000 ALTER TABLE `settings` DISABLE KEYS */; -INSERT INTO `settings` VALUES (1,'ui_theme','circle','2020-04-26 13:11:26'),(2,'default_language','en','2020-04-26 13:11:26'),(3,'force_default_language_for_anonymous','0','2020-04-26 13:11:26'),(4,'force_default_language_for_loggedin','0','2020-04-26 13:11:26'),(5,'start_of_week','','2020-04-26 13:11:26'),(6,'date_format','','2020-04-26 13:11:26'),(7,'time_format','','2020-04-26 13:11:26'),(8,'timespan_format','decimal','2020-04-26 13:11:26'),(9,'user_format','firstname_lastname','2020-05-02 12:45:00'),(10,'gravatar_enabled','1','2020-05-02 12:41:07'),(11,'thumbnails_enabled','1','2020-04-26 13:11:26'),(12,'thumbnails_size','100','2020-04-26 13:11:26'),(13,'new_item_menu_tab','0','2020-04-26 13:11:30'),(14,'login_required','0','2020-07-10 19:32:45'),(15,'autologin','0','2020-04-26 13:11:54'),(16,'self_registration','0','2020-04-26 13:11:54'),(17,'show_custom_fields_on_registration','0','2020-04-26 13:11:54'),(18,'password_min_length','8','2020-04-26 13:11:54'),(19,'password_required_char_classes','--- []\n','2020-04-26 13:11:54'),(20,'password_max_age','0','2020-04-26 13:11:54'),(21,'lost_password','1','2020-04-26 13:11:54'),(22,'openid','0','2020-04-26 13:11:55'),(23,'session_lifetime','0','2020-04-26 13:11:55'),(24,'session_timeout','0','2020-04-26 13:11:55'),(25,'rest_api_enabled','1','2020-04-26 13:11:58'),(26,'jsonp_enabled','0','2020-04-26 13:11:58'),(27,'default_projects_public','0','2020-04-26 13:12:21'),(28,'default_projects_modules','---\n- sigma_editor\n','2020-04-26 13:12:21'),(29,'default_projects_tracker_ids','--- []\n','2020-04-26 13:12:21'),(30,'sequential_project_identifiers','0','2020-04-26 13:12:21'),(31,'project_list_defaults','---\n:column_names:\n- name\n- identifier\n- short_description\n','2020-04-26 13:12:21'),(32,'app_title','Playbook','2020-04-26 18:17:51'),(33,'welcome_text','','2020-04-26 18:17:51'),(34,'per_page_options','25,75,150','2020-05-02 12:41:38'),(35,'search_results_per_page','10','2020-04-26 18:17:51'),(36,'activity_days_default','30','2020-04-26 18:17:51'),(37,'host_name','localhost:3000','2020-04-26 18:17:51'),(38,'protocol','http','2020-04-26 18:17:51'),(39,'text_formatting','textile','2020-04-26 18:17:51'),(40,'cache_formatted_text','0','2020-04-26 18:17:51'),(41,'wiki_compression','','2020-04-26 18:17:51'),(42,'feeds_limit','15','2020-04-26 18:17:51'),(43,'plugin_redmine_playbook','--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nproject: \'1\'\nconvert_url: http://10.66.166.188:7000/playbook/sigmac\ncreate_url: http://10.66.166.188:7000/playbook/play','2020-05-02 12:39:20'),(44,'cross_project_issue_relations','0','2020-05-01 16:27:33'),(45,'link_copied_issue','no','2020-05-01 16:27:33'),(46,'cross_project_subtasks','','2020-05-01 16:27:33'),(47,'close_duplicate_issues','0','2020-05-01 16:27:33'),(48,'issue_group_assignment','0','2020-05-01 
16:27:33'),(49,'default_issue_start_date_to_creation_date','1','2020-05-01 16:27:33'),(50,'display_subprojects_issues','0','2020-05-01 16:27:33'),(51,'issue_done_ratio','issue_field','2020-05-01 16:27:33'),(52,'non_working_week_days','---\n- \'6\'\n- \'7\'\n','2020-05-01 16:27:33'),(53,'issues_export_limit','500','2020-05-01 16:27:33'),(54,'gantt_items_limit','500','2020-05-01 16:27:33'),(55,'gantt_months_limit','24','2020-05-01 16:27:33'),(56,'parent_issue_dates','derived','2020-05-01 16:27:33'),(57,'parent_issue_priority','derived','2020-05-01 16:27:33'),(58,'parent_issue_done_ratio','derived','2020-05-01 16:27:33'),(59,'issue_list_default_columns','---\n- status\n- cf_10\n- cf_13\n- cf_14\n- cf_1\n- updated_on\n','2020-05-01 19:32:13'),(60,'issue_list_default_totals','--- []\n','2020-05-01 16:27:33'),(61,'enabled_scm','--- []\n','2020-05-01 16:27:47'),(62,'autofetch_changesets','0','2020-05-01 16:27:47'),(63,'sys_api_enabled','0','2020-05-01 16:27:47'),(64,'repository_log_display_limit','100','2020-05-01 16:27:47'),(65,'commit_logs_formatting','1','2020-05-01 16:27:47'),(66,'commit_ref_keywords','refs,references,IssueID','2020-05-01 16:27:47'),(67,'commit_cross_project_ref','0','2020-05-01 16:27:47'),(68,'commit_logtime_enabled','0','2020-05-01 16:27:47'),(69,'commit_update_keywords','--- []\n','2020-05-01 16:27:47'),(70,'gravatar_default','','2020-05-02 12:41:07'); +INSERT INTO `settings` VALUES (1,'ui_theme','circle','2020-04-26 13:11:26'),(2,'default_language','en','2020-04-26 13:11:26'),(3,'force_default_language_for_anonymous','0','2020-04-26 13:11:26'),(4,'force_default_language_for_loggedin','0','2020-04-26 13:11:26'),(5,'start_of_week','','2020-04-26 13:11:26'),(6,'date_format','','2020-04-26 13:11:26'),(7,'time_format','','2020-04-26 13:11:26'),(8,'timespan_format','decimal','2020-04-26 13:11:26'),(9,'user_format','firstname_lastname','2020-05-02 12:45:00'),(10,'gravatar_enabled','1','2020-05-02 12:41:07'),(11,'thumbnails_enabled','1','2020-04-26 13:11:26'),(12,'thumbnails_size','100','2020-04-26 13:11:26'),(13,'new_item_menu_tab','0','2020-04-26 13:11:30'),(14,'login_required','0','2020-07-10 19:32:45'),(15,'autologin','0','2020-04-26 13:11:54'),(16,'self_registration','0','2020-04-26 13:11:54'),(17,'show_custom_fields_on_registration','0','2020-04-26 13:11:54'),(18,'password_min_length','8','2020-04-26 13:11:54'),(19,'password_required_char_classes','--- []\n','2020-04-26 13:11:54'),(20,'password_max_age','0','2020-04-26 13:11:54'),(21,'lost_password','1','2020-04-26 13:11:54'),(22,'openid','0','2020-04-26 13:11:55'),(23,'session_lifetime','0','2020-04-26 13:11:55'),(24,'session_timeout','0','2020-04-26 13:11:55'),(25,'rest_api_enabled','1','2020-04-26 13:11:58'),(26,'jsonp_enabled','0','2020-04-26 13:11:58'),(27,'default_projects_public','0','2020-04-26 13:12:21'),(28,'default_projects_modules','---\n- sigma_editor\n','2020-04-26 13:12:21'),(29,'default_projects_tracker_ids','--- []\n','2020-04-26 13:12:21'),(30,'sequential_project_identifiers','0','2020-04-26 13:12:21'),(31,'project_list_defaults','---\n:column_names:\n- name\n- identifier\n- short_description\n','2020-04-26 13:12:21'),(32,'app_title','Playbook','2020-04-26 18:17:51'),(33,'welcome_text','','2020-04-26 18:17:51'),(34,'per_page_options','25,75,150','2020-05-02 12:41:38'),(35,'search_results_per_page','10','2020-04-26 18:17:51'),(36,'activity_days_default','30','2020-04-26 18:17:51'),(37,'host_name','localhost:3000','2020-04-26 18:17:51'),(38,'protocol','http','2020-04-26 
18:17:51'),(39,'text_formatting','textile','2020-04-26 18:17:51'),(40,'cache_formatted_text','0','2020-04-26 18:17:51'),(41,'wiki_compression','','2020-04-26 18:17:51'),(42,'feeds_limit','15','2020-04-26 18:17:51'),(43,'plugin_redmine_playbook','--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess\nproject: \'1\'\nconvert_url: http://10.66.166.135:7000/playbook/sigmac\ncreate_url: http://10.66.166.135:7000/playbook/play','2020-05-02 12:39:20'),(44,'cross_project_issue_relations','0','2020-05-01 16:27:33'),(45,'link_copied_issue','no','2020-05-01 16:27:33'),(46,'cross_project_subtasks','','2020-05-01 16:27:33'),(47,'close_duplicate_issues','0','2020-05-01 16:27:33'),(48,'issue_group_assignment','0','2020-05-01 16:27:33'),(49,'default_issue_start_date_to_creation_date','1','2020-05-01 16:27:33'),(50,'display_subprojects_issues','0','2020-05-01 16:27:33'),(51,'issue_done_ratio','issue_field','2020-05-01 16:27:33'),(52,'non_working_week_days','---\n- \'6\'\n- \'7\'\n','2020-05-01 16:27:33'),(53,'issues_export_limit','500','2020-05-01 16:27:33'),(54,'gantt_items_limit','500','2020-05-01 16:27:33'),(55,'gantt_months_limit','24','2020-05-01 16:27:33'),(56,'parent_issue_dates','derived','2020-05-01 16:27:33'),(57,'parent_issue_priority','derived','2020-05-01 16:27:33'),(58,'parent_issue_done_ratio','derived','2020-05-01 16:27:33'),(59,'issue_list_default_columns','---\n- status\n- cf_10\n- cf_13\n- cf_14\n- cf_1\n- updated_on\n','2020-05-01 19:32:13'),(60,'issue_list_default_totals','--- []\n','2020-05-01 16:27:33'),(61,'enabled_scm','--- []\n','2020-05-01 16:27:47'),(62,'autofetch_changesets','0','2020-05-01 16:27:47'),(63,'sys_api_enabled','0','2020-05-01 16:27:47'),(64,'repository_log_display_limit','100','2020-05-01 16:27:47'),(65,'commit_logs_formatting','1','2020-05-01 16:27:47'),(66,'commit_ref_keywords','refs,references,IssueID','2020-05-01 16:27:47'),(67,'commit_cross_project_ref','0','2020-05-01 16:27:47'),(68,'commit_logtime_enabled','0','2020-05-01 16:27:47'),(69,'commit_update_keywords','--- []\n','2020-05-01 16:27:47'),(70,'gravatar_default','','2020-05-02 12:41:07'); /*!40000 ALTER TABLE `settings` ENABLE KEYS */; UNLOCK TABLES; @@ -1371,7 +1371,7 @@ CREATE TABLE `tokens` ( PRIMARY KEY (`id`), UNIQUE KEY `tokens_value` (`value`), KEY `index_tokens_on_user_id` (`user_id`) -) ENGINE=InnoDB AUTO_INCREMENT=62 DEFAULT CHARSET=latin1; +) ENGINE=InnoDB AUTO_INCREMENT=67 DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; -- @@ -1380,7 +1380,7 @@ CREATE TABLE `tokens` ( LOCK TABLES `tokens` WRITE; /*!40000 ALTER TABLE `tokens` DISABLE KEYS */; -INSERT INTO `tokens` VALUES (3,1,'feeds','6e5575602e1227c188cd85ef6d12608bb8701193','2020-04-26 13:10:46','2020-04-26 13:10:46'),(4,1,'session','999412fa9badda7423c6c654d6364c32c20b3eac','2020-04-26 18:07:03','2020-04-26 18:12:02'),(5,1,'session','124ad4acbf87a942426350e7ad028c1d119c3851','2020-04-26 18:17:11','2020-04-26 18:19:24'),(9,1,'session','2890c663e0552f26ddb92acad6ab3b6d05b92915','2020-04-26 18:51:15','2020-04-26 18:51:15'),(19,1,'session','b7ffb106ea0b34650dd9c1770f74c2b0ffe166b2','2020-05-01 16:52:33','2020-05-01 18:02:30'),(20,1,'session','f44cfcf918eef59ffda47991c431d9c2b2ac6113','2020-05-01 18:05:56','2020-05-01 18:05:56'),(23,9,'feeds','211918c9d7168979b5dc19bebb14573b928a5067','2020-05-01 18:26:17','2020-05-01 18:26:17'),(25,9,'api','de6639318502476f2fa5aa06f43f51fb389a3d7f','2020-05-01 18:26:31','2020-05-01 18:26:31'),(46,1,'session','2d0c8f8ae641c06d8c2362746846440d465d53c0','2020-05-06 
20:48:01','2020-05-06 20:48:07'),(59,1,'session','2afe6590653d59a697d1436729c64f322a2eff82','2020-07-01 18:11:07','2020-07-01 20:30:43'),(61,1,'session','b01f95709ca1ab086a049cf9c5afd81ca9d4526e','2020-07-15 16:30:42','2020-07-15 16:31:40'); +INSERT INTO `tokens` VALUES (3,1,'feeds','6e5575602e1227c188cd85ef6d12608bb8701193','2020-04-26 13:10:46','2020-04-26 13:10:46'),(4,1,'session','999412fa9badda7423c6c654d6364c32c20b3eac','2020-04-26 18:07:03','2020-04-26 18:12:02'),(5,1,'session','124ad4acbf87a942426350e7ad028c1d119c3851','2020-04-26 18:17:11','2020-04-26 18:19:24'),(9,1,'session','2890c663e0552f26ddb92acad6ab3b6d05b92915','2020-04-26 18:51:15','2020-04-26 18:51:15'),(19,1,'session','b7ffb106ea0b34650dd9c1770f74c2b0ffe166b2','2020-05-01 16:52:33','2020-05-01 18:02:30'),(20,1,'session','f44cfcf918eef59ffda47991c431d9c2b2ac6113','2020-05-01 18:05:56','2020-05-01 18:05:56'),(23,9,'feeds','211918c9d7168979b5dc19bebb14573b928a5067','2020-05-01 18:26:17','2020-05-01 18:26:17'),(25,9,'api','de6639318502476f2fa5aa06f43f51fb389a3d7f','2020-05-01 18:26:31','2020-05-01 18:26:31'),(46,1,'session','2d0c8f8ae641c06d8c2362746846440d465d53c0','2020-05-06 20:48:01','2020-05-06 20:48:07'),(59,1,'session','2afe6590653d59a697d1436729c64f322a2eff82','2020-07-01 18:11:07','2020-07-01 20:30:43'),(61,1,'session','b01f95709ca1ab086a049cf9c5afd81ca9d4526e','2020-07-15 16:30:42','2020-07-15 16:31:40'),(62,1,'session','d29acdcd0b8e4ebf78ef8f696d3e76df7e2ab2ac','2020-08-17 14:51:59','2020-08-17 14:53:22'); /*!40000 ALTER TABLE `tokens` ENABLE KEYS */; UNLOCK TABLES; @@ -1481,7 +1481,7 @@ CREATE TABLE `users` ( LOCK TABLES `users` WRITE; /*!40000 ALTER TABLE `users` DISABLE KEYS */; -INSERT INTO `users` VALUES (1,'admin','95535e9f7a386c412f20134ebb869c00cf346477','Admin','Admin',1,1,'2020-07-15 16:30:42','',NULL,'2020-04-26 13:08:34','2020-04-26 13:10:45','User',NULL,'all','5ceb2c95ce1593d4ba034d385ceefb2f',0,'2020-04-26 13:10:27'),(2,'','','','Anonymous users',0,1,NULL,'',NULL,'2020-04-26 13:08:38','2020-04-26 13:08:38','GroupAnonymous',NULL,'',NULL,0,NULL),(3,'','','','Non member users',0,1,NULL,'',NULL,'2020-04-26 13:08:38','2020-04-26 13:08:38','GroupNonMember',NULL,'',NULL,0,NULL),(4,'','','','Anonymous',0,0,NULL,'',NULL,'2020-04-26 13:09:44','2020-04-26 13:09:44','AnonymousUser',NULL,'only_my_events',NULL,0,NULL),(5,'','','','Analysts',0,1,NULL,'',NULL,'2020-04-26 18:43:40','2020-04-26 18:43:40','Group',NULL,'',NULL,0,NULL),(6,'','','','Automation',0,1,NULL,'',NULL,'2020-04-26 18:43:47','2020-04-26 18:43:47','Group',NULL,'',NULL,0,NULL),(7,'','','','Admins',0,1,NULL,'',NULL,'2020-04-26 18:43:58','2020-04-26 18:43:58','Group',NULL,'',NULL,0,NULL),(9,'automation','d2e7d78af1f0c0637765ae8cf1a359c4a30034c9','SecOps','Automation',0,1,'2020-05-01 18:26:17','en',NULL,'2020-04-26 18:47:46','2020-05-01 18:26:10','User',NULL,'none','41043e596f70e327e34fc99c861f5b31',0,'2020-05-01 18:26:10'); +INSERT INTO `users` VALUES (1,'admin','95535e9f7a386c412f20134ebb869c00cf346477','Admin','Admin',1,1,'2020-08-17 18:03:20','',NULL,'2020-04-26 13:08:34','2020-04-26 13:10:45','User',NULL,'all','5ceb2c95ce1593d4ba034d385ceefb2f',0,'2020-04-26 13:10:27'),(2,'','','','Anonymous users',0,1,NULL,'',NULL,'2020-04-26 13:08:38','2020-04-26 13:08:38','GroupAnonymous',NULL,'',NULL,0,NULL),(3,'','','','Non member users',0,1,NULL,'',NULL,'2020-04-26 13:08:38','2020-04-26 13:08:38','GroupNonMember',NULL,'',NULL,0,NULL),(4,'','','','Anonymous',0,0,NULL,'',NULL,'2020-04-26 13:09:44','2020-04-26 
13:09:44','AnonymousUser',NULL,'only_my_events',NULL,0,NULL),(5,'','','','Analysts',0,1,NULL,'',NULL,'2020-04-26 18:43:40','2020-04-26 18:43:40','Group',NULL,'',NULL,0,NULL),(6,'','','','Automation',0,1,NULL,'',NULL,'2020-04-26 18:43:47','2020-04-26 18:43:47','Group',NULL,'',NULL,0,NULL),(7,'','','','Admins',0,1,NULL,'',NULL,'2020-04-26 18:43:58','2020-04-26 18:43:58','Group',NULL,'',NULL,0,NULL),(9,'automation','d2e7d78af1f0c0637765ae8cf1a359c4a30034c9','SecOps','Automation',0,1,'2020-05-01 18:26:17','en',NULL,'2020-04-26 18:47:46','2020-05-01 18:26:10','User',NULL,'none','41043e596f70e327e34fc99c861f5b31',0,'2020-05-01 18:26:10'); /*!40000 ALTER TABLE `users` ENABLE KEYS */; UNLOCK TABLES; @@ -1567,7 +1567,7 @@ CREATE TABLE `webhooks` ( LOCK TABLES `webhooks` WRITE; /*!40000 ALTER TABLE `webhooks` DISABLE KEYS */; -INSERT INTO `webhooks` VALUES (1,'http://10.66.166.188:7000/playbook/webhook',1); +INSERT INTO `webhooks` VALUES (1,'http://10.66.166.135:7000/playbook/webhook',1); /*!40000 ALTER TABLE `webhooks` ENABLE KEYS */; UNLOCK TABLES; @@ -1742,7 +1742,7 @@ CREATE TABLE `workflows` ( KEY `index_workflows_on_role_id` (`role_id`), KEY `index_workflows_on_new_status_id` (`new_status_id`), KEY `index_workflows_on_tracker_id` (`tracker_id`) -) ENGINE=InnoDB AUTO_INCREMENT=648 DEFAULT CHARSET=latin1; +) ENGINE=InnoDB AUTO_INCREMENT=652 DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; -- @@ -1751,7 +1751,7 @@ CREATE TABLE `workflows` ( LOCK TABLES `workflows` WRITE; /*!40000 ALTER TABLE `workflows` DISABLE KEYS */; -INSERT INTO `workflows` VALUES (132,1,2,0,3,0,0,'WorkflowPermission','14','readonly'),(134,1,2,0,3,0,0,'WorkflowPermission','16','readonly'),(151,1,3,0,3,0,0,'WorkflowPermission','14','readonly'),(153,1,3,0,3,0,0,'WorkflowPermission','16','readonly'),(170,1,4,0,3,0,0,'WorkflowPermission','14','readonly'),(172,1,4,0,3,0,0,'WorkflowPermission','16','readonly'),(189,1,5,0,3,0,0,'WorkflowPermission','14','readonly'),(191,1,5,0,3,0,0,'WorkflowPermission','16','readonly'),(208,1,6,0,3,0,0,'WorkflowPermission','14','readonly'),(210,1,6,0,3,0,0,'WorkflowPermission','16','readonly'),(220,1,2,3,3,0,0,'WorkflowTransition',NULL,NULL),(221,1,2,3,4,0,0,'WorkflowTransition',NULL,NULL),(222,1,2,3,5,0,0,'WorkflowTransition',NULL,NULL),(223,1,2,6,3,0,0,'WorkflowTransition',NULL,NULL),(224,1,2,6,4,0,0,'WorkflowTransition',NULL,NULL),(225,1,2,6,5,0,0,'WorkflowTransition',NULL,NULL),(226,1,3,4,3,0,0,'WorkflowTransition',NULL,NULL),(227,1,3,4,4,0,0,'WorkflowTransition',NULL,NULL),(228,1,3,4,5,0,0,'WorkflowTransition',NULL,NULL),(229,1,4,5,3,0,0,'WorkflowTransition',NULL,NULL),(230,1,4,5,4,0,0,'WorkflowTransition',NULL,NULL),(231,1,4,5,5,0,0,'WorkflowTransition',NULL,NULL),(232,1,4,6,3,0,0,'WorkflowTransition',NULL,NULL),(233,1,4,6,4,0,0,'WorkflowTransition',NULL,NULL),(234,1,4,6,5,0,0,'WorkflowTransition',NULL,NULL),(235,1,6,3,3,0,0,'WorkflowTransition',NULL,NULL),(236,1,6,3,4,0,0,'WorkflowTransition',NULL,NULL),(237,1,6,3,5,0,0,'WorkflowTransition',NULL,NULL),(239,1,2,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(240,1,3,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(241,1,4,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(242,1,5,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(243,1,6,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(244,1,0,2,5,0,0,'WorkflowTransition',NULL,NULL),(245,1,0,2,4,0,0,'WorkflowTransition',NULL,NULL),(246,1,0,6,5,0,0,'WorkflowTransition',NULL,NULL),(352,1,2,0,3,0,0,'WorkflowPermission','project_id','r
eadonly'),(353,1,2,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(354,1,2,0,3,0,0,'WorkflowPermission','subject','readonly'),(355,1,2,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(356,1,2,0,3,0,0,'WorkflowPermission','is_private','readonly'),(357,1,2,0,3,0,0,'WorkflowPermission','description','readonly'),(358,1,2,0,3,0,0,'WorkflowPermission','1','readonly'),(359,1,2,0,3,0,0,'WorkflowPermission','2','readonly'),(360,1,2,0,3,0,0,'WorkflowPermission','10','readonly'),(361,1,2,0,3,0,0,'WorkflowPermission','20','readonly'),(362,1,2,0,3,0,0,'WorkflowPermission','8','readonly'),(363,1,2,0,3,0,0,'WorkflowPermission','15','readonly'),(364,1,2,0,3,0,0,'WorkflowPermission','11','readonly'),(365,1,2,0,3,0,0,'WorkflowPermission','12','readonly'),(366,1,2,0,3,0,0,'WorkflowPermission','19','readonly'),(367,1,2,0,3,0,0,'WorkflowPermission','7','readonly'),(368,1,2,0,3,0,0,'WorkflowPermission','3','readonly'),(369,1,2,0,3,0,0,'WorkflowPermission','5','readonly'),(370,1,2,0,3,0,0,'WorkflowPermission','6','readonly'),(371,1,2,0,3,0,0,'WorkflowPermission','22','readonly'),(372,1,3,0,3,0,0,'WorkflowPermission','project_id','readonly'),(373,1,3,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(374,1,3,0,3,0,0,'WorkflowPermission','subject','readonly'),(375,1,3,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(376,1,3,0,3,0,0,'WorkflowPermission','is_private','readonly'),(377,1,3,0,3,0,0,'WorkflowPermission','description','readonly'),(378,1,3,0,3,0,0,'WorkflowPermission','1','readonly'),(379,1,3,0,3,0,0,'WorkflowPermission','2','readonly'),(380,1,3,0,3,0,0,'WorkflowPermission','10','readonly'),(381,1,3,0,3,0,0,'WorkflowPermission','20','readonly'),(382,1,3,0,3,0,0,'WorkflowPermission','8','readonly'),(383,1,3,0,3,0,0,'WorkflowPermission','15','readonly'),(384,1,3,0,3,0,0,'WorkflowPermission','11','readonly'),(385,1,3,0,3,0,0,'WorkflowPermission','12','readonly'),(386,1,3,0,3,0,0,'WorkflowPermission','19','readonly'),(387,1,3,0,3,0,0,'WorkflowPermission','7','readonly'),(388,1,3,0,3,0,0,'WorkflowPermission','3','readonly'),(389,1,3,0,3,0,0,'WorkflowPermission','5','readonly'),(390,1,3,0,3,0,0,'WorkflowPermission','6','readonly'),(391,1,3,0,3,0,0,'WorkflowPermission','22','readonly'),(392,1,4,0,3,0,0,'WorkflowPermission','project_id','readonly'),(393,1,4,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(394,1,4,0,3,0,0,'WorkflowPermission','subject','readonly'),(395,1,4,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(396,1,4,0,3,0,0,'WorkflowPermission','is_private','readonly'),(397,1,4,0,3,0,0,'WorkflowPermission','description','readonly'),(398,1,4,0,3,0,0,'WorkflowPermission','1','readonly'),(399,1,4,0,3,0,0,'WorkflowPermission','2','readonly'),(400,1,4,0,3,0,0,'WorkflowPermission','10','readonly'),(401,1,4,0,3,0,0,'WorkflowPermission','20','readonly'),(402,1,4,0,3,0,0,'WorkflowPermission','8','readonly'),(403,1,4,0,3,0,0,'WorkflowPermission','15','readonly'),(404,1,4,0,3,0,0,'WorkflowPermission','11','readonly'),(405,1,4,0,3,0,0,'WorkflowPermission','12','readonly'),(406,1,4,0,3,0,0,'WorkflowPermission','19','readonly'),(407,1,4,0,3,0,0,'WorkflowPermission','7','readonly'),(408,1,4,0,3,0,0,'WorkflowPermission','3','readonly'),(409,1,4,0,3,0,0,'WorkflowPermission','5','readonly'),(410,1,4,0,3,0,0,'WorkflowPermission','6','readonly'),(411,1,4,0,3,0,0,'WorkflowPermission','22','readonly'),(412,1,5,0,3,0,0,'WorkflowPermission','project_id','readonly'),(413,1,5,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(414,1,5,0,3,0,0,'WorkflowPermission','subject','readonly'),(4
15,1,5,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(416,1,5,0,3,0,0,'WorkflowPermission','is_private','readonly'),(417,1,5,0,3,0,0,'WorkflowPermission','description','readonly'),(418,1,5,0,3,0,0,'WorkflowPermission','1','readonly'),(419,1,5,0,3,0,0,'WorkflowPermission','2','readonly'),(420,1,5,0,3,0,0,'WorkflowPermission','10','readonly'),(421,1,5,0,3,0,0,'WorkflowPermission','20','readonly'),(422,1,5,0,3,0,0,'WorkflowPermission','8','readonly'),(423,1,5,0,3,0,0,'WorkflowPermission','15','readonly'),(424,1,5,0,3,0,0,'WorkflowPermission','11','readonly'),(425,1,5,0,3,0,0,'WorkflowPermission','12','readonly'),(426,1,5,0,3,0,0,'WorkflowPermission','19','readonly'),(427,1,5,0,3,0,0,'WorkflowPermission','7','readonly'),(428,1,5,0,3,0,0,'WorkflowPermission','3','readonly'),(429,1,5,0,3,0,0,'WorkflowPermission','5','readonly'),(430,1,5,0,3,0,0,'WorkflowPermission','6','readonly'),(431,1,5,0,3,0,0,'WorkflowPermission','22','readonly'),(432,1,6,0,3,0,0,'WorkflowPermission','project_id','readonly'),(433,1,6,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(434,1,6,0,3,0,0,'WorkflowPermission','subject','readonly'),(435,1,6,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(436,1,6,0,3,0,0,'WorkflowPermission','is_private','readonly'),(437,1,6,0,3,0,0,'WorkflowPermission','description','readonly'),(438,1,6,0,3,0,0,'WorkflowPermission','1','readonly'),(439,1,6,0,3,0,0,'WorkflowPermission','2','readonly'),(440,1,6,0,3,0,0,'WorkflowPermission','10','readonly'),(441,1,6,0,3,0,0,'WorkflowPermission','20','readonly'),(442,1,6,0,3,0,0,'WorkflowPermission','8','readonly'),(443,1,6,0,3,0,0,'WorkflowPermission','15','readonly'),(444,1,6,0,3,0,0,'WorkflowPermission','11','readonly'),(445,1,6,0,3,0,0,'WorkflowPermission','12','readonly'),(446,1,6,0,3,0,0,'WorkflowPermission','19','readonly'),(447,1,6,0,3,0,0,'WorkflowPermission','7','readonly'),(448,1,6,0,3,0,0,'WorkflowPermission','3','readonly'),(449,1,6,0,3,0,0,'WorkflowPermission','5','readonly'),(450,1,6,0,3,0,0,'WorkflowPermission','6','readonly'),(451,1,6,0,3,0,0,'WorkflowPermission','22','readonly'),(537,1,2,0,2,0,0,'WorkflowPermission','project_id','readonly'),(538,1,2,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(539,1,2,0,2,0,0,'WorkflowPermission','subject','readonly'),(540,1,2,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(541,1,2,0,2,0,0,'WorkflowPermission','is_private','readonly'),(542,1,2,0,2,0,0,'WorkflowPermission','description','readonly'),(543,1,2,0,2,0,0,'WorkflowPermission','1','readonly'),(544,1,2,0,2,0,0,'WorkflowPermission','2','readonly'),(545,1,2,0,2,0,0,'WorkflowPermission','10','readonly'),(546,1,2,0,2,0,0,'WorkflowPermission','20','readonly'),(547,1,2,0,2,0,0,'WorkflowPermission','8','readonly'),(548,1,2,0,2,0,0,'WorkflowPermission','15','readonly'),(549,1,2,0,2,0,0,'WorkflowPermission','11','readonly'),(550,1,2,0,2,0,0,'WorkflowPermission','12','readonly'),(551,1,2,0,2,0,0,'WorkflowPermission','19','readonly'),(552,1,2,0,2,0,0,'WorkflowPermission','17','readonly'),(553,1,2,0,2,0,0,'WorkflowPermission','7','readonly'),(554,1,2,0,2,0,0,'WorkflowPermission','3','readonly'),(555,1,2,0,2,0,0,'WorkflowPermission','5','readonly'),(556,1,2,0,2,0,0,'WorkflowPermission','6','readonly'),(557,1,2,0,2,0,0,'WorkflowPermission','22','readonly'),(558,1,3,0,2,0,0,'WorkflowPermission','project_id','readonly'),(559,1,3,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(560,1,3,0,2,0,0,'WorkflowPermission','subject','readonly'),(561,1,3,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(562,1,3,0,2,0,0,'
WorkflowPermission','is_private','readonly'),(563,1,3,0,2,0,0,'WorkflowPermission','description','readonly'),(564,1,3,0,2,0,0,'WorkflowPermission','1','readonly'),(565,1,3,0,2,0,0,'WorkflowPermission','2','readonly'),(566,1,3,0,2,0,0,'WorkflowPermission','10','readonly'),(567,1,3,0,2,0,0,'WorkflowPermission','20','readonly'),(568,1,3,0,2,0,0,'WorkflowPermission','8','readonly'),(569,1,3,0,2,0,0,'WorkflowPermission','15','readonly'),(570,1,3,0,2,0,0,'WorkflowPermission','11','readonly'),(571,1,3,0,2,0,0,'WorkflowPermission','12','readonly'),(572,1,3,0,2,0,0,'WorkflowPermission','19','readonly'),(573,1,3,0,2,0,0,'WorkflowPermission','17','readonly'),(574,1,3,0,2,0,0,'WorkflowPermission','7','readonly'),(575,1,3,0,2,0,0,'WorkflowPermission','3','readonly'),(576,1,3,0,2,0,0,'WorkflowPermission','5','readonly'),(577,1,3,0,2,0,0,'WorkflowPermission','6','readonly'),(578,1,3,0,2,0,0,'WorkflowPermission','22','readonly'),(579,1,4,0,2,0,0,'WorkflowPermission','project_id','readonly'),(580,1,4,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(581,1,4,0,2,0,0,'WorkflowPermission','subject','readonly'),(582,1,4,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(583,1,4,0,2,0,0,'WorkflowPermission','is_private','readonly'),(584,1,4,0,2,0,0,'WorkflowPermission','description','readonly'),(585,1,4,0,2,0,0,'WorkflowPermission','1','readonly'),(586,1,4,0,2,0,0,'WorkflowPermission','2','readonly'),(587,1,4,0,2,0,0,'WorkflowPermission','10','readonly'),(588,1,4,0,2,0,0,'WorkflowPermission','20','readonly'),(589,1,4,0,2,0,0,'WorkflowPermission','8','readonly'),(590,1,4,0,2,0,0,'WorkflowPermission','15','readonly'),(591,1,4,0,2,0,0,'WorkflowPermission','11','readonly'),(592,1,4,0,2,0,0,'WorkflowPermission','12','readonly'),(593,1,4,0,2,0,0,'WorkflowPermission','19','readonly'),(594,1,4,0,2,0,0,'WorkflowPermission','17','readonly'),(595,1,4,0,2,0,0,'WorkflowPermission','7','readonly'),(596,1,4,0,2,0,0,'WorkflowPermission','3','readonly'),(597,1,4,0,2,0,0,'WorkflowPermission','5','readonly'),(598,1,4,0,2,0,0,'WorkflowPermission','6','readonly'),(599,1,4,0,2,0,0,'WorkflowPermission','22','readonly'),(600,1,5,0,2,0,0,'WorkflowPermission','project_id','readonly'),(601,1,5,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(602,1,5,0,2,0,0,'WorkflowPermission','subject','readonly'),(603,1,5,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(604,1,5,0,2,0,0,'WorkflowPermission','is_private','readonly'),(605,1,5,0,2,0,0,'WorkflowPermission','description','readonly'),(606,1,5,0,2,0,0,'WorkflowPermission','1','readonly'),(607,1,5,0,2,0,0,'WorkflowPermission','2','readonly'),(608,1,5,0,2,0,0,'WorkflowPermission','10','readonly'),(609,1,5,0,2,0,0,'WorkflowPermission','20','readonly'),(610,1,5,0,2,0,0,'WorkflowPermission','8','readonly'),(611,1,5,0,2,0,0,'WorkflowPermission','15','readonly'),(612,1,5,0,2,0,0,'WorkflowPermission','11','readonly'),(613,1,5,0,2,0,0,'WorkflowPermission','12','readonly'),(614,1,5,0,2,0,0,'WorkflowPermission','19','readonly'),(615,1,5,0,2,0,0,'WorkflowPermission','17','readonly'),(616,1,5,0,2,0,0,'WorkflowPermission','7','readonly'),(617,1,5,0,2,0,0,'WorkflowPermission','3','readonly'),(618,1,5,0,2,0,0,'WorkflowPermission','5','readonly'),(619,1,5,0,2,0,0,'WorkflowPermission','6','readonly'),(620,1,5,0,2,0,0,'WorkflowPermission','22','readonly'),(621,1,6,0,2,0,0,'WorkflowPermission','project_id','readonly'),(622,1,6,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(623,1,6,0,2,0,0,'WorkflowPermission','subject','readonly'),(624,1,6,0,2,0,0,'WorkflowPermission','priority_id',
'readonly'),(625,1,6,0,2,0,0,'WorkflowPermission','is_private','readonly'),(626,1,6,0,2,0,0,'WorkflowPermission','description','readonly'),(627,1,6,0,2,0,0,'WorkflowPermission','1','readonly'),(628,1,6,0,2,0,0,'WorkflowPermission','2','readonly'),(629,1,6,0,2,0,0,'WorkflowPermission','10','readonly'),(630,1,6,0,2,0,0,'WorkflowPermission','20','readonly'),(631,1,6,0,2,0,0,'WorkflowPermission','8','readonly'),(632,1,6,0,2,0,0,'WorkflowPermission','15','readonly'),(633,1,6,0,2,0,0,'WorkflowPermission','11','readonly'),(634,1,6,0,2,0,0,'WorkflowPermission','12','readonly'),(635,1,6,0,2,0,0,'WorkflowPermission','19','readonly'),(636,1,6,0,2,0,0,'WorkflowPermission','17','readonly'),(637,1,6,0,2,0,0,'WorkflowPermission','7','readonly'),(638,1,6,0,2,0,0,'WorkflowPermission','3','readonly'),(639,1,6,0,2,0,0,'WorkflowPermission','5','readonly'),(640,1,6,0,2,0,0,'WorkflowPermission','6','readonly'),(641,1,6,0,2,0,0,'WorkflowPermission','22','readonly'),(642,1,2,3,2,0,0,'WorkflowTransition',NULL,NULL),(643,1,2,6,2,0,0,'WorkflowTransition',NULL,NULL),(644,1,3,4,2,0,0,'WorkflowTransition',NULL,NULL),(645,1,4,5,2,0,0,'WorkflowTransition',NULL,NULL),(646,1,4,6,2,0,0,'WorkflowTransition',NULL,NULL),(647,1,6,3,2,0,0,'WorkflowTransition',NULL,NULL); +INSERT INTO `workflows` VALUES (132,1,2,0,3,0,0,'WorkflowPermission','14','readonly'),(134,1,2,0,3,0,0,'WorkflowPermission','16','readonly'),(151,1,3,0,3,0,0,'WorkflowPermission','14','readonly'),(153,1,3,0,3,0,0,'WorkflowPermission','16','readonly'),(170,1,4,0,3,0,0,'WorkflowPermission','14','readonly'),(172,1,4,0,3,0,0,'WorkflowPermission','16','readonly'),(189,1,5,0,3,0,0,'WorkflowPermission','14','readonly'),(191,1,5,0,3,0,0,'WorkflowPermission','16','readonly'),(208,1,6,0,3,0,0,'WorkflowPermission','14','readonly'),(210,1,6,0,3,0,0,'WorkflowPermission','16','readonly'),(220,1,2,3,3,0,0,'WorkflowTransition',NULL,NULL),(221,1,2,3,4,0,0,'WorkflowTransition',NULL,NULL),(222,1,2,3,5,0,0,'WorkflowTransition',NULL,NULL),(226,1,3,4,3,0,0,'WorkflowTransition',NULL,NULL),(227,1,3,4,4,0,0,'WorkflowTransition',NULL,NULL),(228,1,3,4,5,0,0,'WorkflowTransition',NULL,NULL),(229,1,4,5,3,0,0,'WorkflowTransition',NULL,NULL),(230,1,4,5,4,0,0,'WorkflowTransition',NULL,NULL),(231,1,4,5,5,0,0,'WorkflowTransition',NULL,NULL),(232,1,4,6,3,0,0,'WorkflowTransition',NULL,NULL),(233,1,4,6,4,0,0,'WorkflowTransition',NULL,NULL),(234,1,4,6,5,0,0,'WorkflowTransition',NULL,NULL),(239,1,2,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(240,1,3,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(241,1,4,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(242,1,5,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(243,1,6,0,4,0,0,'WorkflowPermission','priority_id','readonly'),(244,1,0,2,5,0,0,'WorkflowTransition',NULL,NULL),(245,1,0,2,4,0,0,'WorkflowTransition',NULL,NULL),(246,1,0,6,5,0,0,'WorkflowTransition',NULL,NULL),(352,1,2,0,3,0,0,'WorkflowPermission','project_id','readonly'),(353,1,2,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(354,1,2,0,3,0,0,'WorkflowPermission','subject','readonly'),(355,1,2,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(356,1,2,0,3,0,0,'WorkflowPermission','is_private','readonly'),(357,1,2,0,3,0,0,'WorkflowPermission','description','readonly'),(358,1,2,0,3,0,0,'WorkflowPermission','1','readonly'),(359,1,2,0,3,0,0,'WorkflowPermission','2','readonly'),(360,1,2,0,3,0,0,'WorkflowPermission','10','readonly'),(361,1,2,0,3,0,0,'WorkflowPermission','20','readonly'),(362,1,2,0,3,0,0,'WorkflowPermission','8','readonly'),(363,1,2,0,3,0,0,
'WorkflowPermission','15','readonly'),(364,1,2,0,3,0,0,'WorkflowPermission','11','readonly'),(365,1,2,0,3,0,0,'WorkflowPermission','12','readonly'),(366,1,2,0,3,0,0,'WorkflowPermission','19','readonly'),(367,1,2,0,3,0,0,'WorkflowPermission','7','readonly'),(368,1,2,0,3,0,0,'WorkflowPermission','3','readonly'),(369,1,2,0,3,0,0,'WorkflowPermission','5','readonly'),(370,1,2,0,3,0,0,'WorkflowPermission','6','readonly'),(371,1,2,0,3,0,0,'WorkflowPermission','22','readonly'),(372,1,3,0,3,0,0,'WorkflowPermission','project_id','readonly'),(373,1,3,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(374,1,3,0,3,0,0,'WorkflowPermission','subject','readonly'),(375,1,3,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(376,1,3,0,3,0,0,'WorkflowPermission','is_private','readonly'),(377,1,3,0,3,0,0,'WorkflowPermission','description','readonly'),(378,1,3,0,3,0,0,'WorkflowPermission','1','readonly'),(379,1,3,0,3,0,0,'WorkflowPermission','2','readonly'),(380,1,3,0,3,0,0,'WorkflowPermission','10','readonly'),(381,1,3,0,3,0,0,'WorkflowPermission','20','readonly'),(382,1,3,0,3,0,0,'WorkflowPermission','8','readonly'),(383,1,3,0,3,0,0,'WorkflowPermission','15','readonly'),(384,1,3,0,3,0,0,'WorkflowPermission','11','readonly'),(385,1,3,0,3,0,0,'WorkflowPermission','12','readonly'),(386,1,3,0,3,0,0,'WorkflowPermission','19','readonly'),(387,1,3,0,3,0,0,'WorkflowPermission','7','readonly'),(388,1,3,0,3,0,0,'WorkflowPermission','3','readonly'),(389,1,3,0,3,0,0,'WorkflowPermission','5','readonly'),(390,1,3,0,3,0,0,'WorkflowPermission','6','readonly'),(391,1,3,0,3,0,0,'WorkflowPermission','22','readonly'),(392,1,4,0,3,0,0,'WorkflowPermission','project_id','readonly'),(393,1,4,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(394,1,4,0,3,0,0,'WorkflowPermission','subject','readonly'),(395,1,4,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(396,1,4,0,3,0,0,'WorkflowPermission','is_private','readonly'),(397,1,4,0,3,0,0,'WorkflowPermission','description','readonly'),(398,1,4,0,3,0,0,'WorkflowPermission','1','readonly'),(399,1,4,0,3,0,0,'WorkflowPermission','2','readonly'),(400,1,4,0,3,0,0,'WorkflowPermission','10','readonly'),(401,1,4,0,3,0,0,'WorkflowPermission','20','readonly'),(402,1,4,0,3,0,0,'WorkflowPermission','8','readonly'),(403,1,4,0,3,0,0,'WorkflowPermission','15','readonly'),(404,1,4,0,3,0,0,'WorkflowPermission','11','readonly'),(405,1,4,0,3,0,0,'WorkflowPermission','12','readonly'),(406,1,4,0,3,0,0,'WorkflowPermission','19','readonly'),(407,1,4,0,3,0,0,'WorkflowPermission','7','readonly'),(408,1,4,0,3,0,0,'WorkflowPermission','3','readonly'),(409,1,4,0,3,0,0,'WorkflowPermission','5','readonly'),(410,1,4,0,3,0,0,'WorkflowPermission','6','readonly'),(411,1,4,0,3,0,0,'WorkflowPermission','22','readonly'),(412,1,5,0,3,0,0,'WorkflowPermission','project_id','readonly'),(413,1,5,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(414,1,5,0,3,0,0,'WorkflowPermission','subject','readonly'),(415,1,5,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(416,1,5,0,3,0,0,'WorkflowPermission','is_private','readonly'),(417,1,5,0,3,0,0,'WorkflowPermission','description','readonly'),(418,1,5,0,3,0,0,'WorkflowPermission','1','readonly'),(419,1,5,0,3,0,0,'WorkflowPermission','2','readonly'),(420,1,5,0,3,0,0,'WorkflowPermission','10','readonly'),(421,1,5,0,3,0,0,'WorkflowPermission','20','readonly'),(422,1,5,0,3,0,0,'WorkflowPermission','8','readonly'),(423,1,5,0,3,0,0,'WorkflowPermission','15','readonly'),(424,1,5,0,3,0,0,'WorkflowPermission','11','readonly'),(425,1,5,0,3,0,0,'WorkflowPermission','12'
,'readonly'),(426,1,5,0,3,0,0,'WorkflowPermission','19','readonly'),(427,1,5,0,3,0,0,'WorkflowPermission','7','readonly'),(428,1,5,0,3,0,0,'WorkflowPermission','3','readonly'),(429,1,5,0,3,0,0,'WorkflowPermission','5','readonly'),(430,1,5,0,3,0,0,'WorkflowPermission','6','readonly'),(431,1,5,0,3,0,0,'WorkflowPermission','22','readonly'),(432,1,6,0,3,0,0,'WorkflowPermission','project_id','readonly'),(433,1,6,0,3,0,0,'WorkflowPermission','tracker_id','readonly'),(434,1,6,0,3,0,0,'WorkflowPermission','subject','readonly'),(435,1,6,0,3,0,0,'WorkflowPermission','priority_id','readonly'),(436,1,6,0,3,0,0,'WorkflowPermission','is_private','readonly'),(437,1,6,0,3,0,0,'WorkflowPermission','description','readonly'),(438,1,6,0,3,0,0,'WorkflowPermission','1','readonly'),(439,1,6,0,3,0,0,'WorkflowPermission','2','readonly'),(440,1,6,0,3,0,0,'WorkflowPermission','10','readonly'),(441,1,6,0,3,0,0,'WorkflowPermission','20','readonly'),(442,1,6,0,3,0,0,'WorkflowPermission','8','readonly'),(443,1,6,0,3,0,0,'WorkflowPermission','15','readonly'),(444,1,6,0,3,0,0,'WorkflowPermission','11','readonly'),(445,1,6,0,3,0,0,'WorkflowPermission','12','readonly'),(446,1,6,0,3,0,0,'WorkflowPermission','19','readonly'),(447,1,6,0,3,0,0,'WorkflowPermission','7','readonly'),(448,1,6,0,3,0,0,'WorkflowPermission','3','readonly'),(449,1,6,0,3,0,0,'WorkflowPermission','5','readonly'),(450,1,6,0,3,0,0,'WorkflowPermission','6','readonly'),(451,1,6,0,3,0,0,'WorkflowPermission','22','readonly'),(537,1,2,0,2,0,0,'WorkflowPermission','project_id','readonly'),(538,1,2,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(539,1,2,0,2,0,0,'WorkflowPermission','subject','readonly'),(540,1,2,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(541,1,2,0,2,0,0,'WorkflowPermission','is_private','readonly'),(542,1,2,0,2,0,0,'WorkflowPermission','description','readonly'),(543,1,2,0,2,0,0,'WorkflowPermission','1','readonly'),(544,1,2,0,2,0,0,'WorkflowPermission','2','readonly'),(545,1,2,0,2,0,0,'WorkflowPermission','10','readonly'),(546,1,2,0,2,0,0,'WorkflowPermission','20','readonly'),(547,1,2,0,2,0,0,'WorkflowPermission','8','readonly'),(548,1,2,0,2,0,0,'WorkflowPermission','15','readonly'),(549,1,2,0,2,0,0,'WorkflowPermission','11','readonly'),(550,1,2,0,2,0,0,'WorkflowPermission','12','readonly'),(551,1,2,0,2,0,0,'WorkflowPermission','19','readonly'),(552,1,2,0,2,0,0,'WorkflowPermission','17','readonly'),(553,1,2,0,2,0,0,'WorkflowPermission','7','readonly'),(554,1,2,0,2,0,0,'WorkflowPermission','3','readonly'),(555,1,2,0,2,0,0,'WorkflowPermission','5','readonly'),(556,1,2,0,2,0,0,'WorkflowPermission','6','readonly'),(557,1,2,0,2,0,0,'WorkflowPermission','22','readonly'),(558,1,3,0,2,0,0,'WorkflowPermission','project_id','readonly'),(559,1,3,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(560,1,3,0,2,0,0,'WorkflowPermission','subject','readonly'),(561,1,3,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(562,1,3,0,2,0,0,'WorkflowPermission','is_private','readonly'),(563,1,3,0,2,0,0,'WorkflowPermission','description','readonly'),(564,1,3,0,2,0,0,'WorkflowPermission','1','readonly'),(565,1,3,0,2,0,0,'WorkflowPermission','2','readonly'),(566,1,3,0,2,0,0,'WorkflowPermission','10','readonly'),(567,1,3,0,2,0,0,'WorkflowPermission','20','readonly'),(568,1,3,0,2,0,0,'WorkflowPermission','8','readonly'),(569,1,3,0,2,0,0,'WorkflowPermission','15','readonly'),(570,1,3,0,2,0,0,'WorkflowPermission','11','readonly'),(571,1,3,0,2,0,0,'WorkflowPermission','12','readonly'),(572,1,3,0,2,0,0,'WorkflowPermission','19','readonly'),(573,1,3,0,2
,0,0,'WorkflowPermission','17','readonly'),(574,1,3,0,2,0,0,'WorkflowPermission','7','readonly'),(575,1,3,0,2,0,0,'WorkflowPermission','3','readonly'),(576,1,3,0,2,0,0,'WorkflowPermission','5','readonly'),(577,1,3,0,2,0,0,'WorkflowPermission','6','readonly'),(578,1,3,0,2,0,0,'WorkflowPermission','22','readonly'),(579,1,4,0,2,0,0,'WorkflowPermission','project_id','readonly'),(580,1,4,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(581,1,4,0,2,0,0,'WorkflowPermission','subject','readonly'),(582,1,4,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(583,1,4,0,2,0,0,'WorkflowPermission','is_private','readonly'),(584,1,4,0,2,0,0,'WorkflowPermission','description','readonly'),(585,1,4,0,2,0,0,'WorkflowPermission','1','readonly'),(586,1,4,0,2,0,0,'WorkflowPermission','2','readonly'),(587,1,4,0,2,0,0,'WorkflowPermission','10','readonly'),(588,1,4,0,2,0,0,'WorkflowPermission','20','readonly'),(589,1,4,0,2,0,0,'WorkflowPermission','8','readonly'),(590,1,4,0,2,0,0,'WorkflowPermission','15','readonly'),(591,1,4,0,2,0,0,'WorkflowPermission','11','readonly'),(592,1,4,0,2,0,0,'WorkflowPermission','12','readonly'),(593,1,4,0,2,0,0,'WorkflowPermission','19','readonly'),(594,1,4,0,2,0,0,'WorkflowPermission','17','readonly'),(595,1,4,0,2,0,0,'WorkflowPermission','7','readonly'),(596,1,4,0,2,0,0,'WorkflowPermission','3','readonly'),(597,1,4,0,2,0,0,'WorkflowPermission','5','readonly'),(598,1,4,0,2,0,0,'WorkflowPermission','6','readonly'),(599,1,4,0,2,0,0,'WorkflowPermission','22','readonly'),(600,1,5,0,2,0,0,'WorkflowPermission','project_id','readonly'),(601,1,5,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(602,1,5,0,2,0,0,'WorkflowPermission','subject','readonly'),(603,1,5,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(604,1,5,0,2,0,0,'WorkflowPermission','is_private','readonly'),(605,1,5,0,2,0,0,'WorkflowPermission','description','readonly'),(606,1,5,0,2,0,0,'WorkflowPermission','1','readonly'),(607,1,5,0,2,0,0,'WorkflowPermission','2','readonly'),(608,1,5,0,2,0,0,'WorkflowPermission','10','readonly'),(609,1,5,0,2,0,0,'WorkflowPermission','20','readonly'),(610,1,5,0,2,0,0,'WorkflowPermission','8','readonly'),(611,1,5,0,2,0,0,'WorkflowPermission','15','readonly'),(612,1,5,0,2,0,0,'WorkflowPermission','11','readonly'),(613,1,5,0,2,0,0,'WorkflowPermission','12','readonly'),(614,1,5,0,2,0,0,'WorkflowPermission','19','readonly'),(615,1,5,0,2,0,0,'WorkflowPermission','17','readonly'),(616,1,5,0,2,0,0,'WorkflowPermission','7','readonly'),(617,1,5,0,2,0,0,'WorkflowPermission','3','readonly'),(618,1,5,0,2,0,0,'WorkflowPermission','5','readonly'),(619,1,5,0,2,0,0,'WorkflowPermission','6','readonly'),(620,1,5,0,2,0,0,'WorkflowPermission','22','readonly'),(621,1,6,0,2,0,0,'WorkflowPermission','project_id','readonly'),(622,1,6,0,2,0,0,'WorkflowPermission','tracker_id','readonly'),(623,1,6,0,2,0,0,'WorkflowPermission','subject','readonly'),(624,1,6,0,2,0,0,'WorkflowPermission','priority_id','readonly'),(625,1,6,0,2,0,0,'WorkflowPermission','is_private','readonly'),(626,1,6,0,2,0,0,'WorkflowPermission','description','readonly'),(627,1,6,0,2,0,0,'WorkflowPermission','1','readonly'),(628,1,6,0,2,0,0,'WorkflowPermission','2','readonly'),(629,1,6,0,2,0,0,'WorkflowPermission','10','readonly'),(630,1,6,0,2,0,0,'WorkflowPermission','20','readonly'),(631,1,6,0,2,0,0,'WorkflowPermission','8','readonly'),(632,1,6,0,2,0,0,'WorkflowPermission','15','readonly'),(633,1,6,0,2,0,0,'WorkflowPermission','11','readonly'),(634,1,6,0,2,0,0,'WorkflowPermission','12','readonly'),(635,1,6,0,2,0,0,'WorkflowPermission'
,'19','readonly'),(636,1,6,0,2,0,0,'WorkflowPermission','17','readonly'),(637,1,6,0,2,0,0,'WorkflowPermission','7','readonly'),(638,1,6,0,2,0,0,'WorkflowPermission','3','readonly'),(639,1,6,0,2,0,0,'WorkflowPermission','5','readonly'),(640,1,6,0,2,0,0,'WorkflowPermission','6','readonly'),(641,1,6,0,2,0,0,'WorkflowPermission','22','readonly'),(642,1,2,3,2,0,0,'WorkflowTransition',NULL,NULL),(644,1,3,4,2,0,0,'WorkflowTransition',NULL,NULL),(645,1,4,5,2,0,0,'WorkflowTransition',NULL,NULL),(646,1,4,6,2,0,0,'WorkflowTransition',NULL,NULL),(648,1,4,3,2,0,0,'WorkflowTransition',NULL,NULL),(649,1,4,3,3,0,0,'WorkflowTransition',NULL,NULL),(650,1,4,3,4,0,0,'WorkflowTransition',NULL,NULL),(651,1,4,3,5,0,0,'WorkflowTransition',NULL,NULL); /*!40000 ALTER TABLE `workflows` ENABLE KEYS */; UNLOCK TABLES; /*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; @@ -1764,4 +1764,4 @@ UNLOCK TABLES; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; --- Dump completed on 2020-07-15 16:33:41 +-- Dump completed on 2020-08-17 18:06:56 diff --git a/salt/playbook/init.sls b/salt/playbook/init.sls index 44b806f9a..d390a36fb 100644 --- a/salt/playbook/init.sls +++ b/salt/playbook/init.sls @@ -1,6 +1,6 @@ {% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] %} {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%} diff --git a/salt/reactor/fleet.sls b/salt/reactor/fleet.sls index 177dabf3a..4e4e13791 100644 --- a/salt/reactor/fleet.sls +++ b/salt/reactor/fleet.sls @@ -10,7 +10,7 @@ def run(): MINIONID = data['id'] ACTION = data['data']['action'] LOCAL_SALT_DIR = "/opt/so/saltstack/local" - STATICFILE = f"{LOCAL_SALT_DIR}/pillar/static.sls" + STATICFILE = f"{LOCAL_SALT_DIR}/pillar/global.sls" SECRETSFILE = f"{LOCAL_SALT_DIR}/pillar/secrets.sls" if MINIONID.split('_')[-1] in ['manager','eval','fleet','managersearch','standalone']: diff --git a/salt/redis/etc/redis.conf b/salt/redis/etc/redis.conf index d5f39da99..7679a789e 100644 --- a/salt/redis/etc/redis.conf +++ b/salt/redis/etc/redis.conf @@ -59,7 +59,7 @@ # internet, binding to all the interfaces is dangerous and will expose the # instance to everybody on the internet. So by default we uncomment the # following bind directive, that will force Redis to listen only into -# the IPv4 lookback interface address (this means Redis will be able to +# the IPv4 loopback interface address (this means Redis will be able to # accept connections only from clients running into the same computer it # is running). # @@ -86,6 +86,11 @@ bind 0.0.0.0 # even if no authentication is configured, nor a specific set of interfaces # are explicitly listed using the "bind" directive. protected-mode no +tls-cert-file /certs/redis.crt +tls-key-file /certs/redis.key +tls-ca-cert-file /certs/ca.crt +tls-port 9696 +tls-auth-clients no # Accept connections on the specified port, default is 6379 (IANA #815344). # If port 0 is specified Redis will not listen on a TCP socket. 
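The TLS directives injected above (tls-port 9696, the /certs key material, tls-auth-clients no) pair with the new 9595/9696 firewall ports opened earlier in this change. As a quick sanity check only, a client can be pointed at the TLS listener; the sketch below uses redis-py, and the hostname plus the redis-py dependency are assumptions, not part of this diff.

```python
# Minimal sketch: verify the TLS listener added above (tls-port 9696).
# Assumes redis-py is installed and /certs/ca.crt (tls-ca-cert-file above)
# is readable; no client certificate is needed since tls-auth-clients is "no".
import redis

r = redis.Redis(
    host="localhost",              # assumed host, not taken from this diff
    port=9696,                     # matches the tls-port directive above
    ssl=True,
    ssl_ca_certs="/certs/ca.crt",  # CA configured via tls-ca-cert-file
)
print(r.ping())                    # True once the TLS handshake succeeds
```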
@@ -129,6 +134,92 @@ timeout 0 # Redis default starting with Redis 3.2.1. tcp-keepalive 300 +################################# TLS/SSL ##################################### + +# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration +# directive can be used to define TLS-listening ports. To enable TLS on the +# default port, use: +# +# port 0 +# tls-port 6379 + +# Configure a X.509 certificate and private key to use for authenticating the +# server to connected clients, masters or cluster peers. These files should be +# PEM formatted. +# +# tls-cert-file redis.crt +# tls-key-file redis.key + +# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange: +# +# tls-dh-params-file redis.dh + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Redis requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. +# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients (including replica servers) on a TLS port are required +# to authenticate using valid client side certificates. +# +# It is possible to disable authentication using this directive. +# +# tls-auth-clients no + +# By default, a Redis replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. +# +# tls-replication yes + +# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# TLS for the bus protocol, use the following directive: +# +# tls-cluster yes + +# Explicitly specify TLS versions to support. Allowed values are case insensitive +# and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) or +# any combination. To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information +# about the syntax of this string. +# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. +# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + ################################# GENERAL ##################################### # By default Redis does not run as a daemon. Use 'yes' if you need it. @@ -252,6 +343,19 @@ rdbchecksum yes # The filename where to dump the DB dbfilename dump.rdb +# Remove RDB files used by replication in instances without persistence +# enabled. 
By default this option is disabled, however there are environments +# where for regulations or other security concerns, RDB files persisted on +# disk by masters in order to feed replicas, or stored on disk by replicas +# in order to load them for the initial synchronization, should be deleted +# ASAP. Note that this option ONLY WORKS in instances that have both AOF +# and RDB persistence disabled, otherwise it is completely ignored. +# +# An alternative (and sometimes better) way to obtain the same effect is +# to use diskless replication on both master and replica instances. However +# in the case of replicas, diskless is not always an option. +rdb-del-sync-files no + # The working directory. # # The DB will be written inside this directory, with the filename specified @@ -260,88 +364,104 @@ dbfilename dump.rdb # The Append Only File will also be created inside this directory. # # Note that you must specify a directory here, not a file name. -dir /redis +dir ./ ################################# REPLICATION ################################# -# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# Master-Replica replication. Use replicaof to make a Redis instance a copy of # another Redis server. A few things to understand ASAP about Redis replication. # +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# # 1) Redis replication is asynchronous, but you can configure a master to # stop accepting writes if it appears to be not connected with at least -# a given number of slaves. -# 2) Redis slaves are able to perform a partial resynchronization with the +# a given number of replicas. +# 2) Redis replicas are able to perform a partial resynchronization with the # master if the replication link is lost for a relatively small amount of # time. You may want to configure the replication backlog size (see the next # sections of this file) with a sensible value depending on your needs. # 3) Replication is automatic and does not need user intervention. After a -# network partition slaves automatically try to reconnect to masters +# network partition replicas automatically try to reconnect to masters # and resynchronize with them. # -# slaveof +# replicaof # If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before +# directive below) it is possible to tell the replica to authenticate before # starting the replication synchronization process, otherwise the master will -# refuse the slave request. +# refuse the replica request. # # masterauth - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: # +# However this is not enough if you are using Redis ACLs (for Redis version +# 6 or greater), and the default user is not capable of running the PSYNC +# command and/or other commands needed for replication. In this case it's +# better to configure a special user to use with replication, and specify the +# masteruser configuration as such: +# +# masteruser +# +# When masteruser is specified, the replica will authenticate against its +# master using the new AUTH form: AUTH .
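For illustration only: the replicaof/masterauth handshake described above can also be driven at runtime from a client. The sketch below uses redis-py with placeholder hosts and a placeholder password; none of these values come from this diff.

```python
# Hedged sketch of the replication handshake described above.
# Hosts and the password are placeholders, not taken from this change.
import redis

replica = redis.Redis(host="replica.example", port=6379)

# If the master requires a password, set masterauth before replication starts.
replica.config_set("masterauth", "example-password")

# REPLICAOF is the Redis 5+ spelling of SLAVEOF.
replica.execute_command("REPLICAOF", "master.example", "6379")

# Confirm the role from "INFO replication".
print(replica.info("replication")["role"])
```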
+ +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will # still reply to client requests, possibly with out of date data, or the # data set may just be empty if this is the first synchronization. # -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# 2) if replica-serve-stale-data is set to 'no' the replica will reply with # an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. +# but to INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, +# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, +# COMMAND, POST, HOST: and LATENCY. # -slave-serve-stale-data yes +replica-serve-stale-data yes -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but # may also cause problems if clients are writing to it because of a # misconfiguration. # -# Since Redis 2.6 by default slaves are read-only. +# Since Redis 2.6 by default replicas are read-only. # -# Note: read only slaves are not designed to be exposed to untrusted clients +# Note: read only replicas are not designed to be exposed to untrusted clients # on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands +# Still a read only replica exports by default all the administrative commands # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve -# security of read only slaves using 'rename-command' to shadow all the +# security of read only replicas using 'rename-command' to shadow all the # administrative / dangerous commands. -slave-read-only yes +replica-read-only yes # Replication SYNC strategy: disk or socket. # -# ------------------------------------------------------- -# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY -# ------------------------------------------------------- +# New replicas and reconnecting replicas that are not able to continue the +# replication process just receiving differences, need to do what is called a +# "full synchronization". An RDB file is transmitted from the master to the +# replicas. # -# New slaves and reconnecting slaves that are not able to continue the replication -# process just receiving differences, need to do what is called a "full -# synchronization". An RDB file is transmitted from the master to the slaves. # The transmission can happen in two different ways: # # 1) Disk-backed: The Redis master creates a new process that writes the RDB # file on disk. Later the file is transferred by the parent -# process to the slaves incrementally. +# process to the replicas incrementally. # 2) Diskless: The Redis master creates a new process that directly writes the -# RDB file to slave sockets, without touching the disk at all.
# -# With disk-backed replication, while the RDB file is generated, more slaves -# can be queued and served with the RDB file as soon as the current child producing -# the RDB file finishes its work. With diskless replication instead once -# the transfer starts, new slaves arriving will be queued and a new transfer -# will start when the current one terminates. +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child +# producing the RDB file finishes its work. With diskless replication instead +# once the transfer starts, new replicas arriving will be queued and a new +# transfer will start when the current one terminates. # # When diskless replication is used, the master waits a configurable amount of -# time (in seconds) before starting the transfer in the hope that multiple slaves -# will arrive and the transfer can be parallelized. +# time (in seconds) before starting the transfer in the hope that multiple +# replicas will arrive and the transfer can be parallelized. # # With slow disks and fast (large bandwidth) networks, diskless replication # works better. @@ -349,157 +469,334 @@ repl-diskless-sync no # When diskless replication is enabled, it is possible to configure the delay # the server waits in order to spawn the child that transfers the RDB via socket -# to the slaves. +# to the replicas. # # This is important since once the transfer starts, it is not possible to serve -# new slaves arriving, that will be queued for the next RDB transfer, so the server -# waits a delay in order to let more slaves arrive. +# new replicas arriving, that will be queued for the next RDB transfer, so the +# server waits a delay in order to let more replicas arrive. # # The delay is specified in seconds, and by default is 5 seconds. To disable # it entirely just set it to 0 seconds and the transfer will start ASAP. repl-diskless-sync-delay 5 -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. +# ----------------------------------------------------------------------------- +# WARNING: RDB diskless load is experimental. Since in this setup the replica +# does not immediately store an RDB on disk, it may cause data loss during +# failovers. RDB diskless load + Redis modules not handling I/O reads may also +# cause Redis to abort in case of I/O errors during the initial synchronization +# stage with the master. Use only if you know what you are doing. +# ----------------------------------------------------------------------------- # -# repl-ping-slave-period 10 +# Replica can load the RDB it reads from the replication link directly from the +# socket, or store the RDB to a file and read that file after it was completely +# received from the master. +# +# In many cases the disk is slower than the network, and storing and loading +# the RDB file may increase replication time (and even increase the master's +# Copy on Write memory and replica buffers). +# However, parsing the RDB file directly from the socket may mean that we have +# to flush the contents of the current database before the full rdb was +# received. For this reason we have the following options: +# +# "disabled" - Don't use diskless load (store the rdb file to the disk first) +# "on-empty-db" - Use diskless load only when it is completely safe.
+# "swapdb" - Keep a copy of the current db contents in RAM while parsing +# the data directly from the socket. Note that this requires +# sufficient memory; if you don't have it, you risk an OOM kill. +repl-diskless-load disabled + +# Replicas send PINGs to the server at a predefined interval. It's possible to +# change this interval with the repl_ping_replica_period option. The default +# value is 10 seconds. +# +# repl-ping-replica-period 10 # The following option sets the replication timeout for: # -# 1) Bulk transfer I/O during SYNC, from the point of view of slave. -# 2) Master timeout from the point of view of slaves (data, pings). -# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). # # It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. # # repl-timeout 60 -# Disable TCP_NODELAY on the slave socket after SYNC? +# Disable TCP_NODELAY on the replica socket after SYNC? # # If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with # Linux kernels using a default configuration. # -# If you select "no" the delay for data to appear on the slave side will +# If you select "no" the delay for data to appear on the replica side will # be reduced but more bandwidth will be used for replication. # # By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may +# or when the master and replicas are many hops away, turning this to "yes" may # be a good idea. repl-disable-tcp-nodelay no # Set the replication backlog size. The backlog is a buffer that accumulates -# slave data when slaves are disconnected for some time, so that when a slave -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the slave missed while -# disconnected. +# replica data when replicas are disconnected for some time, so that when a +# replica wants to reconnect again, often a full resync is not needed, but a +# partial resync is enough, just passing the portion of data the replica +# missed while disconnected. # -# The bigger the replication backlog, the longer the time the slave can be +# The bigger the replication backlog, the longer the time the replica can be # disconnected and later be able to perform a partial resynchronization. # -# The backlog is only allocated once there is at least a slave connected. +# The backlog is only allocated once there is at least a replica connected. # # repl-backlog-size 1mb -# After a master has no longer connected slaves for some time, the backlog +# After a master has no longer connected replicas for some time, the backlog
The following option configures the amount of seconds that -# need to elapse, starting from the time the last slave disconnected, for +# need to elapse, starting from the time the last replica disconnected, for # the backlog buffer to be freed. # -# Note that slaves never free the backlog for timeout, since they may be +# Note that replicas never free the backlog for timeout, since they may be # promoted to masters later, and should be able to correctly "partially -# resynchronize" with the slaves: hence they should always accumulate backlog. +# resynchronize" with the replicas: hence they should always accumulate backlog. # # A value of 0 means to never release the backlog. # # repl-backlog-ttl 3600 -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. +# The replica priority is an integer number published by Redis in the INFO +# output. It is used by Redis Sentinel in order to select a replica to promote +# into a master if the master is no longer working correctly. # -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel +# will pick the one with priority 10, that is the lowest. # -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by # Redis Sentinel for promotion. # # By default the priority is 100. -slave-priority 100 +replica-priority 100 # It is possible for a master to stop accepting writes if there are less than -# N slaves connected, having a lag less or equal than M seconds. +# N replicas connected, having a lag less or equal than M seconds. # -# The N slaves need to be in "online" state. +# The N replicas need to be in "online" state. # # The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the slave, that is usually sent every second. +# the last ping received from the replica, that is usually sent every second. # # This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough slaves +# will limit the window of exposure for lost writes in case not enough replicas # are available, to the specified number of seconds. # -# For example to require at least 3 slaves with a lag <= 10 seconds use: +# For example to require at least 3 replicas with a lag <= 10 seconds use: # -# min-slaves-to-write 3 -# min-slaves-max-lag 10 +# min-replicas-to-write 3 +# min-replicas-max-lag 10 # # Setting one or the other to 0 disables the feature. # -# By default min-slaves-to-write is set to 0 (feature disabled) and -# min-slaves-max-lag is set to 10. +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. # A Redis master is able to list the address and port of the attached -# slaves in different ways. For example the "INFO replication" section +# replicas in different ways. 
For example the "INFO replication" section # offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover slave instances. +# Redis Sentinel in order to discover replica instances. # Another place where this info is available is in the output of the # "ROLE" command of a master. # -# The listed IP and address normally reported by a slave is obtained +# The listed IP and address normally reported by a replica is obtained # in the following way: # # IP: The address is auto detected by checking the peer address -# of the socket used by the slave to connect with the master. +# of the socket used by the replica to connect with the master. # -# Port: The port is communicated by the slave during the replication -# handshake, and is normally the port that the slave is using to -# list for connections. +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. # # However when port forwarding or Network Address Translation (NAT) is -# used, the slave may be actually reachable via different IP and port -# pairs. The following two options can be used by a slave in order to +# used, the replica may be actually reachable via different IP and port +# pairs. The following two options can be used by a replica in order to # report to its master a specific set of IP and port, so that both INFO # and ROLE will report those values. # # There is no need to use both the options if you need to override just # the port or the IP address. # -# slave-announce-ip 5.5.5.5 -# slave-announce-port 1234 +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +############################### KEYS TRACKING ################################# + +# Redis implements server assisted support for client side caching of values. +# This is implemented using an invalidation table that remembers, using +# 16 millions of slots, what clients may have certain subsets of keys. In turn +# this is used in order to send invalidation messages to clients. Please +# to understand more about the feature check this page: +# +# https://redis.io/topics/client-side-caching +# +# When tracking is enabled for a client, all the read only queries are assumed +# to be cached: this will force Redis to store information in the invalidation +# table. When keys are modified, such information is flushed away, and +# invalidation messages are sent to the clients. However if the workload is +# heavily dominated by reads, Redis could use more and more memory in order +# to track the keys fetched by many clients. +# +# For this reason it is possible to configure a maximum fill value for the +# invalidation table. By default it is set to 1M of keys, and once this limit +# is reached, Redis will start to evict keys in the invalidation table +# even if they were not modified, just to reclaim memory: this will in turn +# force the clients to invalidate the cached values. Basically the table +# maximum size is a trade off between the memory you want to spend server +# side to track information about who cached what, and the ability of clients +# to retain cached objects in memory. +# +# If you set the value to 0, it means there are no limits, and Redis will +# retain as many keys as needed in the invalidation table. +# In the "stats" INFO section, you can find information about the number of +# keys in the invalidation table at every given moment. 
+# +# Note: when key tracking is used in broadcasting mode, no memory is used +# in the server side so this setting is useless. +# +# tracking-table-max-keys 1000000 ################################## SECURITY ################################### -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# # Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. +# 1 million passwords per second against a modern box. This means that you +# should use very strong passwords, otherwise they will be very easy to break. +# Note that because the password is really a shared secret between the client +# and the server, and should not be memorized by any human, the password +# can be easily a long string from /dev/urandom or whatever, so by using a +# long and unguessable password no brute force attack will be possible. + +# Redis ACL users are defined in the following format: +# +# user ... acl rules ... +# +# For example: +# +# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 +# +# The special username "default" is used for new connections. If this user +# has the "nopass" rule, then new connections will be immediately authenticated +# as the "default" user without the need of any password provided via the +# AUTH command. Otherwise if the "default" user is not flagged with "nopass" +# the connections will start in not authenticated state, and will require +# AUTH (or the HELLO command AUTH option) in order to be authenticated and +# start to work. +# +# The ACL rules that describe what an user can do are the following: +# +# on Enable the user: it is possible to authenticate as this user. +# off Disable the user: it's no longer possible to authenticate +# with this user, however the already authenticated connections +# will still work. +# + Allow the execution of that command +# - Disallow the execution of that command +# +@ Allow the execution of all the commands in such category +# with valid categories are like @admin, @set, @sortedset, ... +# and so forth, see the full list in the server.c file where +# the Redis command table is described and defined. +# The special category @all means all the commands, but currently +# present in the server, and that will be loaded in the future +# via modules. +# +|subcommand Allow a specific subcommand of an otherwise +# disabled command. Note that this form is not +# allowed as negative like -DEBUG|SEGFAULT, but +# only additive starting with "+". +# allcommands Alias for +@all. Note that it implies the ability to execute +# all the future commands loaded via the modules system. +# nocommands Alias for -@all. +# ~ Add a pattern of keys that can be mentioned as part of +# commands. For instance ~* allows all the keys. The pattern +# is a glob-style pattern like the one of KEYS. +# It is possible to specify multiple patterns. +# allkeys Alias for ~* +# resetkeys Flush the list of allowed keys patterns. +# > Add this passowrd to the list of valid password for the user. +# For example >mypass will add "mypass" to the list. +# This directive clears the "nopass" flag (see later). 
+# < Remove this password from the list of valid passwords. +# nopass All the set passwords of the user are removed, and the user +# is flagged as requiring no password: it means that every +# password will work against this user. If this directive is +# used for the default user, every new connection will be +# immediately authenticated with the default user without +# any explicit AUTH command required. Note that the "resetpass" +# directive will clear this condition. +# resetpass Flush the list of allowed passwords. Moreover removes the +# "nopass" status. After "resetpass" the user has no associated +# passwords and there is no way to authenticate without adding +# some password (or setting it as "nopass" later). +# reset Performs the following actions: resetpass, resetkeys, off, +# -@all. The user returns to the same state it has immediately +# after its creation. +# +# ACL rules can be specified in any order: for instance you can start with +# passwords, then flags, or key patterns. However note that the additive +# and subtractive rules will CHANGE MEANING depending on the ordering. +# For instance see the following example: +# +# user alice on +@all -DEBUG ~* >somepassword +# +# This will allow "alice" to use all the commands with the exception of the +# DEBUG command, since +@all added all the commands to the set of the commands +# alice can use, and later DEBUG was removed. However if we invert the order +# of two ACL rules the result will be different: +# +# user alice on -DEBUG +@all ~* >somepassword +# +# Now DEBUG was removed when alice had yet no commands in the set of allowed +# commands, later all the commands are added, so the user will be able to +# execute everything. +# +# Basically ACL rules are processed left-to-right. +# +# For more information about ACL configuration please refer to +# the Redis web site at https://redis.io/topics/acl + +# ACL LOG +# +# The ACL Log tracks failed commands and authentication events associated +# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked +# by ACLs. The ACL Log is stored in memory. You can reclaim memory with +# ACL LOG RESET. Define the maximum entry length of the ACL Log below. +acllog-max-len 128 + +# Using an external ACL file +# +# Instead of configuring users here in this file, it is possible to use +# a stand-alone file just listing users. The two methods cannot be mixed: +# if you configure users here and at the same time you activate the exteranl +# ACL file, the server will refuse to start. +# +# The format of the external ACL user file is exactly the same as the +# format that is used inside redis.conf to describe users. +# +# aclfile /etc/redis/users.acl + +# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatiblity +# layer on top of the new ACL system. The option effect will be just setting +# the password for the default user. Clients will still authenticate using +# AUTH as usually, or more explicitly with AUTH default +# if they follow the new protocol: both will work. # # requirepass foobared -# Command renaming. +# Command renaming (DEPRECATED). +# +# ------------------------------------------------------------------------ +# WARNING: avoid using this option if possible. Instead use ACLs to remove +# commands from the default user, and put them only in some admin user you +# create for administrative purposes. 
+# ------------------------------------------------------------------------ # # It is possible to change the name of dangerous commands in a shared # environment. For instance the CONFIG command may be renamed into something @@ -516,7 +813,7 @@ slave-priority 100 # rename-command CONFIG "" # # Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. +# AOF file or transmitted to replicas may cause problems. ################################### CLIENTS #################################### @@ -529,6 +826,11 @@ slave-priority 100 # Once the limit is reached Redis will close all the new connections sending # an error 'max number of clients reached'. # +# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# shared with the cluster bus: every node in the cluster will use two +# connections, one incoming and another outgoing. It is important to size the +# limit accordingly in case of very large clusters. +# # maxclients 10000 ############################## MEMORY MANAGEMENT ################################ @@ -545,27 +847,27 @@ slave-priority 100 # This option is usually useful when using Redis as an LRU or LFU cache, or to # set a hard memory limit for an instance (using the 'noeviction' policy). # -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted # from the used memory count, so that network problems / resyncs will # not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion +# buffer of replicas is full with DELs of keys evicted triggering the deletion # of more keys, and so forth until the database is completely emptied. # -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica # output buffers (but this is not needed if the policy is 'noeviction'). # maxmemory 817m # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select among five behaviors: +# is reached. You can select one from the following behaviors: # -# volatile-lru -> Evict using approximated LRU among the keys with an expire set. +# volatile-lru -> Evict using approximated LRU, only keys with an expire set. # allkeys-lru -> Evict any key using approximated LRU. -# volatile-lfu -> Evict using approximated LFU among the keys with an expire set. +# volatile-lfu -> Evict using approximated LFU, only keys with an expire set. # allkeys-lfu -> Evict any key using approximated LFU. -# volatile-random -> Remove a random key among the ones with an expire set. +# volatile-random -> Remove a random key having an expire set. # allkeys-random -> Remove a random key, any key. # volatile-ttl -> Remove the key with the nearest expire time (minor TTL) # noeviction -> Don't evict anything, just return an error on write operations. 
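The maxmemory/maxmemory-policy pair above can also be adjusted at runtime with CONFIG SET, which may be handy when testing eviction behavior. A minimal sketch with redis-py, mirroring the 817m/noeviction values above; host and port are assumptions, not part of this diff.

```python
# Sketch: apply the memory settings above at runtime (assumes redis-py
# and a reachable plain-TCP listener; adjust host/port as needed).
import redis

r = redis.Redis(host="localhost", port=6379)
r.config_set("maxmemory", "817mb")              # mirrors "maxmemory 817m"
r.config_set("maxmemory-policy", "noeviction")  # mirrors the policy above
print(r.config_get("maxmemory*"))               # confirm both values took effect
```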
@@ -600,6 +902,43 @@ maxmemory-policy noeviction # # maxmemory-samples 5 +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually). It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict on the master side. +# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want; however, if your replica is writable, or you want the replica +# to have a different memory setting, and you are sure all the writes performed +# to the replica are idempotent, then you may change this default (but be sure +# to understand what you are doing). +# +# Note that since the replica by default does not evict, it may end up using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory +# and so forth). So make sure you monitor your replicas and make sure they +# have enough memory to never hit a real out-of-memory condition before the +# master hits the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +# Redis reclaims expired keys in two ways: upon access when those keys are +# found to be expired, and also in background, in what is called the +# "active expire cycle". The key space is slowly and incrementally scanned +# looking for expired keys to reclaim, so that it is possible to free memory +# of keys that are expired and will never be accessed again in a short time. +# +# The default effort of the expire cycle will try to avoid having more than +# ten percent of expired keys still in memory, and will try to avoid consuming +# more than 25% of total memory and to avoid adding latency to the system. However +# it is possible to increase the expire "effort" that is normally set to +# "1", to a greater value, up to the value "10". At its maximum value the +# system will use more CPU, longer cycles (and technically may introduce +# more latency), and will tolerate fewer already expired keys still present +# in the system. It's a tradeoff between memory, CPU and latency. +# +# active-expire-effort 1 + ############################# LAZY FREEING #################################### # Redis has two primitives to delete keys. One is called DEL and is a blocking @@ -635,19 +974,72 @@ maxmemory-policy noeviction # or SORT with STORE option may delete existing keys. The SET command # itself removes any old content of the specified key in order to replace # it with the specified string. -# 4) During replication, when a slave performs a full resynchronization with +# 4) During replication, when a replica performs a full resynchronization with # its master, the content of the whole database is removed in order to -# load the RDB file just transfered. +# load the RDB file just transferred. # # In all the above cases the default is to delete objects in a blocking way, # like if DEL was called. However you can configure each case specifically # in order to instead release memory in a non-blocking way like if UNLINK -# was called, using the following configuration directives: +# was called, using the following configuration directives.
lazyfree-lazy-eviction no lazyfree-lazy-expire no lazyfree-lazy-server-del no -slave-lazy-flush no +replica-lazy-flush no + +# It is also possible, for cases where replacing the user code DEL calls +# with UNLINK calls is not easy, to modify the default behavior of the DEL +# command to act exactly like UNLINK, using the following configuration +# directive: + +lazyfree-lazy-user-del no + +################################ THREADED I/O ################################# + +# Redis is mostly single threaded, however there are certain threaded +# operations such as UNLINK, slow I/O accesses and other things that are +# performed on side threads. +# +# Now it is also possible to handle Redis clients socket reads and writes +# in different I/O threads. Since especially writing is so slow, normally +# Redis users use pipelining in order to speed up Redis performance per +# core, and spawn multiple instances in order to scale more. Using I/O +# threads it is possible to easily make Redis twice as fast without resorting +# to pipelining or sharding of the instance. +# +# By default threading is disabled; we suggest enabling it only in machines +# that have at least 4 or more cores, leaving at least one spare core. +# Using more than 8 threads is unlikely to help much. We also recommend using +# threaded I/O only if you actually have performance problems, with Redis +# instances being able to use a quite big percentage of CPU time, otherwise +# there is no point in using this feature. +# +# So for instance if you have a four core box, try to use 2 or 3 I/O +# threads; if you have 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# io-threads 4 +# +# Setting io-threads to 1 will just use the main thread as usual. +# When I/O threads are enabled, we only use threads for writes, that is +# to thread the write(2) syscall and transfer the client buffers to the +# socket. However it is also possible to enable threading of reads and +# protocol parsing using the following configuration directive, by setting +# it to yes: +# +# io-threads-do-reads no +# +# Usually threading reads doesn't help much. +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. Also, this feature currently does not work when SSL is +# enabled. +# +# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make +# sure you also run the benchmark itself in threaded mode, using the +# --threads option to match the number of Redis threads, otherwise you'll not +# be able to notice the improvements. ############################## APPEND ONLY MODE ############################### @@ -776,10 +1168,7 @@ aof-load-truncated yes # When loading Redis recognizes that the AOF file starts with the "REDIS" # string and loads the prefixed RDB file, and continues loading the AOF # tail. -# -# This is currently turned off by default in order to avoid the surprise -# of a format change, but will at some point be used as the default.
-aof-use-rdb-preamble no +aof-use-rdb-preamble yes ################################ LUA SCRIPTING ############################### @@ -800,13 +1189,7 @@ aof-use-rdb-preamble no lua-time-limit 5000 ################################ REDIS CLUSTER ############################### -# -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however -# in order to mark it as "mature" we need to wait for a non trivial percentage -# of users to deploy it in production. -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# + # Normal Redis instances can't be part of a Redis Cluster; only nodes that are # started as cluster nodes can. In order to start a Redis instance as a # cluster node enable the cluster support uncommenting the following: @@ -827,42 +1210,42 @@ lua-time-limit 5000 # # cluster-node-timeout 15000 -# A slave of a failing master will avoid to start a failover if its data +# A replica of a failing master will avoid to start a failover if its data # looks too old. # -# There is no simple way for a slave to actually have an exact measure of +# There is no simple way for a replica to actually have an exact measure of # its "data age", so the following two checks are performed: # -# 1) If there are multiple slaves able to failover, they exchange messages -# in order to try to give an advantage to the slave with the best +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best # replication offset (more data from the master processed). -# Slaves will try to get their rank by offset, and apply to the start +# Replicas will try to get their rank by offset, and apply to the start # of the failover a delay proportional to their rank. # -# 2) Every single slave computes the time of the last interaction with +# 2) Every single replica computes the time of the last interaction with # its master. This can be the last ping or command received (if the master # is still in the "connected" state), or the time that elapsed since the # disconnection with the master (if the replication link is currently down). -# If the last interaction is too old, the slave will not try to failover +# If the last interaction is too old, the replica will not try to failover # at all. # -# The point "2" can be tuned by user. Specifically a slave will not perform +# The point "2" can be tuned by user. Specifically a replica will not perform # the failover if, since the last interaction with the master, the time # elapsed is greater than: # -# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# (node-timeout * replica-validity-factor) + repl-ping-replica-period # -# So for example if node-timeout is 30 seconds, and the slave-validity-factor -# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the -# slave will not try to failover if it was not able to talk with the master +# So for example if node-timeout is 30 seconds, and the replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master # for longer than 310 seconds. 
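+#
+# Spelled out, that arithmetic is:
+# (node-timeout 30 * replica-validity-factor 10) + repl-ping-replica-period 10
+# = 300 + 10 = 310 seconds.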
#
-# A large slave-validity-factor may allow slaves with too old data to failover
+# A large replica-validity-factor may allow replicas with too old data to failover
# a master, while a too small value may prevent the cluster from being able to
-# elect a slave at all.
+# elect a replica at all.
#
-# For maximum availability, it is possible to set the slave-validity-factor
-# to a value of 0, which means, that slaves will always try to failover the
+# For maximum availability, it is possible to set the replica-validity-factor
+# to a value of 0, which means that replicas will always try to failover the
# master regardless of the last time they interacted with the master.
# (However they'll always try to apply a delay proportional to their
# offset rank).
@@ -870,22 +1253,22 @@ lua-time-limit 5000
# Zero is the only value able to guarantee that when all the partitions heal
# the cluster will always be able to continue.
#
-# cluster-slave-validity-factor 10
+# cluster-replica-validity-factor 10

-# Cluster slaves are able to migrate to orphaned masters, that are masters
-# that are left without working slaves. This improves the cluster ability
+# Cluster replicas are able to migrate to orphaned masters, that are masters
+# that are left without working replicas. This improves the cluster ability
# to resist to failures as otherwise an orphaned master can't be failed over
-# in case of failure if it has no working slaves.
+# in case of failure if it has no working replicas.
#
-# Slaves migrate to orphaned masters only if there are still at least a
-# given number of other working slaves for their old master. This number
-# is the "migration barrier". A migration barrier of 1 means that a slave
-# will migrate only if there is at least 1 other working slave for its master
-# and so forth. It usually reflects the number of slaves you want for every
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
# master in your cluster.
#
-# Default is 1 (slaves migrate only if their masters remain with at least
-# one slave). To disable migration just set it to a very large value.
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value.
# A value of 0 can be set but is useful only for debugging and dangerous
# in production.
#
@@ -904,7 +1287,7 @@ lua-time-limit 5000
#
# cluster-require-full-coverage yes

-# This option, when set to yes, prevents slaves from trying to failover its
+# This option, when set to yes, prevents replicas from trying to failover their
# master during master failures. However the master can still perform a
# manual failover, if forced to do so.
#
@@ -912,7 +1295,23 @@ lua-time-limit 5000
# data center operations, where we want one side to never be promoted if not
# in the case of a total DC failure.
#
-# cluster-slave-no-failover no
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while
+# the cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases.
+# The first case is for when an application doesn't require consistency of
+# data during node failures or network partitions. One example of this is a
+# cache, where as long as the node has the data it should be able to serve it.
+#
+# The second use case is for configurations that don't meet the recommended
+# three shards but want to enable cluster mode and scale later. A master
+# outage in a 1 or 2 shard configuration causes a read/write outage to the
+# entire cluster without this option set; with it set there is only a write
+# outage. Without a quorum of masters, slot ownership will not change
+# automatically.
+#
+# cluster-allow-reads-when-down no

# In order to setup your cluster make sure to read the documentation
# available at http://redis.io web site.
@@ -1020,7 +1419,11 @@ latency-monitor-threshold 0
#  z     Sorted set commands
#  x     Expired events (events generated every time a key expires)
#  e     Evicted events (events generated when a key is evicted for maxmemory)
-#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
+#  t     Stream commands
+#  m     Key-miss events (Note: It is not included in the 'A' class)
+#  A     Alias for g$lshzxet, so that the "AKE" string means all the events
+#        (Except key-miss events which are excluded from 'A' due to their
+#        unique nature).
#
# The "notify-keyspace-events" takes as argument a string that is composed
# of zero or multiple characters. The empty string means that notifications
@@ -1041,6 +1444,61 @@ latency-monitor-threshold 0
# specify at least one of K or E, no events will be delivered.

notify-keyspace-events ""

+############################### GOPHER SERVER #################################
+
+# Redis contains an implementation of the Gopher protocol, as specified in
+# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
+#
+# The Gopher protocol was very popular in the late '90s. It is an alternative
+# to the web, and the implementation, both server and client side, is so
+# simple that the Redis server needs just 100 lines of code to implement it.
+#
+# What do you do with Gopher nowadays? Well, Gopher never *really* died, and
+# lately there is a movement to resurrect Gopher's more hierarchical content,
+# composed of just plain text documents. Some want a simpler internet; others
+# believe that the mainstream internet became too controlled, and it's cool
+# to create an alternative space for people who want a bit of fresh air.
+#
+# Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
+# as a gift.
+#
+# --- HOW IT WORKS ---
+#
+# The Redis Gopher support uses the inline protocol of Redis, and specifically
+# two kinds of inline requests that were previously illegal: an empty request
+# or any request that starts with "/" (there are no Redis commands starting
+# with such a slash). Normal RESP2/RESP3 requests are completely outside the
+# path of the Gopher protocol implementation and are served as usual.
+#
+# If you open a connection to Redis when Gopher is enabled and send it
+# a string like "/foo", if there is a key named "/foo" it is served via the
+# Gopher protocol.
+#
+# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher
+# jargon), you likely need a script like the following:
+#
+# https://github.com/antirez/gopher2redis
+#
+# --- SECURITY WARNING ---
+#
+# If you plan to put Redis on the internet at a publicly accessible address
+# to serve Gopher pages, MAKE SURE TO SET A PASSWORD on the instance.
+# Once a password is set:
+#
+#   1. The Gopher server (when enabled, not by default) will still serve
+#      content via Gopher.
+#   2. However other commands cannot be called before the client
+#      authenticates.
+#
+# So use the 'requirepass' option to protect your instance.
+#
+# To enable Gopher support uncomment the following line and set
+# the option from no (the default) to yes.
+#
+# gopher-enabled no
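+
+# A quick manual test of the inline path described above (a sketch, not part
+# of the shipped configuration: it assumes gopher-enabled has been set to yes
+# and that a netcat binary is available; add -a <password> to redis-cli once
+# requirepass is configured):
+#
+#   redis-cli SET /foo "hello gopher"
+#   printf '/foo\r\n' | nc localhost 6379
+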
############################### ADVANCED CONFIG ###############################

# Hashes are encoded using a memory efficient data structure when they have a
@@ -1107,6 +1565,17 @@ zset-max-ziplist-value 64
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000

+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
@@ -1135,7 +1604,7 @@ activerehashing yes
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
-# slave  -> slave clients
+# replica -> replica clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
@@ -1156,12 +1625,12 @@ activerehashing yes
# asynchronous clients may create a scenario where data is requested faster
# than it can read.
#
-# Instead there is a default limit for pubsub and slave clients, since
-# subscribers and slaves receive data in a push fashion.
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
-client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60

# Client query buffers accumulate new commands. They are limited to a fixed
@@ -1195,12 +1664,34 @@ client-output-buffer-limit pubsub 32mb 8mb 60
# 100 only in environments where very low latency is required.
hz 10

+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This helps, for instance, to avoid processing
+# too many clients for each background task invocation, which would cause
+# latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis offers, and
+# enables by default, the ability to use an adaptive HZ value which will
+# temporarily rise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used as a
+# baseline, but multiples of the configured HZ value will actually be used
+# as needed once more clients are connected. In this way an idle instance
+# will use very little CPU time while a busy instance will be more
+# responsive.
+dynamic-hz yes
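+
+# Both values can be checked, and hz tuned, on a live instance (a sketch,
+# assuming a reachable local server):
+#
+#   redis-cli CONFIG GET hz
+#   redis-cli CONFIG GET dynamic-hz
+#   redis-cli CONFIG SET hz 50
+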
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes

+# When Redis saves an RDB file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
# idea to start with the default settings and only change them after investigating
# how to improve the performances and how the keys LFU change over time, which
@@ -1255,10 +1746,6 @@ aof-rewrite-incremental-fsync yes
########################### ACTIVE DEFRAGMENTATION #######################
#
-# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested
-# even in production and manually tested by multiple engineers for some
-# time.
-#
# What is active defragmentation?
# -------------------------------
#
@@ -1298,7 +1785,7 @@ aof-rewrite-incremental-fsync yes
# a good idea to leave the defaults untouched.

# Enabled active defragmentation
-# activedefrag yes
+# activedefrag no

# Minimum amount of fragmentation waste to start active defrag
# active-defrag-ignore-bytes 100mb
@@ -1309,8 +1796,42 @@ aof-rewrite-incremental-fsync yes
# Maximum percentage of fragmentation at which we use maximum effort
# active-defrag-threshold-upper 100

-# Minimal effort for defrag in CPU percentage
-# active-defrag-cycle-min 25
+# Minimal effort for defrag in CPU percentage, to be used when the lower
+# threshold is reached
+# active-defrag-cycle-min 1

-# Maximal effort for defrag in CPU percentage
-# active-defrag-cycle-max 75
+# Maximal effort for defrag in CPU percentage, to be used when the upper
+# threshold is reached
+# active-defrag-cycle-max 25
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
+
+# Jemalloc background thread for purging will be enabled by default
+jemalloc-bg-thread yes
+
+# It is possible to pin different threads and processes of Redis to specific
+# CPUs in your system, in order to maximize the performance of the server.
+# This is useful both to pin different Redis threads to different CPUs and
+# to make sure that multiple Redis instances running on the same host will
+# be pinned to different CPUs.
+#
+# Normally you can do this using the "taskset" command, however it is also
+# possible to do this via Redis configuration directly, both on Linux and
+# FreeBSD.
+#
+# You can pin the server/IO threads, bio threads, aof rewrite child process,
+# and the bgsave child process. The syntax to specify the cpu list is the
+# same as the taskset command:
+#
+# Set redis server/io threads to cpu affinity 0,2,4,6:
+# server_cpulist 0-7:2
+#
+# Set bio threads to cpu affinity 1,3:
+# bio_cpulist 1,3
+#
+# Set aof rewrite child process to cpu affinity 8,9,10,11:
+# aof_rewrite_cpulist 8-11
+#
+# Set bgsave child process to cpu affinity 1,10,11:
+# bgsave_cpulist 1,10-11
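+
+# For comparison, roughly the same effect for the process as a whole can be
+# achieved externally (a sketch, assuming a util-linux taskset recent enough
+# to accept stride syntax in its cpu list):
+#
+#   taskset -c 0-7:2 redis-server /usr/local/etc/redis/redis.conf
+#
+# The *_cpulist directives are finer grained: they pin individual thread and
+# child-process classes instead of the whole server.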
diff --git a/salt/redis/init.sls b/salt/redis/init.sls
index 5a981e688..3f24ba079 100644
--- a/salt/redis/init.sls
+++ b/salt/redis/init.sls
@@ -12,8 +12,8 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see .
-{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
+{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
+{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
 {% set MANAGER = salt['grains.get']('master') %}

 # Redis Setup
@@ -53,10 +53,14 @@ so-redis:
     - user: socore
     - port_bindings:
       - 0.0.0.0:6379:6379
+      - 0.0.0.0:9696:9696
     - binds:
       - /opt/so/log/redis:/var/log/redis:rw
       - /opt/so/conf/redis/etc/redis.conf:/usr/local/etc/redis/redis.conf:ro
       - /opt/so/conf/redis/working:/redis:rw
+      - /etc/pki/redis.crt:/certs/redis.crt:ro
+      - /etc/pki/redis.key:/certs/redis.key:ro
+      - /etc/pki/ca.crt:/certs/ca.crt:ro
     - entrypoint: "redis-server /usr/local/etc/redis/redis.conf"
     - watch:
       - file: /opt/so/conf/redis/etc
diff --git a/salt/registry/init.sls b/salt/registry/init.sls
index 9ee44d1de..6e17d639e 100644
--- a/salt/registry/init.sls
+++ b/salt/registry/init.sls
@@ -40,7 +40,7 @@ dockerregistryconf:
 # Install the registry container
 so-dockerregistry:
   docker_container.running:
-    - image: registry:2
+    - image: registry:latest
     - hostname: so-registry
     - restart_policy: always
     - port_bindings:
diff --git a/salt/salt/init.sls b/salt/salt/init.sls
index a11246cbb..2caae81cd 100644
--- a/salt/salt/init.sls
+++ b/salt/salt/init.sls
@@ -1,15 +1,13 @@
-
-
 {% if grains['os'] != 'CentOS' %}
 saltpymodules:
   pkg.installed:
     - pkgs:
       - python-docker
       - python-m2crypto
-  {% endif %}
+{% endif %}

-
-salt_minion_service:
-  service.running:
-    - name: salt-minion
-    - enable: True
+salt_bootstrap:
+  file.managed:
+    - name: /usr/sbin/bootstrap-salt.sh
+    - source: salt://salt/scripts/bootstrap-salt.sh
+    - mode: 755
diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja
new file mode 100644
index 000000000..5b882a3a3
--- /dev/null
+++ b/salt/salt/map.jinja
@@ -0,0 +1,18 @@
+{% import_yaml 'salt/minion.defaults.yaml' as salt %}
+{% set SALTVERSION = salt.salt.minion.version %}
+
+{% if grains.os|lower == 'ubuntu' %}
+  {% set COMMON = 'salt-common' %}
+{% elif grains.os|lower == 'centos' %}
+  {% set COMMON = 'salt' %}
+{% endif %}
+
+{% if grains.saltversion|string != SALTVERSION|string %}
+  {% if grains.os|lower == 'centos' %}
+    {% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh /usr/sbin/bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %}
+  {% elif grains.os|lower == 'ubuntu' %}
+    {% set UPGRADECOMMAND = 'apt-mark unhold salt-common && apt-mark unhold salt-minion && sh /usr/sbin/bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %}
+  {% endif %}
+{% else %}
+  {% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %}
+{% endif %}
\ No newline at end of file
diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml
new file mode 100644
index 000000000..8694ffbc7
--- /dev/null
+++ b/salt/salt/master.defaults.yaml
@@
-0,0 +1,5 @@ +#version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched +# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions +salt: + master: + version: 3001.1 \ No newline at end of file diff --git a/salt/salt/master.sls b/salt/salt/master.sls new file mode 100644 index 000000000..481be743a --- /dev/null +++ b/salt/salt/master.sls @@ -0,0 +1,11 @@ +salt_master_package: + pkg.installed: + - pkgs: + - salt + - salt-master + - hold: True + +salt_master_service: + service.running: + - name: salt-master + - enable: True \ No newline at end of file diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml new file mode 100644 index 000000000..31c313df6 --- /dev/null +++ b/salt/salt/minion.defaults.yaml @@ -0,0 +1,5 @@ +#version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched +# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions +salt: + minion: + version: 3001.1 \ No newline at end of file diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls new file mode 100644 index 000000000..b2d3a2913 --- /dev/null +++ b/salt/salt/minion.sls @@ -0,0 +1,26 @@ +{% from 'salt/map.jinja' import COMMON with context %} +{% from 'salt/map.jinja' import UPGRADECOMMAND with context %} + +include: + - salt + +install_salt_minion: + cmd.run: + - name: {{ UPGRADECOMMAND }} + +#versionlock_salt_minion: +# module.run: +# - pkg.hold: +# - name: "salt-*" + +salt_minion_package: + pkg.installed: + - pkgs: + - {{ COMMON }} + - salt-minion + - hold: True + +salt_minion_service: + service.running: + - name: salt-minion + - enable: True diff --git a/salt/salt/scripts/bootstrap-salt.sh b/salt/salt/scripts/bootstrap-salt.sh new file mode 100644 index 000000000..70241a041 --- /dev/null +++ b/salt/salt/scripts/bootstrap-salt.sh @@ -0,0 +1,7856 @@ +#!/bin/sh - + +# WARNING: Changes to this file in the salt repo will be overwritten! +# Please submit pull requests against the salt-bootstrap repo: +# https://github.com/saltstack/salt-bootstrap + +#====================================================================================================================== +# vim: softtabstop=4 shiftwidth=4 expandtab fenc=utf-8 spell spelllang=en cc=120 +#====================================================================================================================== +# +# FILE: bootstrap-salt.sh +# +# DESCRIPTION: Bootstrap Salt installation for various systems/distributions +# +# BUGS: https://github.com/saltstack/salt-bootstrap/issues +# +# COPYRIGHT: (c) 2012-2018 by the SaltStack Team, see AUTHORS.rst for more +# details. +# +# LICENSE: Apache 2.0 +# ORGANIZATION: SaltStack (saltstack.com) +# CREATED: 10/15/2012 09:49:37 PM WEST +#====================================================================================================================== +set -o nounset # Treat unset variables as an error + +__ScriptVersion="2020.06.23" +__ScriptName="bootstrap-salt.sh" + +__ScriptFullName="$0" +__ScriptArgs="$*" + +#====================================================================================================================== +# Environment variables taken into account. 
+#---------------------------------------------------------------------------------------------------------------------- +# * BS_COLORS: If 0 disables colour support +# * BS_PIP_ALLOWED: If 1 enable pip based installations(if needed) +# * BS_PIP_ALL: If 1 enable all python packages to be installed via pip instead of apt, requires setting virtualenv +# * BS_VIRTUALENV_DIR: The virtualenv to install salt into (shouldn't exist yet) +# * BS_ECHO_DEBUG: If 1 enable debug echo which can also be set by -D +# * BS_SALT_ETC_DIR: Defaults to /etc/salt (Only tweak'able on git based installations) +# * BS_SALT_CACHE_DIR: Defaults to /var/cache/salt (Only tweak'able on git based installations) +# * BS_KEEP_TEMP_FILES: If 1, don't move temporary files, instead copy them +# * BS_FORCE_OVERWRITE: Force overriding copied files(config, init.d, etc) +# * BS_UPGRADE_SYS: If 1 and an option, upgrade system. Default 0. +# * BS_GENTOO_USE_BINHOST: If 1 add `--getbinpkg` to gentoo's emerge +# * BS_SALT_MASTER_ADDRESS: The IP or DNS name of the salt-master the minion should connect to +# * BS_SALT_GIT_CHECKOUT_DIR: The directory where to clone Salt on git installations +#====================================================================================================================== + + +# Bootstrap script truth values +BS_TRUE=1 +BS_FALSE=0 + +# Default sleep time used when waiting for daemons to start, restart and checking for these running +__DEFAULT_SLEEP=3 + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __detect_color_support +# DESCRIPTION: Try to detect color support. +#---------------------------------------------------------------------------------------------------------------------- +_COLORS=${BS_COLORS:-$(tput colors 2>/dev/null || echo 0)} +__detect_color_support() { + # shellcheck disable=SC2181 + if [ $? -eq 0 ] && [ "$_COLORS" -gt 2 ]; then + RC='\033[1;31m' + GC='\033[1;32m' + BC='\033[1;34m' + YC='\033[1;33m' + EC='\033[0m' + else + RC="" + GC="" + BC="" + YC="" + EC="" + fi +} +__detect_color_support + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: echoerr +# DESCRIPTION: Echo errors to stderr. +#---------------------------------------------------------------------------------------------------------------------- +echoerror() { + printf "${RC} * ERROR${EC}: %s\\n" "$@" 1>&2; +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: echoinfo +# DESCRIPTION: Echo information to stdout. +#---------------------------------------------------------------------------------------------------------------------- +echoinfo() { + printf "${GC} * INFO${EC}: %s\\n" "$@"; +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: echowarn +# DESCRIPTION: Echo warning information to stdout. +#---------------------------------------------------------------------------------------------------------------------- +echowarn() { + printf "${YC} * WARN${EC}: %s\\n" "$@"; +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: echodebug +# DESCRIPTION: Echo debug information to stdout. 
+#---------------------------------------------------------------------------------------------------------------------- +echodebug() { + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + printf "${BC} * DEBUG${EC}: %s\\n" "$@"; + fi +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_command_exists +# DESCRIPTION: Check if a command exists. +#---------------------------------------------------------------------------------------------------------------------- +__check_command_exists() { + command -v "$1" > /dev/null 2>&1 +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_pip_allowed +# DESCRIPTION: Simple function to let the users know that -P needs to be used. +#---------------------------------------------------------------------------------------------------------------------- +__check_pip_allowed() { + if [ $# -eq 1 ]; then + _PIP_ALLOWED_ERROR_MSG=$1 + else + _PIP_ALLOWED_ERROR_MSG="pip based installations were not allowed. Retry using '-P'" + fi + + if [ "$_PIP_ALLOWED" -eq $BS_FALSE ]; then + echoerror "$_PIP_ALLOWED_ERROR_MSG" + __usage + exit 1 + fi +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_config_dir +# DESCRIPTION: Checks the config directory, retrieves URLs if provided. +#---------------------------------------------------------------------------------------------------------------------- +__check_config_dir() { + CC_DIR_NAME="$1" + CC_DIR_BASE=$(basename "${CC_DIR_NAME}") + + case "$CC_DIR_NAME" in + http://*|https://*) + __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}" + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + ftp://*) + __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}" + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + *://*) + echoerror "Unsupported URI scheme for $CC_DIR_NAME" + echo "null" + return + ;; + *) + if [ ! -e "${CC_DIR_NAME}" ]; then + echoerror "The configuration directory or archive $CC_DIR_NAME does not exist." 
+ echo "null" + return + fi + ;; + esac + + case "$CC_DIR_NAME" in + *.tgz|*.tar.gz) + tar -zxf "${CC_DIR_NAME}" -C /tmp + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tgz") + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.gz") + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + *.tbz|*.tar.bz2) + tar -xjf "${CC_DIR_NAME}" -C /tmp + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tbz") + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.bz2") + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + *.txz|*.tar.xz) + tar -xJf "${CC_DIR_NAME}" -C /tmp + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".txz") + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.xz") + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + esac + + echo "${CC_DIR_NAME}" +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_unparsed_options +# DESCRIPTION: Checks the placed after the install arguments +#---------------------------------------------------------------------------------------------------------------------- +__check_unparsed_options() { + shellopts="$1" + # grep alternative for SunOS + if [ -f /usr/xpg4/bin/grep ]; then + grep='/usr/xpg4/bin/grep' + else + grep='grep' + fi + unparsed_options=$( echo "$shellopts" | ${grep} -E '(^|[[:space:]])[-]+[[:alnum:]]' ) + if [ "$unparsed_options" != "" ]; then + __usage + echo + echoerror "options are only allowed before install arguments" + echo + exit 1 + fi +} + + +#---------------------------------------------------------------------------------------------------------------------- +# Handle command line arguments +#---------------------------------------------------------------------------------------------------------------------- +_KEEP_TEMP_FILES=${BS_KEEP_TEMP_FILES:-$BS_FALSE} +_TEMP_CONFIG_DIR="null" +_SALTSTACK_REPO_URL="https://github.com/saltstack/salt.git" +_SALT_REPO_URL=${_SALTSTACK_REPO_URL} +_DOWNSTREAM_PKG_REPO=$BS_FALSE +_TEMP_KEYS_DIR="null" +_SLEEP="${__DEFAULT_SLEEP}" +_INSTALL_MASTER=$BS_FALSE +_INSTALL_SYNDIC=$BS_FALSE +_INSTALL_MINION=$BS_TRUE +_INSTALL_CLOUD=$BS_FALSE +_VIRTUALENV_DIR=${BS_VIRTUALENV_DIR:-"null"} +_START_DAEMONS=$BS_TRUE +_DISABLE_SALT_CHECKS=$BS_FALSE +_ECHO_DEBUG=${BS_ECHO_DEBUG:-$BS_FALSE} +_CONFIG_ONLY=$BS_FALSE +_PIP_ALLOWED=${BS_PIP_ALLOWED:-$BS_FALSE} +_PIP_ALL=${BS_PIP_ALL:-$BS_FALSE} +_SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/etc/salt} +_SALT_CACHE_DIR=${BS_SALT_CACHE_DIR:-/var/cache/salt} +_PKI_DIR=${_SALT_ETC_DIR}/pki +_FORCE_OVERWRITE=${BS_FORCE_OVERWRITE:-$BS_FALSE} +_GENTOO_USE_BINHOST=${BS_GENTOO_USE_BINHOST:-$BS_FALSE} +_EPEL_REPO=${BS_EPEL_REPO:-epel} +_EPEL_REPOS_INSTALLED=$BS_FALSE +_UPGRADE_SYS=${BS_UPGRADE_SYS:-$BS_FALSE} +_INSECURE_DL=${BS_INSECURE_DL:-$BS_FALSE} +_CURL_ARGS=${BS_CURL_ARGS:-} +_FETCH_ARGS=${BS_FETCH_ARGS:-} +_GPG_ARGS=${BS_GPG_ARGS:-} +_WGET_ARGS=${BS_WGET_ARGS:-} +_SALT_MASTER_ADDRESS=${BS_SALT_MASTER_ADDRESS:-null} +_SALT_MINION_ID="null" +# _SIMPLIFY_VERSION is mostly used in Solaris based distributions +_SIMPLIFY_VERSION=$BS_TRUE +_LIBCLOUD_MIN_VERSION="0.14.0" +_EXTRA_PACKAGES="" +_HTTP_PROXY="" +_SALT_GIT_CHECKOUT_DIR=${BS_SALT_GIT_CHECKOUT_DIR:-/tmp/git/salt} +_NO_DEPS=$BS_FALSE +_FORCE_SHALLOW_CLONE=$BS_FALSE +_DISABLE_SSL=$BS_FALSE +_DISABLE_REPOS=$BS_FALSE +_CUSTOM_REPO_URL="null" +_CUSTOM_MASTER_CONFIG="null" +_CUSTOM_MINION_CONFIG="null" +_QUIET_GIT_INSTALLATION=$BS_FALSE +_REPO_URL="repo.saltstack.com" +_PY_EXE="" +_INSTALL_PY="$BS_FALSE" +_TORNADO_MAX_PY3_VERSION="5.0" +_POST_NEON_INSTALL=$BS_FALSE +_MINIMUM_PIP_VERSION="8.0.0" 
+_MINIMUM_SETUPTOOLS_VERSION="9.1" +_POST_NEON_PIP_INSTALL_ARGS="--prefix=/usr" + +# Defaults for install arguments +ITYPE="stable" + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __usage +# DESCRIPTION: Display usage information. +#---------------------------------------------------------------------------------------------------------------------- +__usage() { + cat << EOT + + Usage : ${__ScriptName} [options] [install-type-args] + + Installation types: + - stable Install latest stable release. This is the default + install type + - stable [branch] Install latest version on a branch. Only supported + for packages available at repo.saltstack.com + - stable [version] Install a specific version. Only supported for + packages available at repo.saltstack.com + - testing RHEL-family specific: configure EPEL testing repo + - git Install from the head of the master branch + - git [ref] Install from any git ref (such as a branch, tag, or + commit) + + Examples: + - ${__ScriptName} + - ${__ScriptName} stable + - ${__ScriptName} stable 2017.7 + - ${__ScriptName} stable 2017.7.2 + - ${__ScriptName} testing + - ${__ScriptName} git + - ${__ScriptName} git 2017.7 + - ${__ScriptName} git v2017.7.2 + - ${__ScriptName} git 06f249901a2e2f1ed310d58ea3921a129f214358 + + Options: + -h Display this message + -v Display script version + -n No colours + -D Show debug output + -c Temporary configuration directory + -g Salt Git repository URL. Default: ${_SALTSTACK_REPO_URL} + -w Install packages from downstream package repository rather than + upstream, saltstack package repository. This is currently only + implemented for SUSE. + -k Temporary directory holding the minion keys which will pre-seed + the master. + -s Sleep time used when waiting for daemons to start, restart and when + checking for the services running. Default: ${__DEFAULT_SLEEP} + -L Also install salt-cloud and required python-libcloud package + -M Also install salt-master + -S Also install salt-syndic + -N Do not install salt-minion + -X Do not start daemons after installation + -d Disables checking if Salt services are enabled to start on system boot. + You can also do this by touching /tmp/disable_salt_checks on the target + host. Default: \${BS_FALSE} + -P Allow pip based installations. On some distributions the required salt + packages or its dependencies are not available as a package for that + distribution. Using this flag allows the script to use pip as a last + resort method. NOTE: This only works for functions which actually + implement pip based installations. + -U If set, fully upgrade the system prior to bootstrapping Salt + -I If set, allow insecure connections while downloading any files. For + example, pass '--no-check-certificate' to 'wget' or '--insecure' to + 'curl'. On Debian and Ubuntu, using this option with -U allows obtaining + GnuPG archive keys insecurely if distro has changed release signatures. + -F Allow copied files to overwrite existing (config, init.d, etc) + -K If set, keep the temporary files in the temporary directories specified + with -c and -k + -C Only run the configuration function. Implies -F (forced overwrite). + To overwrite Master or Syndic configs, -M or -S, respectively, must + also be specified. Salt installation will be ommitted, but some of the + dependencies could be installed to write configuration with -j or -J. + -A Pass the salt-master DNS name or IP. 
This will be stored under + \${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf + -i Pass the salt-minion id. This will be stored under + \${BS_SALT_ETC_DIR}/minion_id + -p Extra-package to install while installing Salt dependencies. One package + per -p flag. You are responsible for providing the proper package name. + -H Use the specified HTTP proxy for all download URLs (including https://). + For example: http://myproxy.example.com:3128 + -b Assume that dependencies are already installed and software sources are + set up. If git is selected, git tree is still checked out as dependency + step. + -f Force shallow cloning for git installations. + This may result in an "n/a" in the version number. + -l Disable ssl checks. When passed, switches "https" calls to "http" where + possible. + -V Install Salt into virtualenv + (only available for Ubuntu based distributions) + -a Pip install all Python pkg dependencies for Salt. Requires -V to install + all pip pkgs into the virtualenv. + (Only available for Ubuntu based distributions) + -r Disable all repository configuration performed by this script. This + option assumes all necessary repository configuration is already present + on the system. + -R Specify a custom repository URL. Assumes the custom repository URL + points to a repository that mirrors Salt packages located at + repo.saltstack.com. The option passed with -R replaces the + "repo.saltstack.com". If -R is passed, -r is also set. Currently only + works on CentOS/RHEL and Debian based distributions. + -J Replace the Master config file with data passed in as a JSON string. If + a Master config file is found, a reasonable effort will be made to save + the file with a ".bak" extension. If used in conjunction with -C or -F, + no ".bak" file will be created as either of those options will force + a complete overwrite of the file. + -j Replace the Minion config file with data passed in as a JSON string. If + a Minion config file is found, a reasonable effort will be made to save + the file with a ".bak" extension. If used in conjunction with -C or -F, + no ".bak" file will be created as either of those options will force + a complete overwrite of the file. + -q Quiet salt installation from git (setup.py install -q) + -x Changes the Python version used to install Salt. + For CentOS 6 git installations python2.7 is supported. + Fedora git installation, CentOS 7, Debian 9, Ubuntu 16.04 and 18.04 support python3. + -y Installs a different python version on host. Currently this has only been + tested with CentOS 6 and is considered experimental. This will install the + ius repo on the box if disable repo is false. This must be used in conjunction + with -x . For example: + sh bootstrap.sh -P -y -x python2.7 git v2017.7.2 + The above will install python27 and install the git version of salt using the + python2.7 executable. This only works for git and pip installations. + +EOT +} # ---------- end of function __usage ---------- + + +while getopts ':hvnDc:g:Gyx:wk:s:MSNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aq' opt +do + case "${opt}" in + + h ) __usage; exit 0 ;; + v ) echo "$0 -- Version $__ScriptVersion"; exit 0 ;; + n ) _COLORS=0; __detect_color_support ;; + D ) _ECHO_DEBUG=$BS_TRUE ;; + c ) _TEMP_CONFIG_DIR="$OPTARG" ;; + g ) _SALT_REPO_URL=$OPTARG ;; + + G ) echowarn "The '-G' option is DEPRECATED and will be removed in the future stable release!" + echowarn "Bootstrap will always use 'https' protocol to clone from SaltStack GitHub repo." 
+ echowarn "No need to provide this option anymore, now it is a default behavior." + ;; + + w ) _DOWNSTREAM_PKG_REPO=$BS_TRUE ;; + k ) _TEMP_KEYS_DIR="$OPTARG" ;; + s ) _SLEEP=$OPTARG ;; + M ) _INSTALL_MASTER=$BS_TRUE ;; + S ) _INSTALL_SYNDIC=$BS_TRUE ;; + N ) _INSTALL_MINION=$BS_FALSE ;; + X ) _START_DAEMONS=$BS_FALSE ;; + C ) _CONFIG_ONLY=$BS_TRUE ;; + P ) _PIP_ALLOWED=$BS_TRUE ;; + F ) _FORCE_OVERWRITE=$BS_TRUE ;; + U ) _UPGRADE_SYS=$BS_TRUE ;; + K ) _KEEP_TEMP_FILES=$BS_TRUE ;; + I ) _INSECURE_DL=$BS_TRUE ;; + A ) _SALT_MASTER_ADDRESS=$OPTARG ;; + i ) _SALT_MINION_ID=$OPTARG ;; + L ) _INSTALL_CLOUD=$BS_TRUE ;; + p ) _EXTRA_PACKAGES="$_EXTRA_PACKAGES $OPTARG" ;; + d ) _DISABLE_SALT_CHECKS=$BS_TRUE ;; + H ) _HTTP_PROXY="$OPTARG" ;; + b ) _NO_DEPS=$BS_TRUE ;; + f ) _FORCE_SHALLOW_CLONE=$BS_TRUE ;; + l ) _DISABLE_SSL=$BS_TRUE ;; + V ) _VIRTUALENV_DIR="$OPTARG" ;; + a ) _PIP_ALL=$BS_TRUE ;; + r ) _DISABLE_REPOS=$BS_TRUE ;; + R ) _CUSTOM_REPO_URL=$OPTARG ;; + J ) _CUSTOM_MASTER_CONFIG=$OPTARG ;; + j ) _CUSTOM_MINION_CONFIG=$OPTARG ;; + q ) _QUIET_GIT_INSTALLATION=$BS_TRUE ;; + x ) _PY_EXE="$OPTARG" ;; + y ) _INSTALL_PY="$BS_TRUE" ;; + + \?) echo + echoerror "Option does not exist : $OPTARG" + __usage + exit 1 + ;; + + esac # --- end of case --- +done +shift $((OPTIND-1)) + + +# Define our logging file and pipe paths +LOGFILE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.log/g )" +LOGPIPE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.logpipe/g )" +# Ensure no residual pipe exists +rm "$LOGPIPE" 2>/dev/null + +# Create our logging pipe +# On FreeBSD we have to use mkfifo instead of mknod +if ! (mknod "$LOGPIPE" p >/dev/null 2>&1 || mkfifo "$LOGPIPE" >/dev/null 2>&1); then + echoerror "Failed to create the named pipe required to log" + exit 1 +fi + +# What ever is written to the logpipe gets written to the logfile +tee < "$LOGPIPE" "$LOGFILE" & + +# Close STDOUT, reopen it directing it to the logpipe +exec 1>&- +exec 1>"$LOGPIPE" +# Close STDERR, reopen it directing it to the logpipe +exec 2>&- +exec 2>"$LOGPIPE" + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __exit_cleanup +# DESCRIPTION: Cleanup any leftovers after script has ended +# +# +# http://www.unix.com/man-page/POSIX/1posix/trap/ +# +# Signal Number Signal Name +# 1 SIGHUP +# 2 SIGINT +# 3 SIGQUIT +# 6 SIGABRT +# 9 SIGKILL +# 14 SIGALRM +# 15 SIGTERM +#---------------------------------------------------------------------------------------------------------------------- +APT_ERR=$(mktemp /tmp/apt_error.XXXXXX) +__exit_cleanup() { + EXIT_CODE=$? 
+ + if [ "$ITYPE" = "git" ] && [ -d "${_SALT_GIT_CHECKOUT_DIR}" ]; then + if [ $_KEEP_TEMP_FILES -eq $BS_FALSE ]; then + # Clean up the checked out repository + echodebug "Cleaning up the Salt Temporary Git Repository" + # shellcheck disable=SC2164 + cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" + rm -rf "${_SALT_GIT_CHECKOUT_DIR}" + #rm -rf "${_SALT_GIT_CHECKOUT_DIR}/deps" + else + echowarn "Not cleaning up the Salt Temporary git repository on request" + echowarn "Note that if you intend to re-run this script using the git approach, you might encounter some issues" + fi + fi + + # Remove the logging pipe when the script exits + if [ -p "$LOGPIPE" ]; then + echodebug "Removing the logging pipe $LOGPIPE" + rm -f "$LOGPIPE" + fi + + # Remove the temporary apt error file when the script exits + if [ -f "$APT_ERR" ]; then + echodebug "Removing the temporary apt error file $APT_ERR" + rm -f "$APT_ERR" + fi + + # Kill tee when exiting, CentOS, at least requires this + # shellcheck disable=SC2009 + TEE_PID=$(ps ax | grep tee | grep "$LOGFILE" | awk '{print $1}') + + [ "$TEE_PID" = "" ] && exit $EXIT_CODE + + echodebug "Killing logging pipe tee's with pid(s): $TEE_PID" + + # We need to trap errors since killing tee will cause a 127 errno + # We also do this as late as possible so we don't "mis-catch" other errors + __trap_errors() { + echoinfo "Errors Trapped: $EXIT_CODE" + # Exit with the "original" exit code, not the trapped code + exit $EXIT_CODE + } + trap "__trap_errors" INT ABRT QUIT TERM + + # Now we're "good" to kill tee + kill -s TERM "$TEE_PID" + + # In case the 127 errno is not triggered, exit with the "original" exit code + exit $EXIT_CODE +} +trap "__exit_cleanup" EXIT INT + + +# Let's discover how we're being called +# shellcheck disable=SC2009 +CALLER=$(ps -a -o pid,args | grep $$ | grep -v grep | tr -s ' ' | cut -d ' ' -f 3) + +if [ "${CALLER}x" = "${0}x" ]; then + CALLER="shell pipe" +fi + +echoinfo "Running version: ${__ScriptVersion}" +echoinfo "Executed by: ${CALLER}" +echoinfo "Command line: '${__ScriptFullName} ${__ScriptArgs}'" +#echowarn "Running the unstable version of ${__ScriptName}" + +# Define installation type +if [ "$#" -gt 0 ];then + __check_unparsed_options "$*" + ITYPE=$1 + shift +fi + +# Check installation type +if [ "$(echo "$ITYPE" | grep -E '(stable|testing|git)')" = "" ]; then + echoerror "Installation type \"$ITYPE\" is not known..." + exit 1 +fi + +# If doing a git install, check what branch/tag/sha will be checked out +if [ "$ITYPE" = "git" ]; then + if [ "$#" -eq 0 ];then + GIT_REV="master" + else + GIT_REV="$1" + shift + fi + + # Disable shell warning about unbound variable during git install + STABLE_REV="latest" + +# If doing stable install, check if version specified +elif [ "$ITYPE" = "stable" ]; then + if [ "$#" -eq 0 ];then + STABLE_REV="latest" + else + if [ "$(echo "$1" | grep -E '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3|2019\.2|3000|3001)$')" != "" ]; then + STABLE_REV="$1" + shift + elif [ "$(echo "$1" | grep -E '^(2[0-9]*\.[0-9]*\.[0-9]*|[3-9][0-9]{3}*(\.[0-9]*)?)$')" != "" ]; then + if [ "$(uname)" = "Darwin" ]; then + STABLE_REV="$1" + else + STABLE_REV="archive/$1" + fi + shift + else + echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, 2019.2, 3000, 3001, latest, \$MAJOR.\$MINOR.\$PATCH until 2019.2, \$MAJOR or \$MAJOR.\$PATCH starting from 3000)" + exit 1 + fi + fi +fi + +# Check for any unparsed arguments. Should be an error. 
+if [ "$#" -gt 0 ]; then + __usage + echo + echoerror "Too many arguments." + exit 1 +fi + +# whoami alternative for SunOS +if [ -f /usr/xpg4/bin/id ]; then + whoami='/usr/xpg4/bin/id -un' +else + whoami='whoami' +fi + +# Root permissions are required to run this script +if [ "$($whoami)" != "root" ]; then + echoerror "Salt requires root privileges to install. Please re-run this script as root." + exit 1 +fi + +# Check that we're actually installing one of minion/master/syndic +if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echowarn "Nothing to install or configure" + exit 1 +fi + +# Check that we're installing a minion if we're being passed a master address +if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_SALT_MASTER_ADDRESS" != "null" ]; then + echoerror "Don't pass a master address (-A) if no minion is going to be bootstrapped." + exit 1 +fi + +# Check that we're installing a minion if we're being passed a minion id +if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_SALT_MINION_ID" != "null" ]; then + echoerror "Don't pass a minion id (-i) if no minion is going to be bootstrapped." + exit 1 +fi + +# Check that we're installing or configuring a master if we're being passed a master config json dict +if [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then + if [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoerror "Don't pass a master config JSON dict (-J) if no master is going to be bootstrapped or configured." + exit 1 + fi +fi + +# Check that we're installing or configuring a minion if we're being passed a minion config json dict +if [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then + if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoerror "Don't pass a minion config JSON dict (-j) if no minion is going to be bootstrapped or configured." + exit 1 + fi +fi + +# Check if we're installing via a different Python executable and set major version variables +if [ -n "$_PY_EXE" ]; then + if [ "$(uname)" = "Darwin" ]; then + _PY_PKG_VER=$(echo "$_PY_EXE" | sed "s/\\.//g") + else + _PY_PKG_VER=$(echo "$_PY_EXE" | sed -E "s/\\.//g") + fi + + _PY_MAJOR_VERSION=$(echo "$_PY_PKG_VER" | cut -c 7) + if [ "$_PY_MAJOR_VERSION" != 3 ] && [ "$_PY_MAJOR_VERSION" != 2 ]; then + echoerror "Detected -x option, but Python major version is not 2 or 3." + echoerror "The -x option must be passed as python2, python27, or python2.7 (or use the Python '3' versions of examples)." + exit 1 + fi + + echoinfo "Detected -x option. Using $_PY_EXE to install Salt." +else + _PY_PKG_VER="" + _PY_MAJOR_VERSION="" +fi + +# If the configuration directory or archive does not exist, error out +if [ "$_TEMP_CONFIG_DIR" != "null" ]; then + _TEMP_CONFIG_DIR="$(__check_config_dir "$_TEMP_CONFIG_DIR")" + [ "$_TEMP_CONFIG_DIR" = "null" ] && exit 1 +fi + +# If the pre-seed keys directory does not exist, error out +if [ "$_TEMP_KEYS_DIR" != "null" ] && [ ! -d "$_TEMP_KEYS_DIR" ]; then + echoerror "The pre-seed keys directory ${_TEMP_KEYS_DIR} does not exist." 
+ exit 1 +fi + +# -a and -V only work from git +if [ "$ITYPE" != "git" ]; then + if [ $_PIP_ALL -eq $BS_TRUE ]; then + echoerror "Pip installing all python packages with -a is only possible when installing Salt via git" + exit 1 + fi + if [ "$_VIRTUALENV_DIR" != "null" ]; then + echoerror "Virtualenv installs via -V is only possible when installing Salt via git" + exit 1 + fi +fi + +# Set the _REPO_URL value based on if -R was passed or not. Defaults to repo.saltstack.com. +if [ "$_CUSTOM_REPO_URL" != "null" ]; then + _REPO_URL="$_CUSTOM_REPO_URL" + + # Check for -r since -R is being passed. Set -r with a warning. + if [ "$_DISABLE_REPOS" -eq $BS_FALSE ]; then + echowarn "Detected -R option. No other repositories will be configured when -R is used. Setting -r option to True." + _DISABLE_REPOS=$BS_TRUE + fi +fi + +# Check the _DISABLE_SSL value and set HTTP or HTTPS. +if [ "$_DISABLE_SSL" -eq $BS_TRUE ]; then + HTTP_VAL="http" +else + HTTP_VAL="https" +fi + +# Check the _QUIET_GIT_INSTALLATION value and set SETUP_PY_INSTALL_ARGS. +if [ "$_QUIET_GIT_INSTALLATION" -eq $BS_TRUE ]; then + SETUP_PY_INSTALL_ARGS="-q" +else + SETUP_PY_INSTALL_ARGS="" +fi + +# Handle the insecure flags +if [ "$_INSECURE_DL" -eq $BS_TRUE ]; then + _CURL_ARGS="${_CURL_ARGS} --insecure" + _FETCH_ARGS="${_FETCH_ARGS} --no-verify-peer" + _GPG_ARGS="${_GPG_ARGS} --keyserver-options no-check-cert" + _WGET_ARGS="${_WGET_ARGS} --no-check-certificate" +else + _GPG_ARGS="${_GPG_ARGS} --keyserver-options ca-cert-file=/etc/ssl/certs/ca-certificates.crt" +fi + +# Export the http_proxy configuration to our current environment +if [ "${_HTTP_PROXY}" != "" ]; then + export http_proxy="${_HTTP_PROXY}" + export https_proxy="${_HTTP_PROXY}" + # Using "deprecated" option here, but that appears the only way to make it work. 
+ # See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=818802 + # and https://bugs.launchpad.net/ubuntu/+source/gnupg2/+bug/1625848 + _GPG_ARGS="${_GPG_ARGS},http-proxy=${_HTTP_PROXY}" +fi + +# Work around for 'Docker + salt-bootstrap failure' https://github.com/saltstack/salt-bootstrap/issues/394 +if [ "${_DISABLE_SALT_CHECKS}" -eq $BS_FALSE ] && [ -f /tmp/disable_salt_checks ]; then + # shellcheck disable=SC2016 + echowarn 'Found file: /tmp/disable_salt_checks, setting _DISABLE_SALT_CHECKS=$BS_TRUE' + _DISABLE_SALT_CHECKS=$BS_TRUE +fi + +# Because -a can only be installed into virtualenv +if [ "${_PIP_ALL}" -eq $BS_TRUE ] && [ "${_VIRTUALENV_DIR}" = "null" ]; then + usage + # Could possibly set up a default virtualenv location when -a flag is passed + echoerror "Using -a requires -V because pip pkgs should be siloed from python system pkgs" + exit 1 +fi + +# Make sure virtualenv directory does not already exist +if [ -d "${_VIRTUALENV_DIR}" ]; then + echoerror "The directory ${_VIRTUALENV_DIR} for virtualenv already exists" + exit 1 +fi + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __fetch_url +# DESCRIPTION: Retrieves a URL and writes it to a given path +#---------------------------------------------------------------------------------------------------------------------- +__fetch_url() { + # shellcheck disable=SC2086 + curl $_CURL_ARGS -L -s -f -o "$1" "$2" >/dev/null 2>&1 || + wget $_WGET_ARGS -q -O "$1" "$2" >/dev/null 2>&1 || + fetch $_FETCH_ARGS -q -o "$1" "$2" >/dev/null 2>&1 || # FreeBSD + fetch -q -o "$1" "$2" >/dev/null 2>&1 || # Pre FreeBSD 10 + ftp -o "$1" "$2" >/dev/null 2>&1 || # OpenBSD + (echoerror "$2 failed to download to $1"; exit 1) +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __fetch_verify +# DESCRIPTION: Retrieves a URL, verifies its content and writes it to standard output +#---------------------------------------------------------------------------------------------------------------------- +__fetch_verify() { + fetch_verify_url="$1" + fetch_verify_sum="$2" + fetch_verify_size="$3" + + fetch_verify_tmpf=$(mktemp) && \ + __fetch_url "$fetch_verify_tmpf" "$fetch_verify_url" && \ + test "$(stat --format=%s "$fetch_verify_tmpf")" -eq "$fetch_verify_size" && \ + test "$(md5sum "$fetch_verify_tmpf" | awk '{ print $1 }')" = "$fetch_verify_sum" && \ + cat "$fetch_verify_tmpf" && \ + if rm -f "$fetch_verify_tmpf"; then + return 0 + fi + echo "Failed verification of $fetch_verify_url" + return 1 +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_hardware_info +# DESCRIPTION: Discover hardware information +#---------------------------------------------------------------------------------------------------------------------- +__gather_hardware_info() { + if [ -f /proc/cpuinfo ]; then + CPU_VENDOR_ID=$(awk '/vendor_id|Processor/ {sub(/-.*$/,"",$3); print $3; exit}' /proc/cpuinfo ) + elif [ -f /usr/bin/kstat ]; then + # SmartOS. + # Solaris!? 
+ # This has only been tested for a GenuineIntel CPU + CPU_VENDOR_ID=$(/usr/bin/kstat -p cpu_info:0:cpu_info0:vendor_id | awk '{print $2}') + else + CPU_VENDOR_ID=$( sysctl -n hw.model ) + fi + # shellcheck disable=SC2034 + CPU_VENDOR_ID_L=$( echo "$CPU_VENDOR_ID" | tr '[:upper:]' '[:lower:]' ) + CPU_ARCH=$(uname -m 2>/dev/null || uname -p 2>/dev/null || echo "unknown") + CPU_ARCH_L=$( echo "$CPU_ARCH" | tr '[:upper:]' '[:lower:]' ) +} +__gather_hardware_info + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_os_info +# DESCRIPTION: Discover operating system information +#---------------------------------------------------------------------------------------------------------------------- +__gather_os_info() { + OS_NAME=$(uname -s 2>/dev/null) + OS_NAME_L=$( echo "$OS_NAME" | tr '[:upper:]' '[:lower:]' ) + OS_VERSION=$(uname -r) + # shellcheck disable=SC2034 + OS_VERSION_L=$( echo "$OS_VERSION" | tr '[:upper:]' '[:lower:]' ) +} +__gather_os_info + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __parse_version_string +# DESCRIPTION: Parse version strings ignoring the revision. +# MAJOR.MINOR.REVISION becomes MAJOR.MINOR +#---------------------------------------------------------------------------------------------------------------------- +__parse_version_string() { + VERSION_STRING="$1" + PARSED_VERSION=$( + echo "$VERSION_STRING" | + sed -e 's/^/#/' \ + -e 's/^#[^0-9]*\([0-9][0-9]*\.[0-9][0-9]*\)\(\.[0-9][0-9]*\).*$/\1/' \ + -e 's/^#[^0-9]*\([0-9][0-9]*\.[0-9][0-9]*\).*$/\1/' \ + -e 's/^#[^0-9]*\([0-9][0-9]*\).*$/\1/' \ + -e 's/^#.*$//' + ) + echo "$PARSED_VERSION" +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __derive_debian_numeric_version +# DESCRIPTION: Derive the numeric version from a Debian version string. +#---------------------------------------------------------------------------------------------------------------------- +__derive_debian_numeric_version() { + NUMERIC_VERSION="" + INPUT_VERSION="$1" + if echo "$INPUT_VERSION" | grep -q '^[0-9]'; then + NUMERIC_VERSION="$INPUT_VERSION" + elif [ -z "$INPUT_VERSION" ] && [ -f "/etc/debian_version" ]; then + INPUT_VERSION="$(cat /etc/debian_version)" + fi + if [ -z "$NUMERIC_VERSION" ]; then + if [ "$INPUT_VERSION" = "wheezy/sid" ]; then + # I've found an EC2 wheezy image which did not tell its version + NUMERIC_VERSION=$(__parse_version_string "7.0") + elif [ "$INPUT_VERSION" = "jessie/sid" ]; then + NUMERIC_VERSION=$(__parse_version_string "8.0") + elif [ "$INPUT_VERSION" = "stretch/sid" ]; then + NUMERIC_VERSION=$(__parse_version_string "9.0") + elif [ "$INPUT_VERSION" = "buster/sid" ]; then + NUMERIC_VERSION=$(__parse_version_string "10.0") + else + echowarn "Unable to parse the Debian Version (codename: '$INPUT_VERSION')" + fi + fi + echo "$NUMERIC_VERSION" +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __unquote_string +# DESCRIPTION: Strip single or double quotes from the provided string. 
+#----------------------------------------------------------------------------------------------------------------------
+__unquote_string() {
+    # shellcheck disable=SC1117
+    echo "$*" | sed -e "s/^\([\"\']\)\(.*\)\1\$/\2/g"
+}
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __camelcase_split
+# DESCRIPTION: Convert 'CamelCased' strings to 'Camel Cased'
+#----------------------------------------------------------------------------------------------------------------------
+__camelcase_split() {
+    echo "$*" | sed -e 's/\([^[:upper:][:punct:]]\)\([[:upper:]]\)/\1 \2/g'
+}
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __strip_duplicates
+# DESCRIPTION: Strip duplicate strings
+#----------------------------------------------------------------------------------------------------------------------
+__strip_duplicates() {
+    echo "$*" | tr -s '[:space:]' '\n' | awk '!x[$0]++'
+}
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __sort_release_files
+# DESCRIPTION: Custom sort function. Alphabetical or numerical sort is not
+#              enough.
+#----------------------------------------------------------------------------------------------------------------------
+__sort_release_files() {
+    KNOWN_RELEASE_FILES=$(echo "(arch|alpine|centos|debian|ubuntu|fedora|redhat|suse|\
+        mandrake|mandriva|gentoo|slackware|turbolinux|unitedlinux|void|lsb|system|\
+        oracle|os)(-|_)(release|version)" | sed -E 's:[[:space:]]::g')
+    primary_release_files=""
+    secondary_release_files=""
+    # Sort known vs. unknown files first
+    for release_file in $(echo "${@}" | sed -E 's:[[:space:]]:\n:g' | sort -f | uniq); do
+        match=$(echo "$release_file" | grep -E -i "${KNOWN_RELEASE_FILES}")
+        if [ "${match}" != "" ]; then
+            primary_release_files="${primary_release_files} ${release_file}"
+        else
+            secondary_release_files="${secondary_release_files} ${release_file}"
+        fi
+    done
+
+    # Now let's sort by the known files' importance; the most important goes last in the max_prio list
+    max_prio="redhat-release centos-release oracle-release fedora-release"
+    for entry in $max_prio; do
+        if [ "$(echo "${primary_release_files}" | grep "$entry")" != "" ]; then
+            primary_release_files=$(echo "${primary_release_files}" | sed -e "s:\\(.*\\)\\($entry\\)\\(.*\\):\\2 \\1 \\3:g")
+        fi
+    done
+    # Now, the least important goes last in the min_prio list
+    min_prio="lsb-release"
+    for entry in $min_prio; do
+        if [ "$(echo "${primary_release_files}" | grep "$entry")" != "" ]; then
+            primary_release_files=$(echo "${primary_release_files}" | sed -e "s:\\(.*\\)\\($entry\\)\\(.*\\):\\1 \\3 \\2:g")
+        fi
+    done
+
+    # Echo the results collapsing multiple white-space into a single white-space
+    echo "${primary_release_files} ${secondary_release_files}" | sed -E 's:[[:space:]]+:\n:g'
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __gather_linux_system_info
+# DESCRIPTION: Discover Linux system information
+#----------------------------------------------------------------------------------------------------------------------
+__gather_linux_system_info() {
+    DISTRO_NAME=""
+    DISTRO_VERSION=""
+
+    # Let's test if the lsb_release binary is available
+    rv=$(lsb_release >/dev/null 2>&1)
+
+    # shellcheck disable=SC2181
+    if [ $? -eq 0 ]; then
+        DISTRO_NAME=$(lsb_release -si)
+        if [ "${DISTRO_NAME}" = "Scientific" ]; then
+            DISTRO_NAME="Scientific Linux"
+        elif [ "$(echo "$DISTRO_NAME" | grep ^CloudLinux)" != "" ]; then
+            DISTRO_NAME="Cloud Linux"
+        elif [ "$(echo "$DISTRO_NAME" | grep ^RedHat)" != "" ]; then
+            # Let's convert 'CamelCased' to 'Camel Cased'
+            n=$(__camelcase_split "$DISTRO_NAME")
+            # Skip setting DISTRO_NAME this time, splitting CamelCase has failed.
+            # See https://github.com/saltstack/salt-bootstrap/issues/918
+            [ "$n" = "$DISTRO_NAME" ] && DISTRO_NAME="" || DISTRO_NAME="$n"
+        elif [ "$( echo "${DISTRO_NAME}" | grep openSUSE )" != "" ]; then
+            # lsb_release -si returns "openSUSE Tumbleweed" on openSUSE tumbleweed
+            # lsb_release -si returns "openSUSE project" on openSUSE 12.3
+            # lsb_release -si returns "openSUSE" on openSUSE 15.n
+            DISTRO_NAME="opensuse"
+        elif [ "${DISTRO_NAME}" = "SUSE LINUX" ]; then
+            if [ "$(lsb_release -sd | grep -i opensuse)" != "" ]; then
+                # openSUSE 12.2 reports SUSE LINUX on lsb_release -si
+                DISTRO_NAME="opensuse"
+            else
+                # lsb_release -si returns "SUSE LINUX" on SLES 11 SP3
+                DISTRO_NAME="suse"
+            fi
+        elif [ "${DISTRO_NAME}" = "EnterpriseEnterpriseServer" ]; then
+            # This is the Oracle Linux Enterprise ID before ORACLE LINUX 5 UPDATE 3
+            DISTRO_NAME="Oracle Linux"
+        elif [ "${DISTRO_NAME}" = "OracleServer" ]; then
+            # This is the Oracle Linux Server 6.5
+            DISTRO_NAME="Oracle Linux"
+        elif [ "${DISTRO_NAME}" = "AmazonAMI" ] || [ "${DISTRO_NAME}" = "Amazon" ]; then
+            DISTRO_NAME="Amazon Linux AMI"
+        elif [ "${DISTRO_NAME}" = "ManjaroLinux" ]; then
+            DISTRO_NAME="Arch Linux"
+        elif [ "${DISTRO_NAME}" = "Arch" ]; then
+            DISTRO_NAME="Arch Linux"
+            return
+        fi
+        rv=$(lsb_release -sr)
+        [ "${rv}" != "" ] && DISTRO_VERSION=$(__parse_version_string "$rv")
+    elif [ -f /etc/lsb-release ]; then
+        # We don't have the lsb_release binary, though, we do have the file it parses
+        DISTRO_NAME=$(grep DISTRIB_ID /etc/lsb-release | sed -e 's/.*=//')
+        rv=$(grep DISTRIB_RELEASE /etc/lsb-release | sed -e 's/.*=//')
+        [ "${rv}" != "" ] && DISTRO_VERSION=$(__parse_version_string "$rv")
+    fi
+
+    if [ "$DISTRO_NAME" != "" ] && [ "$DISTRO_VERSION" != "" ]; then
+        # We already have the distribution name and version
+        return
+    fi
+    # shellcheck disable=SC2035,SC2086
+    for rsource in $(__sort_release_files "$(
+            cd /etc && /bin/ls *[_-]release *[_-]version 2>/dev/null | env -i sort | \
+            sed -e '/^redhat-release$/d' -e '/^lsb-release$/d'; \
+            echo redhat-release lsb-release
+            )"); do
+
+        [ ! -f "/etc/${rsource}" ] && continue  # Does not exist
+
+        n=$(echo "${rsource}" | sed -e 's/[_-]release$//' -e 's/[_-]version$//')
+        shortname=$(echo "${n}" | tr '[:upper:]' '[:lower:]')
+        if [ "$shortname" = "debian" ]; then
+            rv=$(__derive_debian_numeric_version "$(cat /etc/${rsource})")
+        else
+            rv=$( (grep VERSION "/etc/${rsource}"; cat "/etc/${rsource}") | grep '[0-9]' | sed -e 'q' )
+        fi
+        [ "${rv}" = "" ] && [ "$shortname" != "arch" ] && continue  # There's no version information. Continue to next rsource
+        v=$(__parse_version_string "$rv")
+        case $shortname in
+            redhat )
+                if [ "$(grep -E 'CentOS' /etc/${rsource})" != "" ]; then
+                    n="CentOS"
+                elif [ "$(grep -E 'Scientific' /etc/${rsource})" != "" ]; then
+                    n="Scientific Linux"
+                elif [ "$(grep -E 'Red Hat Enterprise Linux' /etc/${rsource})" != "" ]; then
+                    n="<R>ed <H>at <E>nterprise <L>inux"
+                else
+                    n="<R>ed <H>at <L>inux"
+                fi
+                ;;
+            arch ) n="Arch Linux" ;;
+            alpine ) n="Alpine Linux" ;;
+            centos ) n="CentOS" ;;
+            debian ) n="Debian" ;;
+            ubuntu ) n="Ubuntu" ;;
+            fedora ) n="Fedora" ;;
+            suse|opensuse ) n="SUSE" ;;
+            mandrake*|mandriva ) n="Mandriva" ;;
+            gentoo ) n="Gentoo" ;;
+            slackware ) n="Slackware" ;;
+            turbolinux ) n="TurboLinux" ;;
+            unitedlinux ) n="UnitedLinux" ;;
+            void ) n="VoidLinux" ;;
+            oracle ) n="Oracle Linux" ;;
+            system )
+                while read -r line; do
+                    [ "${n}x" != "systemx" ] && break
+                    case "$line" in
+                        *Amazon*Linux*AMI*)
+                            n="Amazon Linux AMI"
+                            break
+                    esac
+                done < "/etc/${rsource}"
+                ;;
+            os )
+                nn="$(__unquote_string "$(grep '^ID=' /etc/os-release | sed -e 's/^ID=\(.*\)$/\1/g')")"
+                rv="$(__unquote_string "$(grep '^VERSION_ID=' /etc/os-release | sed -e 's/^VERSION_ID=\(.*\)$/\1/g')")"
+                [ "${rv}" != "" ] && v=$(__parse_version_string "$rv") || v=""
+                case $(echo "${nn}" | tr '[:upper:]' '[:lower:]') in
+                    alpine )
+                        n="Alpine Linux"
+                        v="${rv}"
+                        ;;
+                    amzn )
+                        # Amazon AMIs after 2014.09 match here
+                        n="Amazon Linux AMI"
+                        ;;
+                    arch )
+                        n="Arch Linux"
+                        v=""  # Arch Linux does not provide a version.
+                        ;;
+                    cloudlinux )
+                        n="Cloud Linux"
+                        ;;
+                    debian )
+                        n="Debian"
+                        v=$(__derive_debian_numeric_version "$v")
+                        ;;
+                    sles )
+                        n="SUSE"
+                        v="${rv}"
+                        ;;
+                    opensuse-* )
+                        n="opensuse"
+                        v="${rv}"
+                        ;;
+                    * )
+                        n=${nn}
+                        ;;
+                esac
+                ;;
+            * ) n="${n}" ;
+        esac
+        DISTRO_NAME=$n
+        DISTRO_VERSION=$v
+        break
+    done
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __install_python()
+# DESCRIPTION: Install a different version of python on a host. Currently this has only been tested on CentOS 6 and
+#              is considered experimental.
+#----------------------------------------------------------------------------------------------------------------------
+__install_python() {
+    if [ "$_PY_EXE" = "" ]; then
+        echoerror "Must specify -x with -y to install a specific python version"
+        exit 1
+    fi
+
+    __PACKAGES="$_PY_PKG_VER"
+
+    if [ ${_DISABLE_REPOS} -eq ${BS_FALSE} ]; then
+        echoinfo "Attempting to install a repo to help provide a separate python package"
+        echoinfo "$DISTRO_NAME_L"
+        case "$DISTRO_NAME_L" in
+            "red_hat"|"centos")
+                __PYTHON_REPO_URL="https://repo.ius.io/ius-release-el${DISTRO_MAJOR_VERSION}.rpm"
+                ;;
+            *)
+                echoerror "Installing a repo to provide a python package is only supported on Redhat/CentOS.
+                If a repo is already available, please try running script with -r."
+ exit 1 + ;; + esac + + echoinfo "Installing IUS repo" + __yum_install_noinput "${__PYTHON_REPO_URL}" || return 1 + fi + + echoinfo "Installing ${__PACKAGES}" + __yum_install_noinput "${__PACKAGES}" || return 1 +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_sunos_system_info +# DESCRIPTION: Discover SunOS system info +#---------------------------------------------------------------------------------------------------------------------- +__gather_sunos_system_info() { + if [ -f /sbin/uname ]; then + DISTRO_VERSION=$(/sbin/uname -X | awk '/[kK][eE][rR][nN][eE][lL][iI][dD]/ { print $3 }') + fi + + DISTRO_NAME="" + if [ -f /etc/release ]; then + while read -r line; do + [ "${DISTRO_NAME}" != "" ] && break + case "$line" in + *OpenIndiana*oi_[0-9]*) + DISTRO_NAME="OpenIndiana" + DISTRO_VERSION=$(echo "$line" | sed -nE "s/OpenIndiana(.*)oi_([[:digit:]]+)(.*)/\\2/p") + break + ;; + *OpenSolaris*snv_[0-9]*) + DISTRO_NAME="OpenSolaris" + DISTRO_VERSION=$(echo "$line" | sed -nE "s/OpenSolaris(.*)snv_([[:digit:]]+)(.*)/\\2/p") + break + ;; + *Oracle*Solaris*[0-9]*) + DISTRO_NAME="Oracle Solaris" + DISTRO_VERSION=$(echo "$line" | sed -nE "s/(Oracle Solaris) ([[:digit:]]+)(.*)/\\2/p") + break + ;; + *Solaris*) + DISTRO_NAME="Solaris" + # Let's make sure we not actually on a Joyent's SmartOS VM since some releases + # don't have SmartOS in `/etc/release`, only `Solaris` + if uname -v | grep joyent >/dev/null 2>&1; then + DISTRO_NAME="SmartOS" + fi + break + ;; + *NexentaCore*) + DISTRO_NAME="Nexenta Core" + break + ;; + *SmartOS*) + DISTRO_NAME="SmartOS" + break + ;; + *OmniOS*) + DISTRO_NAME="OmniOS" + DISTRO_VERSION=$(echo "$line" | awk '{print $3}') + _SIMPLIFY_VERSION=$BS_FALSE + break + ;; + esac + done < /etc/release + fi + + if [ "${DISTRO_NAME}" = "" ]; then + DISTRO_NAME="Solaris" + DISTRO_VERSION=$( + echo "${OS_VERSION}" | + sed -e 's;^4\.;1.;' \ + -e 's;^5\.\([0-6]\)[^0-9]*$;2.\1;' \ + -e 's;^5\.\([0-9][0-9]*\).*;\1;' + ) + fi + + if [ "${DISTRO_NAME}" = "SmartOS" ]; then + VIRTUAL_TYPE="smartmachine" + if [ "$(zonename)" = "global" ]; then + VIRTUAL_TYPE="global" + fi + fi +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_bsd_system_info +# DESCRIPTION: Discover OpenBSD, NetBSD and FreeBSD systems information +#---------------------------------------------------------------------------------------------------------------------- +__gather_bsd_system_info() { + DISTRO_NAME=${OS_NAME} + DISTRO_VERSION=$(echo "${OS_VERSION}" | sed -e 's;[()];;' -e 's/-.*$//') +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_osx_system_info +# DESCRIPTION: Discover MacOS X +#---------------------------------------------------------------------------------------------------------------------- +__gather_osx_system_info() { + DISTRO_NAME="MacOSX" + DISTRO_VERSION=$(sw_vers -productVersion) +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_system_info +# DESCRIPTION: Discover which system and distribution we are running. 
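+#              For example, on a Linux host OS_NAME_L is "linux", so the case statement below dispatches to
+#              __gather_linux_system_info to populate DISTRO_NAME and DISTRO_VERSION.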
+#---------------------------------------------------------------------------------------------------------------------- +__gather_system_info() { + case ${OS_NAME_L} in + linux ) + __gather_linux_system_info + ;; + sunos ) + __gather_sunos_system_info + ;; + openbsd|freebsd|netbsd ) + __gather_bsd_system_info + ;; + darwin ) + __gather_osx_system_info + ;; + * ) + echoerror "${OS_NAME} not supported."; + exit 1 + ;; + esac + +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __ubuntu_derivatives_translation +# DESCRIPTION: Map Ubuntu derivatives to their Ubuntu base versions. +# If distro has a known Ubuntu base version, use those install +# functions by pretending to be Ubuntu (i.e. change global vars) +#---------------------------------------------------------------------------------------------------------------------- +# shellcheck disable=SC2034 +__ubuntu_derivatives_translation() { + UBUNTU_DERIVATIVES="(trisquel|linuxmint|linaro|elementary_os|neon)" + # Mappings + trisquel_6_ubuntu_base="12.04" + linuxmint_13_ubuntu_base="12.04" + linuxmint_17_ubuntu_base="14.04" + linuxmint_18_ubuntu_base="16.04" + linuxmint_19_ubuntu_base="18.04" + linaro_12_ubuntu_base="12.04" + elementary_os_02_ubuntu_base="12.04" + neon_16_ubuntu_base="16.04" + neon_18_ubuntu_base="18.04" + neon_20_ubuntu_base="20.04" + + # Translate Ubuntu derivatives to their base Ubuntu version + match=$(echo "$DISTRO_NAME_L" | grep -E ${UBUNTU_DERIVATIVES}) + + if [ "${match}" != "" ]; then + case $match in + "elementary_os") + _major=$(echo "$DISTRO_VERSION" | sed 's/\.//g') + ;; + "linuxmint") + export LSB_ETC_LSB_RELEASE=/etc/upstream-release/lsb-release + _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + ;; + *) + _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + ;; + esac + + _ubuntu_version=$(eval echo "\$${match}_${_major}_ubuntu_base") + + if [ "$_ubuntu_version" != "" ]; then + echodebug "Detected Ubuntu $_ubuntu_version derivative" + DISTRO_NAME_L="ubuntu" + DISTRO_VERSION="$_ubuntu_version" + fi + fi +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_dpkg_architecture +# DESCRIPTION: Determine the primary architecture for packages to install on Debian and derivatives +# and issue all necessary error messages. +#---------------------------------------------------------------------------------------------------------------------- +__check_dpkg_architecture() { + if __check_command_exists dpkg; then + DPKG_ARCHITECTURE="$(dpkg --print-architecture)" + else + echoerror "dpkg: command not found." + return 1 + fi + + __REPO_ARCH="$DPKG_ARCHITECTURE" + __REPO_ARCH_DEB='deb' + __return_code=0 + + case $DPKG_ARCHITECTURE in + "i386") + error_msg="$_REPO_URL likely doesn't have all required 32-bit packages for $DISTRO_NAME $DISTRO_MAJOR_VERSION." + # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location + __REPO_ARCH="amd64" + ;; + "amd64") + error_msg="" + ;; + "arm64") + if [ "$_CUSTOM_REPO_URL" != "null" ]; then + warn_msg="Support for arm64 is experimental, make sure the custom repository used has the expected structure and contents." 
+ else + # Saltstack official repository does not yet have arm64 metadata, + # use amd64 repositories on arm64, since all pkgs are arch-independent + __REPO_ARCH="amd64" + __REPO_ARCH_DEB="deb [arch=$__REPO_ARCH]" + warn_msg="Support for arm64 packages is experimental and might rely on architecture-independent packages from the amd64 repository." + fi + error_msg="" + ;; + "armhf") + if [ "$DISTRO_NAME_L" = "ubuntu" ] || [ "$DISTRO_MAJOR_VERSION" -lt 8 ]; then + error_msg="Support for armhf packages at $_REPO_URL is limited to Debian/Raspbian 8 platforms." + __return_code=1 + else + error_msg="" + fi + ;; + *) + error_msg="$_REPO_URL doesn't have packages for your system architecture: $DPKG_ARCHITECTURE." + __return_code=1 + ;; + esac + + if [ "${warn_msg:-}" != "" ]; then + # AArch64: Do not fail at this point, but warn the user about experimental support + # See https://github.com/saltstack/salt-bootstrap/issues/1240 + echowarn "${warn_msg}" + fi + if [ "${error_msg}" != "" ]; then + echoerror "${error_msg}" + if [ "$ITYPE" != "git" ]; then + echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2017.7.2." + echoerror "It may be necessary to use git installation mode with pip and disable the SaltStack apt repository." + echoerror "For example:" + echoerror " sh ${__ScriptName} -r -P git v2017.7.2" + fi + fi + + if [ "${__return_code}" -eq 0 ]; then + return 0 + else + return 1 + fi +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __ubuntu_codename_translation +# DESCRIPTION: Map Ubuntu major versions to their corresponding codenames +#---------------------------------------------------------------------------------------------------------------------- +# shellcheck disable=SC2034 +__ubuntu_codename_translation() { + case $DISTRO_MINOR_VERSION in + "04") + _april="yes" + ;; + "10") + _april="" + ;; + *) + _april="yes" + ;; + esac + + case $DISTRO_MAJOR_VERSION in + "12") + DISTRO_CODENAME="precise" + ;; + "14") + DISTRO_CODENAME="trusty" + ;; + "16") + DISTRO_CODENAME="xenial" + ;; + "18") + DISTRO_CODENAME="bionic" + ;; + "20") + DISTRO_CODENAME="focal" + ;; + *) + DISTRO_CODENAME="trusty" + ;; + esac +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __debian_derivatives_translation +# DESCRIPTION: Map Debian derivatives to their Debian base versions. +# If distro has a known Debian base version, use those install +# functions by pretending to be Debian (i.e. change global vars) +#---------------------------------------------------------------------------------------------------------------------- +# shellcheck disable=SC2034 +__debian_derivatives_translation() { + # If the file does not exist, return + [ ! 
-f /etc/os-release ] && return
+
+    DEBIAN_DERIVATIVES="(cumulus|devuan|kali|linuxmint|raspbian|bunsenlabs|turnkey)"
+    # Mappings
+    cumulus_2_debian_base="7.0"
+    cumulus_3_debian_base="8.0"
+    cumulus_4_debian_base="10.0"
+    devuan_1_debian_base="8.0"
+    devuan_2_debian_base="9.0"
+    kali_1_debian_base="7.0"
+    linuxmint_1_debian_base="8.0"
+    raspbian_8_debian_base="8.0"
+    raspbian_9_debian_base="9.0"
+    raspbian_10_debian_base="10.0"
+    bunsenlabs_9_debian_base="9.0"
+    turnkey_9_debian_base="9.0"
+
+    # Translate Debian derivatives to their base Debian version
+    match=$(echo "$DISTRO_NAME_L" | grep -E ${DEBIAN_DERIVATIVES})
+
+    if [ "${match}" != "" ]; then
+        case $match in
+            cumulus*)
+                _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+                _debian_derivative="cumulus"
+                ;;
+            devuan)
+                _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+                _debian_derivative="devuan"
+                ;;
+            kali)
+                _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+                _debian_derivative="kali"
+                ;;
+            linuxmint)
+                _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+                _debian_derivative="linuxmint"
+                ;;
+            raspbian)
+                _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+                _debian_derivative="raspbian"
+                ;;
+            bunsenlabs)
+                _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+                _debian_derivative="bunsenlabs"
+                ;;
+            turnkey)
+                _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
+                _debian_derivative="turnkey"
+                ;;
+        esac
+
+        _debian_version=$(eval echo "\$${_debian_derivative}_${_major}_debian_base" 2>/dev/null)
+
+        if [ "$_debian_version" != "" ]; then
+            echodebug "Detected Debian $_debian_version derivative"
+            DISTRO_NAME_L="debian"
+            DISTRO_VERSION="$_debian_version"
+            DISTRO_MAJOR_VERSION="$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')"
+        fi
+    fi
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __debian_codename_translation
+# DESCRIPTION: Map Debian major versions to their corresponding code names
+#----------------------------------------------------------------------------------------------------------------------
+# shellcheck disable=SC2034
+__debian_codename_translation() {
+
+    case $DISTRO_MAJOR_VERSION in
+        "7")
+            DISTRO_CODENAME="wheezy"
+            ;;
+        "8")
+            DISTRO_CODENAME="jessie"
+            ;;
+        "9")
+            DISTRO_CODENAME="stretch"
+            ;;
+        "10")
+            DISTRO_CODENAME="buster"
+            ;;
+        *)
+            DISTRO_CODENAME="jessie"
+            ;;
+    esac
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __check_end_of_life_versions
+# DESCRIPTION: Check for end of life distribution versions
+#----------------------------------------------------------------------------------------------------------------------
+__check_end_of_life_versions() {
+    case "${DISTRO_NAME_L}" in
+        debian)
+            # Debian versions below 8 are not supported
+            if [ "$DISTRO_MAJOR_VERSION" -lt 8 ]; then
+                echoerror "End of life distributions are not supported."
+                echoerror "Please consider upgrading to the next stable. See:"
+                echoerror "    https://wiki.debian.org/DebianReleases"
+                exit 1
+            fi
+            ;;
+
+        ubuntu)
+            # Ubuntu versions not supported
+            #
+            # < 14.04
+            # = 14.10
+            # = 15.04, 15.10
+            # = 16.10
+            # = 17.04, 17.10
+            if [ "$DISTRO_MAJOR_VERSION" -lt 14 ] || \
+                [ "$DISTRO_MAJOR_VERSION" -eq 15 ] || \
+                [ "$DISTRO_MAJOR_VERSION" -eq 17 ] || \
+                { [ "$DISTRO_MAJOR_VERSION" -eq 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; }; then
+                echoerror "End of life distributions are not supported."
+                echoerror "Please consider upgrading to the next stable. See:"
+                echoerror "    https://wiki.ubuntu.com/Releases"
+                exit 1
+            fi
+            ;;
+
+        opensuse)
+            # openSUSE versions not supported
+            #
+            # <= 13.X
+            # <= 42.2
+            if [ "$DISTRO_MAJOR_VERSION" -lt 15 ] || \
+                { [ "$DISTRO_MAJOR_VERSION" -eq 42 ] && [ "$DISTRO_MINOR_VERSION" -le 2 ]; }; then
+                echoerror "End of life distributions are not supported."
+                echoerror "Please consider upgrading to the next stable. See:"
+                echoerror "    http://en.opensuse.org/Lifetime"
+                exit 1
+            fi
+            ;;
+
+        suse)
+            # SuSE versions not supported
+            #
+            # < 11 SP4
+            # < 12 SP2
+            # < 15 SP1
+            SUSE_PATCHLEVEL=$(awk -F'=' '/VERSION_ID/ { print $2 }' /etc/os-release | grep -oP "\.\K\w+")
+            if [ "${SUSE_PATCHLEVEL}" = "" ]; then
+                SUSE_PATCHLEVEL="00"
+            fi
+            if [ "$DISTRO_MAJOR_VERSION" -lt 11 ] || \
+                { [ "$DISTRO_MAJOR_VERSION" -eq 11 ] && [ "$SUSE_PATCHLEVEL" -lt 04 ]; } || \
+                { [ "$DISTRO_MAJOR_VERSION" -eq 15 ] && [ "$SUSE_PATCHLEVEL" -lt 01 ]; } || \
+                { [ "$DISTRO_MAJOR_VERSION" -eq 12 ] && [ "$SUSE_PATCHLEVEL" -lt 02 ]; }; then
+                echoerror "Versions lower than SuSE 11 SP4, 12 SP2 or 15 SP1 are not supported."
+                echoerror "Please consider upgrading to the next stable"
+                echoerror "    https://www.suse.com/lifecycle/"
+                exit 1
+            fi
+            ;;
+
+        fedora)
+            # Fedora versions lower than 30 are no longer supported
+            if [ "$DISTRO_MAJOR_VERSION" -lt 30 ]; then
+                echoerror "End of life distributions are not supported."
+                echoerror "Please consider upgrading to the next stable. See:"
+                echoerror "    https://fedoraproject.org/wiki/Releases"
+                exit 1
+            fi
+            ;;
+
+        centos)
+            # CentOS versions lower than 6 are no longer supported
+            if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then
+                echoerror "End of life distributions are not supported."
+                echoerror "Please consider upgrading to the next stable. See:"
+                echoerror "    http://wiki.centos.org/Download"
+                exit 1
+            fi
+            ;;
+
+        red_hat*linux)
+            # Red Hat (Enterprise) Linux versions lower than 6 are no longer supported
+            if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then
+                echoerror "End of life distributions are not supported."
+                echoerror "Please consider upgrading to the next stable. See:"
+                echoerror "    https://access.redhat.com/support/policy/updates/errata/"
+                exit 1
+            fi
+            ;;
+
+        oracle*linux)
+            # Oracle Linux versions lower than 6 are no longer supported
+            if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then
+                echoerror "End of life distributions are not supported."
+                echoerror "Please consider upgrading to the next stable. See:"
+                echoerror "    http://www.oracle.com/us/support/library/elsp-lifetime-069338.pdf"
+                exit 1
+            fi
+            ;;
+
+        scientific*linux)
+            # Scientific Linux versions lower than 6 are no longer supported
+            if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then
+                echoerror "End of life distributions are not supported."
+                echoerror "Please consider upgrading to the next stable.
See:" + echoerror " https://www.scientificlinux.org/downloads/sl-versions/" + exit 1 + fi + ;; + + cloud*linux) + # Cloud Linux versions lower than 6 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 6 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://docs.cloudlinux.com/index.html?cloudlinux_life-cycle.html" + exit 1 + fi + ;; + + amazon*linux*ami) + # Amazon Linux versions lower than 2012.0X no longer supported + # Except for Amazon Linux 2, which reset the major version counter + if [ "$DISTRO_MAJOR_VERSION" -lt 2012 ] && [ "$DISTRO_MAJOR_VERSION" -gt 10 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://aws.amazon.com/amazon-linux-ami/" + exit 1 + fi + ;; + + freebsd) + # FreeBSD versions lower than 11 are EOL + if [ "$DISTRO_MAJOR_VERSION" -lt 11 ]; then + echoerror "Versions lower than FreeBSD 11 are EOL and no longer supported." + exit 1 + fi + ;; + + *) + ;; + esac +} + + +__gather_system_info + +echo +echoinfo "System Information:" +echoinfo " CPU: ${CPU_VENDOR_ID}" +echoinfo " CPU Arch: ${CPU_ARCH}" +echoinfo " OS Name: ${OS_NAME}" +echoinfo " OS Version: ${OS_VERSION}" +echoinfo " Distribution: ${DISTRO_NAME} ${DISTRO_VERSION}" +echo + +# Simplify distro name naming on functions +DISTRO_NAME_L=$(echo "$DISTRO_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-zA-Z0-9_ ]//g' | sed -Ee 's/([[:space:]])+/_/g' | sed -Ee 's/tumbleweed//' ) + +# Simplify version naming on functions +if [ "$DISTRO_VERSION" = "" ] || [ ${_SIMPLIFY_VERSION} -eq $BS_FALSE ]; then + DISTRO_MAJOR_VERSION="" + DISTRO_MINOR_VERSION="" + PREFIXED_DISTRO_MAJOR_VERSION="" + PREFIXED_DISTRO_MINOR_VERSION="" +else + DISTRO_MAJOR_VERSION=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + DISTRO_MINOR_VERSION=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).\([0-9]*\).*/\2/g') + PREFIXED_DISTRO_MAJOR_VERSION="_${DISTRO_MAJOR_VERSION}" + if [ "${PREFIXED_DISTRO_MAJOR_VERSION}" = "_" ]; then + PREFIXED_DISTRO_MAJOR_VERSION="" + fi + PREFIXED_DISTRO_MINOR_VERSION="_${DISTRO_MINOR_VERSION}" + if [ "${PREFIXED_DISTRO_MINOR_VERSION}" = "_" ]; then + PREFIXED_DISTRO_MINOR_VERSION="" + fi +fi + +# For Ubuntu derivatives, pretend to be their Ubuntu base version +__ubuntu_derivatives_translation + +# For Debian derivates, pretend to be their Debian base version +__debian_derivatives_translation + +# Fail soon for end of life versions +__check_end_of_life_versions + +echodebug "Binaries will be searched using the following \$PATH: ${PATH}" + +# Let users know that we'll use a proxy +if [ "${_HTTP_PROXY}" != "" ]; then + echoinfo "Using http proxy $_HTTP_PROXY" +fi + +# Let users know what's going to be installed/configured +if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Installing minion" + else + echoinfo "Configuring minion" + fi +fi + +if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Installing master" + else + echoinfo "Configuring master" + fi +fi + +if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Installing syndic" + else + echoinfo "Configuring syndic" + fi +fi + +if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Installing salt-cloud and required python-libcloud package" +fi + +if [ $_START_DAEMONS -eq 
$BS_FALSE ]; then + echoinfo "Daemons will not be started" +fi + +if [ "${DISTRO_NAME_L}" = "ubuntu" ]; then + # For ubuntu versions, obtain the codename from the release version + __ubuntu_codename_translation +elif [ "${DISTRO_NAME_L}" = "debian" ]; then + # For debian versions, obtain the codename from the release version + __debian_codename_translation +fi + +if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(debian|ubuntu|centos|red_hat|oracle|scientific|amazon|fedora|macosx)')" = "" ] && [ "$ITYPE" = "stable" ] && [ "$STABLE_REV" != "latest" ]; then + echoerror "${DISTRO_NAME} does not have major version pegged packages support" + exit 1 +fi + +# Only RedHat based distros have testing support +if [ "${ITYPE}" = "testing" ]; then + if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(centos|red_hat|amazon|oracle)')" = "" ]; then + echoerror "${DISTRO_NAME} does not have testing packages support" + exit 1 + fi + _EPEL_REPO="epel-testing" +fi + +# Only Ubuntu has support for installing to virtualenvs +if [ "${DISTRO_NAME_L}" != "ubuntu" ] && [ "$_VIRTUALENV_DIR" != "null" ]; then + echoerror "${DISTRO_NAME} does not have -V support" + exit 1 +fi + +# Only Ubuntu has support for pip installing all packages +if [ "${DISTRO_NAME_L}" != "ubuntu" ] && [ $_PIP_ALL -eq $BS_TRUE ]; then + echoerror "${DISTRO_NAME} does not have -a support" + exit 1 +fi + +if [ "$ITYPE" = "git" ]; then + + if [ "${GIT_REV}" = "master" ]; then + _POST_NEON_INSTALL=$BS_TRUE + __TAG_REGEX_MATCH="MATCH" + else + case ${OS_NAME_L} in + openbsd|freebsd|netbsd|darwin ) + __NEW_VS_TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed -E 's/^(v?3[0-9]{3}(\.[0-9]{1,2})?).*$/MATCH/') + if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then + _POST_NEON_INSTALL=$BS_TRUE + __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" + if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then + # We do this to properly clone tags + GIT_REV="v${GIT_REV}" + fi + echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" + else + __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed -E 's/^(v?[0-9]{1,4}\.[0-9]{1,2})(\.[0-9]{1,2})?.*$/MATCH/') + echodebug "Pre Neon Tag Regex Match On: ${GIT_REV}" + fi + ;; + * ) + __NEW_VS_TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed 's/^.*\(v\?3[[:digit:]]\{3\}\(\.[[:digit:]]\{1,2\}\)\?\).*$/MATCH/') + if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then + _POST_NEON_INSTALL=$BS_TRUE + __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" + if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then + # We do this to properly clone tags + GIT_REV="v${GIT_REV}" + fi + echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" + else + __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed 's/^.*\(v\?[[:digit:]]\{1,4\}\.[[:digit:]]\{1,2\}\)\(\.[[:digit:]]\{1,2\}\)\?.*$/MATCH/') + echodebug "Pre Neon Tag Regex Match On: ${GIT_REV}" + fi + ;; + esac + fi + + if [ "$_POST_NEON_INSTALL" -eq $BS_TRUE ]; then + echo + echowarn "Post Neon git based installations will always install salt" + echowarn "and its dependencies using pip which will be upgraded to" + echowarn "at least v${_MINIMUM_PIP_VERSION}, and, in case the setuptools version is also" + echowarn "too old, it will be upgraded to at least v${_MINIMUM_SETUPTOOLS_VERSION}" + echo + echowarn "You have 10 seconds to cancel and stop the bootstrap process..." 
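+        # Note: the sleep below is what provides the advertised 10 second window to abort with Ctrl-C.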
+ echo + sleep 10 + _PIP_ALLOWED=$BS_TRUE + fi +fi + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __function_defined +# DESCRIPTION: Checks if a function is defined within this scripts scope +# PARAMETERS: function name +# RETURNS: 0 or 1 as in defined or not defined +#---------------------------------------------------------------------------------------------------------------------- +__function_defined() { + FUNC_NAME=$1 + if [ "$(command -v "$FUNC_NAME")" != "" ]; then + echoinfo "Found function $FUNC_NAME" + return 0 + fi + echodebug "$FUNC_NAME not found...." + return 1 +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __wait_for_apt +# DESCRIPTION: Check if any apt, apt-get, aptitude, or dpkg processes are running before +# calling these again. This is useful when these process calls are part of +# a boot process, such as on AWS AMIs. This func will wait until the boot +# process is finished so the script doesn't exit on a locked proc. +#---------------------------------------------------------------------------------------------------------------------- +__wait_for_apt(){ + # Timeout set at 15 minutes + WAIT_TIMEOUT=900 + + # Run our passed in apt command + "${@}" 2>"$APT_ERR" + APT_RETURN=$? + + # Make sure we're not waiting on a lock + while [ $APT_RETURN -ne 0 ] && grep -q '^E: Could not get lock' "$APT_ERR"; do + echoinfo "Aware of the lock. Patiently waiting $WAIT_TIMEOUT more seconds..." + sleep 1 + WAIT_TIMEOUT=$((WAIT_TIMEOUT - 1)) + + if [ "$WAIT_TIMEOUT" -eq 0 ]; then + echoerror "Apt, apt-get, aptitude, or dpkg process is taking too long." + echoerror "Bootstrap script cannot proceed. Aborting." + return 1 + else + "${@}" 2>"$APT_ERR" + APT_RETURN=$? + fi + done + + return $APT_RETURN +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __apt_get_install_noinput +# DESCRIPTION: (DRY) apt-get install with noinput options +# PARAMETERS: packages +#---------------------------------------------------------------------------------------------------------------------- +__apt_get_install_noinput() { + __wait_for_apt apt-get install -y -o DPkg::Options::=--force-confold "${@}"; return $? +} # ---------- end of function __apt_get_install_noinput ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __apt_get_upgrade_noinput +# DESCRIPTION: (DRY) apt-get upgrade with noinput options +#---------------------------------------------------------------------------------------------------------------------- +__apt_get_upgrade_noinput() { + __wait_for_apt apt-get upgrade -y -o DPkg::Options::=--force-confold; return $? +} # ---------- end of function __apt_get_upgrade_noinput ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __temp_gpg_pub +# DESCRIPTION: Create a temporary file for downloading a GPG public key. 
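+#              Illustrative use (mirroring __apt_key_fetch and __rpm_import_gpg below): tempfile="$(__temp_gpg_pub)"
+#              captures a freshly created path of the form /tmp/salt-gpg-XXXXXXXX.pub from stdout.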
+#---------------------------------------------------------------------------------------------------------------------- +__temp_gpg_pub() { + if __check_command_exists mktemp; then + tempfile="$(mktemp /tmp/salt-gpg-XXXXXXXX.pub 2>/dev/null)" + + if [ -z "$tempfile" ]; then + echoerror "Failed to create temporary file in /tmp" + return 1 + fi + else + tempfile="/tmp/salt-gpg-$$.pub" + fi + + echo $tempfile +} # ----------- end of function __temp_gpg_pub ----------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __apt_key_fetch +# DESCRIPTION: Download and import GPG public key for "apt-secure" +# PARAMETERS: url +#---------------------------------------------------------------------------------------------------------------------- +__apt_key_fetch() { + url=$1 + + tempfile="$(__temp_gpg_pub)" + + __fetch_url "$tempfile" "$url" || return 1 + apt-key add "$tempfile" || return 1 + rm -f "$tempfile" + + return 0 +} # ---------- end of function __apt_key_fetch ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __rpm_import_gpg +# DESCRIPTION: Download and import GPG public key to rpm database +# PARAMETERS: url +#---------------------------------------------------------------------------------------------------------------------- +__rpm_import_gpg() { + url=$1 + + tempfile="$(__temp_gpg_pub)" + + __fetch_url "$tempfile" "$url" || return 1 + + # At least on CentOS 8, a missing newline at the end causes: + # error: /tmp/salt-gpg-n1gKUb1u.pub: key 1 not an armored public key. + # shellcheck disable=SC1003,SC2086 + sed -i -e '$a\' $tempfile + + rpm --import "$tempfile" || return 1 + rm -f "$tempfile" + + return 0 +} # ---------- end of function __rpm_import_gpg ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __yum_install_noinput +# DESCRIPTION: (DRY) yum install with noinput options +#---------------------------------------------------------------------------------------------------------------------- +__yum_install_noinput() { + + ENABLE_EPEL_CMD="" + # Skip Amazon Linux for the first round, since EPEL is no longer required. + # See issue #724 + if [ $_DISABLE_REPOS -eq $BS_FALSE ] && [ "$DISTRO_NAME_L" != "amazon_linux_ami" ]; then + ENABLE_EPEL_CMD="--enablerepo=${_EPEL_REPO}" + fi + + if [ "$DISTRO_NAME_L" = "oracle_linux" ]; then + # We need to install one package at a time because --enablerepo=X disables ALL OTHER REPOS!!!! + for package in "${@}"; do + yum -y install "${package}" || yum -y install "${package}" ${ENABLE_EPEL_CMD} || return $? + done + else + yum -y install "${@}" ${ENABLE_EPEL_CMD} || return $? + fi +} # ---------- end of function __yum_install_noinput ---------- + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __dnf_install_noinput +# DESCRIPTION: (DRY) dnf install with noinput options +#---------------------------------------------------------------------------------------------------------------------- +__dnf_install_noinput() { + + dnf -y install "${@}" || return $? 
+} # ---------- end of function __dnf_install_noinput ----------
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __git_clone_and_checkout
+# DESCRIPTION: (DRY) Helper function to clone and checkout salt to a
+#              specific revision.
+#----------------------------------------------------------------------------------------------------------------------
+__git_clone_and_checkout() {
+
+    echodebug "Installed git version: $(git --version | awk '{ print $3 }')"
+    # Turn off SSL verification if -I flag was set for insecure downloads
+    if [ "$_INSECURE_DL" -eq $BS_TRUE ]; then
+        export GIT_SSL_NO_VERIFY=1
+    fi
+
+    __SALT_GIT_CHECKOUT_PARENT_DIR=$(dirname "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null)
+    __SALT_GIT_CHECKOUT_PARENT_DIR="${__SALT_GIT_CHECKOUT_PARENT_DIR:-/tmp/git}"
+    __SALT_CHECKOUT_REPONAME="$(basename "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null)"
+    __SALT_CHECKOUT_REPONAME="${__SALT_CHECKOUT_REPONAME:-salt}"
+    [ -d "${__SALT_GIT_CHECKOUT_PARENT_DIR}" ] || mkdir "${__SALT_GIT_CHECKOUT_PARENT_DIR}"
+    # shellcheck disable=SC2164
+    cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}"
+    if [ -d "${_SALT_GIT_CHECKOUT_DIR}" ]; then
+        echodebug "Found a checked out Salt repository"
+        # shellcheck disable=SC2164
+        cd "${_SALT_GIT_CHECKOUT_DIR}"
+        echodebug "Fetching git changes"
+        git fetch || return 1
+        # Tags are needed because of salt's versioning, so also fetch those
+        echodebug "Fetching git tags"
+        git fetch --tags || return 1
+
+        # If we have the SaltStack remote set as upstream, we also need to fetch the tags from there
+        if [ "$(git remote -v | grep $_SALTSTACK_REPO_URL)" != "" ]; then
+            echodebug "Fetching upstream (SaltStack's Salt repository) git tags"
+            git fetch --tags upstream
+        else
+            echoinfo "Adding SaltStack's Salt repository as a remote"
+            git remote add upstream "$_SALTSTACK_REPO_URL"
+            echodebug "Fetching upstream (SaltStack's Salt repository) git tags"
+            git fetch --tags upstream
+        fi
+
+        echodebug "Hard resetting the cloned repository to ${GIT_REV}"
+        git reset --hard "$GIT_REV" || return 1
+
+        # Just calling `git reset --hard $GIT_REV` on a branch name that has
+        # already been checked out will not update that branch to the upstream
+        # HEAD; instead it will simply reset to itself. Check the ref to see
+        # if it is a branch name, check out the branch, and pull in the
+        # changes.
+        if git branch -a | grep -q "${GIT_REV}"; then
+            echodebug "Rebasing the cloned repository branch"
+            git pull --rebase || return 1
+        fi
+    else
+        if [ "$_FORCE_SHALLOW_CLONE" -eq "${BS_TRUE}" ]; then
+            echoinfo "Forced shallow cloning of git repository."
+            __SHALLOW_CLONE=$BS_TRUE
+        elif [ "$__TAG_REGEX_MATCH" = "MATCH" ]; then
+            echoinfo "Git revision matches a Salt version tag, shallow cloning enabled."
+            __SHALLOW_CLONE=$BS_TRUE
+        else
+            echowarn "The git revision being installed does not match a Salt version tag. Shallow cloning disabled."
+            __SHALLOW_CLONE=$BS_FALSE
+        fi
+
+        if [ "$__SHALLOW_CLONE" -eq $BS_TRUE ]; then
+            # Let's try shallow cloning to speed up.
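+            # (A shallow clone only fetches the single requested revision rather than the full
+            # history, which is why it is attempted only when GIT_REV matched a version tag above
+            # or shallow cloning was forced.)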
+            # Test for the "--single-branch" option introduced in git 1.7.10, the minimal version of git where the
+            # shallow cloning we need actually works
+            if [ "$(git clone 2>&1 | grep 'single-branch')" != "" ]; then
+                # The "--single-branch" option is supported, attempt shallow cloning
+                echoinfo "Attempting to shallow clone $GIT_REV from Salt's repository ${_SALT_REPO_URL}"
+                if git clone --depth 1 --branch "$GIT_REV" "$_SALT_REPO_URL" "$__SALT_CHECKOUT_REPONAME"; then
+                    # shellcheck disable=SC2164
+                    cd "${_SALT_GIT_CHECKOUT_DIR}"
+                    __SHALLOW_CLONE=$BS_TRUE
+                else
+                    # The shallow clone above failed (missing upstream tags?), so resume the old behaviour.
+                    echowarn "Failed to shallow clone."
+                    echoinfo "Resuming regular git clone and remote SaltStack repository addition procedure"
+                    __SHALLOW_CLONE=$BS_FALSE
+                fi
+            else
+                echodebug "Shallow cloning not possible. Required git version not met."
+                __SHALLOW_CLONE=$BS_FALSE
+            fi
+        fi
+
+        if [ "$__SHALLOW_CLONE" -eq $BS_FALSE ]; then
+            git clone "$_SALT_REPO_URL" "$__SALT_CHECKOUT_REPONAME" || return 1
+            # shellcheck disable=SC2164
+            cd "${_SALT_GIT_CHECKOUT_DIR}"
+
+            if ! echo "$_SALT_REPO_URL" | grep -q -F -w "${_SALTSTACK_REPO_URL#*://}"; then
+                # We need to add the saltstack repository as a remote and fetch tags for proper versioning
+                echoinfo "Adding SaltStack's Salt repository as a remote"
+                git remote add upstream "$_SALTSTACK_REPO_URL" || return 1
+
+                echodebug "Fetching upstream (SaltStack's Salt repository) git tags"
+                git fetch --tags upstream || return 1
+
+                # Check if GIT_REV is a remote branch or just a commit hash
+                if git branch -r | grep -q -F -w "origin/$GIT_REV"; then
+                    GIT_REV="origin/$GIT_REV"
+                fi
+            fi
+
+            echodebug "Checking out $GIT_REV"
+            git checkout "$GIT_REV" || return 1
+        fi
+
+    fi
+
+    echoinfo "Cloning Salt's git repository succeeded"
+    return 0
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __copyfile
+# DESCRIPTION: Simple function to copy files. Overwrites if asked.
+#----------------------------------------------------------------------------------------------------------------------
+__copyfile() {
+    overwrite=$_FORCE_OVERWRITE
+    if [ $# -eq 2 ]; then
+        sfile=$1
+        dfile=$2
+    elif [ $# -eq 3 ]; then
+        sfile=$1
+        dfile=$2
+        overwrite=$3
+    else
+        echoerror "Wrong number of arguments for __copyfile()"
+        echoinfo "USAGE: __copyfile <source> <dest>  OR  __copyfile <source> <dest> <overwrite>"
+        exit 1
+    fi
+
+    # Does the source file exist?
+    if [ ! -f "$sfile" ]; then
+        echowarn "$sfile does not exist!"
+        return 1
+    fi
+
+    # If the destination is a directory, let's make it a full path so the logic
+    # below works as expected
+    if [ -d "$dfile" ]; then
+        echodebug "The passed destination ($dfile) is a directory"
+        dfile="${dfile}/$(basename "$sfile")"
+        echodebug "Full destination path is now: $dfile"
+    fi
+
+    if [ ! -f "$dfile" ]; then
+        # The destination file does not exist, copy
+        echodebug "Copying $sfile to $dfile"
+        cp "$sfile" "$dfile" || return 1
+    elif [ -f "$dfile" ] && [ "$overwrite" -eq $BS_TRUE ]; then
+        # The destination exists and we're overwriting
+        echodebug "Overwriting $dfile with $sfile"
+        cp -f "$sfile" "$dfile" || return 1
+    elif [ -f "$dfile" ] && [ "$overwrite" -ne $BS_TRUE ]; then
+        echodebug "Not overwriting $dfile with $sfile"
+    fi
+    return 0
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __movefile
+# DESCRIPTION: Simple function to move files. Overwrites if asked.
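+#              Illustrative call (hypothetical paths): __movefile /tmp/minion.conf /etc/salt/minion $BS_TRUE
+#              moves the file and overwrites an existing destination.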
+#----------------------------------------------------------------------------------------------------------------------
+__movefile() {
+    overwrite=$_FORCE_OVERWRITE
+    if [ $# -eq 2 ]; then
+        sfile=$1
+        dfile=$2
+    elif [ $# -eq 3 ]; then
+        sfile=$1
+        dfile=$2
+        overwrite=$3
+    else
+        echoerror "Wrong number of arguments for __movefile()"
+        echoinfo "USAGE: __movefile <source> <dest>  OR  __movefile <source> <dest> <overwrite>"
+        exit 1
+    fi
+
+    if [ $_KEEP_TEMP_FILES -eq $BS_TRUE ]; then
+        # We're being told not to move files, instead copy them so we can keep
+        # them around
+        echodebug "Since BS_KEEP_TEMP_FILES=1 we're copying files instead of moving them"
+        __copyfile "$sfile" "$dfile" "$overwrite"
+        return $?
+    fi
+
+    # Does the source file exist?
+    if [ ! -f "$sfile" ]; then
+        echowarn "$sfile does not exist!"
+        return 1
+    fi
+
+    # If the destination is a directory, let's make it a full path so the logic
+    # below works as expected
+    if [ -d "$dfile" ]; then
+        echodebug "The passed destination ($dfile) is a directory"
+        dfile="${dfile}/$(basename "$sfile")"
+        echodebug "Full destination path is now: $dfile"
+    fi
+
+    if [ ! -f "$dfile" ]; then
+        # The destination file does not exist, move
+        echodebug "Moving $sfile to $dfile"
+        mv "$sfile" "$dfile" || return 1
+    elif [ -f "$dfile" ] && [ "$overwrite" -eq $BS_TRUE ]; then
+        # The destination exists and we're overwriting
+        echodebug "Overwriting $dfile with $sfile"
+        mv -f "$sfile" "$dfile" || return 1
+    elif [ -f "$dfile" ] && [ "$overwrite" -ne $BS_TRUE ]; then
+        echodebug "Not overwriting $dfile with $sfile"
+    fi
+
+    return 0
+}
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __linkfile
+# DESCRIPTION: Simple function to create symlinks. Overwrites if asked. Accepts globs.
+#----------------------------------------------------------------------------------------------------------------------
+__linkfile() {
+    overwrite=$_FORCE_OVERWRITE
+    if [ $# -eq 2 ]; then
+        target=$1
+        linkname=$2
+    elif [ $# -eq 3 ]; then
+        target=$1
+        linkname=$2
+        overwrite=$3
+    else
+        echoerror "Wrong number of arguments for __linkfile()"
+        echoinfo "USAGE: __linkfile <target> <link name>  OR  __linkfile <target> <link name> <overwrite>"
+        exit 1
+    fi
+
+    for sfile in $target; do
+        # Does the source file exist?
+        if [ ! -f "$sfile" ]; then
+            echowarn "$sfile does not exist!"
+            return 1
+        fi
+
+        # If the destination is a directory, let's make it a full path so the logic
+        # below works as expected
+        if [ -d "$linkname" ]; then
+            echodebug "The passed link name ($linkname) is a directory"
+            linkname="${linkname}/$(basename "$sfile")"
+            echodebug "Full destination path is now: $linkname"
+        fi
+
+        if [ ! -e "$linkname" ]; then
+            # The destination file does not exist, create the link
+            echodebug "Creating $linkname symlink pointing to $sfile"
+            ln -s "$sfile" "$linkname" || return 1
+        elif [ -e "$linkname" ] && [ "$overwrite" -eq $BS_TRUE ]; then
+            # The destination exists and we're overwriting
+            echodebug "Overwriting $linkname symlink to point at $sfile"
+            ln -sf "$sfile" "$linkname" || return 1
+        elif [ -e "$linkname" ] && [ "$overwrite" -ne $BS_TRUE ]; then
+            echodebug "Not overwriting $linkname symlink to point at $sfile"
+        fi
+    done
+
+    return 0
+}
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __overwriteconfig()
+# DESCRIPTION: Simple function to overwrite master or minion config files.
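+#              Illustrative call (hypothetical values): __overwriteconfig "/etc/salt/minion" '{"master": "localhost"}'
+#              renders the JSON string as YAML and writes it to the given config file.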
+#----------------------------------------------------------------------------------------------------------------------
+__overwriteconfig() {
+    if [ $# -eq 2 ]; then
+        target=$1
+        json=$2
+    else
+        echoerror "Wrong number of arguments for __overwriteconfig()"
+        echoinfo "USAGE: __overwriteconfig <configfile> <jsonstring>"
+        exit 1
+    fi
+
+    # Make a tempfile to dump any python errors into.
+    if __check_command_exists mktemp; then
+        tempfile="$(mktemp /tmp/salt-config-XXXXXXXX 2>/dev/null)"
+
+        if [ -z "$tempfile" ]; then
+            echoerror "Failed to create temporary file in /tmp"
+            return 1
+        fi
+    else
+        tempfile="/tmp/salt-config-$$"
+    fi
+
+    if [ -n "$_PY_EXE" ]; then
+        good_python="$_PY_EXE"
+    # If python does not have yaml installed we're on Arch and should use python2
+    elif python -c "import yaml" 2> /dev/null; then
+        good_python=python
+    else
+        good_python=python2
+    fi
+
+    # Convert the JSON string to a YAML string and write it to the config file. Output is dumped into tempfile.
+    "$good_python" -c "import json; import yaml; jsn=json.loads('$json'); yml=yaml.safe_dump(jsn, line_break='\\n', default_flow_style=False); config_file=open('$target', 'w'); config_file.write(yml); config_file.close();" 2>$tempfile
+
+    # No python errors output to the tempfile
+    if [ ! -s "$tempfile" ]; then
+        rm -f "$tempfile"
+        return 0
+    fi
+
+    # Errors are present in the tempfile - let's expose them to the user.
+    fullerror=$(cat "$tempfile")
+    echodebug "$fullerror"
+    echoerror "Python error encountered. This is likely due to passing in a malformed JSON string. Please use -D to see the stacktrace."
+
+    rm -f "$tempfile"
+
+    return 1
+
+}
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __check_services_systemd
+# DESCRIPTION: Return 0 or 1 in case the service is enabled or not
+# PARAMETERS: servicename
+#----------------------------------------------------------------------------------------------------------------------
+__check_services_systemd() {
+    if [ $# -eq 0 ]; then
+        echoerror "You need to pass a service name to check!"
+        exit 1
+    elif [ $# -ne 1 ]; then
+        echoerror "You need to pass a service name to check as the single argument to the function"
+    fi
+
+    servicename=$1
+    echodebug "Checking if service ${servicename} is enabled"
+
+    if [ "$(systemctl is-enabled "${servicename}")" = "enabled" ]; then
+        echodebug "Service ${servicename} is enabled"
+        return 0
+    else
+        echodebug "Service ${servicename} is NOT enabled"
+        return 1
+    fi
+} # ---------- end of function __check_services_systemd ----------
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __check_services_upstart
+# DESCRIPTION: Return 0 or 1 in case the service is enabled or not
+# PARAMETERS: servicename
+#----------------------------------------------------------------------------------------------------------------------
+__check_services_upstart() {
+    if [ $# -eq 0 ]; then
+        echoerror "You need to pass a service name to check!"
+ exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + # Check if service is enabled to start at boot + if initctl list | grep "${servicename}" > /dev/null 2>&1; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_upstart ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_sysvinit +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_sysvinit() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" + exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + if [ "$(LC_ALL=C /sbin/chkconfig --list | grep "\\<${servicename}\\>" | grep '[2-5]:on')" != "" ]; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_sysvinit ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_debian +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_debian() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" + exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + # Check if the service is going to be started at any runlevel, fixes bootstrap in container (Docker, LXC) + if ls /etc/rc?.d/S*"${servicename}" >/dev/null 2>&1; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_debian ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_openbsd +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_openbsd() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" 
+        exit 1
+    elif [ $# -ne 1 ]; then
+        echoerror "You need to pass a service name to check as the single argument to the function"
+    fi
+
+    servicename=$1
+    echodebug "Checking if service ${servicename} is enabled"
+
+    # shellcheck disable=SC2086,SC2046,SC2144
+    if rcctl get ${servicename} status; then
+        echodebug "Service ${servicename} is enabled"
+        return 0
+    else
+        echodebug "Service ${servicename} is NOT enabled"
+        return 1
+    fi
+} # ---------- end of function __check_services_openbsd ----------
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __check_services_alpine
+# DESCRIPTION: Return 0 or 1 in case the service is enabled or not
+# PARAMETERS: servicename
+#----------------------------------------------------------------------------------------------------------------------
+__check_services_alpine() {
+    if [ $# -eq 0 ]; then
+        echoerror "You need to pass a service name to check!"
+        exit 1
+    elif [ $# -ne 1 ]; then
+        echoerror "You need to pass a service name to check as the single argument to the function"
+    fi
+
+    servicename=$1
+    echodebug "Checking if service ${servicename} is enabled"
+
+    # shellcheck disable=SC2086,SC2046,SC2144
+    if rc-status $(rc-status -r) | tail -n +2 | grep -q "\\<$servicename\\>"; then
+        echodebug "Service ${servicename} is enabled"
+        return 0
+    else
+        echodebug "Service ${servicename} is NOT enabled"
+        return 1
+    fi
+} # ---------- end of function __check_services_alpine ----------
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __create_virtualenv
+# DESCRIPTION: Return 0 or 1 depending on successful creation of virtualenv
+#----------------------------------------------------------------------------------------------------------------------
+__create_virtualenv() {
+    if [ ! -d "$_VIRTUALENV_DIR" ]; then
+        echoinfo "Creating virtualenv ${_VIRTUALENV_DIR}"
+        if [ $_PIP_ALL -eq $BS_TRUE ]; then
+            virtualenv --no-site-packages "${_VIRTUALENV_DIR}" || return 1
+        else
+            virtualenv --system-site-packages "${_VIRTUALENV_DIR}" || return 1
+        fi
+    fi
+    return 0
+} # ---------- end of function __create_virtualenv ----------
+
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __activate_virtualenv
+# DESCRIPTION: Return 0 or 1 depending on successful activation of virtualenv
+#----------------------------------------------------------------------------------------------------------------------
+__activate_virtualenv() {
+    set +o nounset
+    # Is the virtualenv variable empty?
+    if [ -z "$_VIRTUALENV_DIR" ]; then
+        __create_virtualenv || return 1
+        # shellcheck source=/dev/null
+        . "${_VIRTUALENV_DIR}/bin/activate" || return 1
+        echoinfo "Activated virtualenv ${_VIRTUALENV_DIR}"
+    fi
+    set -o nounset
+    return 0
+} # ---------- end of function __activate_virtualenv ----------
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __install_pip_pkgs
+# DESCRIPTION: Return 0 or 1 if successfully able to install pip packages. Can provide a different python version to
+#              install pip packages with. If $py_ver is not specified it will use the default python version.
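+#              Illustrative call (hypothetical python executable name): __install_pip_pkgs "tornado<5.0" "python3"
+#              mirrors how __install_tornado_pip below invokes this function.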
+# PARAMETERS: pkgs, py_ver
+#----------------------------------------------------------------------------------------------------------------------
+
+__install_pip_pkgs() {
+    _pip_pkgs="$1"
+    _py_exe="$2"
+
+    # Default the interpreter first, so the package prefix and pip command
+    # below are derived from the interpreter that will actually be used.
+    if [ "${_py_exe}" = "" ]; then
+        _py_exe='python'
+    fi
+
+    _py_pkg=$(echo "$_py_exe" | sed -E "s/\\.//g")
+    _pip_cmd="${_py_exe} -m pip"
+
+    __check_pip_allowed
+
+    # Install pip and pip dependencies
+    if ! __check_command_exists "${_pip_cmd} --version"; then
+        __PACKAGES="${_py_pkg}-setuptools ${_py_pkg}-pip gcc"
+        # shellcheck disable=SC2086
+        if [ "$DISTRO_NAME_L" = "debian" ] || [ "$DISTRO_NAME_L" = "ubuntu" ]; then
+            __PACKAGES="${__PACKAGES} ${_py_pkg}-dev"
+            __apt_get_install_noinput ${__PACKAGES} || return 1
+        else
+            __PACKAGES="${__PACKAGES} ${_py_pkg}-devel"
+            if [ "$DISTRO_NAME_L" = "fedora" ]; then
+                __dnf_install_noinput ${__PACKAGES} || return 1
+            else
+                __yum_install_noinput ${__PACKAGES} || return 1
+            fi
+        fi
+
+    fi
+
+    echoinfo "Installing pip packages: ${_pip_pkgs} using ${_py_exe}"
+    # shellcheck disable=SC2086
+    ${_pip_cmd} install ${_pip_pkgs} || return 1
+}
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __install_tornado_pip
+# PARAMETERS: python executable
+# DESCRIPTION: Return 0 or 1 if successfully able to install tornado<5.0
+#----------------------------------------------------------------------------------------------------------------------
+__install_tornado_pip() {
+    # OS needs tornado <5.0 from pip
+    __check_pip_allowed "You need to allow pip based installations (-P) for Tornado <5.0 in order to install Salt on Python 3"
+    ## install pip if it's not installed and install tornado
+    __install_pip_pkgs "tornado<5.0" "${1}" || return 1
+}
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __install_pip_deps
+# DESCRIPTION: Return 0 or 1 if successfully able to install pip packages via requirements file
+# PARAMETERS: requirements_file
+#----------------------------------------------------------------------------------------------------------------------
+__install_pip_deps() {
+    # Install virtualenv to system pip before activating virtualenv if that's going to be used
+    # We assume pip pkg is installed since that is distro specific
+    if [ "$_VIRTUALENV_DIR" != "null" ]; then
+        if ! __check_command_exists pip; then
+            echoerror "Pip not installed: required for -a installs"
+            exit 1
+        fi
+        pip install -U virtualenv
+        __activate_virtualenv || return 1
+    else
+        echoerror "Must have virtualenv dir specified for -a installs"
+    fi
+
+    requirements_file=$1
+    if [ ! -f "${requirements_file}" ]; then
+        echoerror "Requirements file: ${requirements_file} cannot be found, needed for -a (pip pkg) installs"
+        exit 1
+    fi
+
+    __PIP_PACKAGES=''
+    if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
+        # shellcheck disable=SC2089
+        __PIP_PACKAGES="${__PIP_PACKAGES} 'apache-libcloud>=$_LIBCLOUD_MIN_VERSION'"
+    fi
+
+    # shellcheck disable=SC2086,SC2090
+    pip install -U -r ${requirements_file} ${__PIP_PACKAGES}
+} # ---------- end of function __install_pip_deps ----------
+
+#--- FUNCTION -------------------------------------------------------------------------------------------------------
+# NAME: __install_salt_from_repo_post_neon
+# DESCRIPTION: Return 0 or 1 if successfully able to install. Can provide a different python version to
+# install pip packages with. If $py_exe is not specified it will use the default python version.
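+# EXAMPLE: for a post-Neon source install (i.e. newer than Salt 3000 "Neon"),
+# the distro install functions below call e.g.:
+#              __install_salt_from_repo_post_neon "${_PY_EXE}"
+# passing the interpreter selected earlier; "python3" is a typical value.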
+# PARAMETERS: py_exe +#---------------------------------------------------------------------------------------------------------------------- +__install_salt_from_repo_post_neon() { + _py_exe="$1" + + if [ "${_py_exe}" = "" ]; then + _py_exe='python' + fi + + echodebug "__install_salt_from_repo_post_neon py_exe=$_py_exe" + + _py_version=$(${_py_exe} -c "import sys; print('{0}.{1}'.format(*sys.version_info))") + _pip_cmd="pip${_py_version}" + if ! __check_command_exists "${_pip_cmd}"; then + echodebug "The pip binary '${_pip_cmd}' was not found in PATH" + _pip_cmd="pip$(echo "${_py_version}" | cut -c -1)" + if ! __check_command_exists "${_pip_cmd}"; then + echodebug "The pip binary '${_pip_cmd}' was not found in PATH" + _pip_cmd="pip" + if ! __check_command_exists "${_pip_cmd}"; then + echoerror "Unable to find a pip binary" + return 1 + fi + fi + fi + + __check_pip_allowed + + echodebug "Installed pip version: $(${_pip_cmd} --version)" + + CHECK_PIP_VERSION_SCRIPT=$(cat << EOM +import sys +try: + import pip + installed_pip_version=tuple([int(part.strip()) for part in pip.__version__.split('.') if part.isdigit()]) + desired_pip_version=($(echo ${_MINIMUM_PIP_VERSION} | sed 's/\./, /g' )) + if installed_pip_version < desired_pip_version: + print('Desired pip version {!r} > Installed pip version {!r}'.format('.'.join(map(str, desired_pip_version)), '.'.join(map(str, installed_pip_version)))) + sys.exit(1) + print('Desired pip version {!r} < Installed pip version {!r}'.format('.'.join(map(str, desired_pip_version)), '.'.join(map(str, installed_pip_version)))) + sys.exit(0) +except ImportError: + print('Failed to import pip') + sys.exit(1) +EOM +) + if ! ${_py_exe} -c "$CHECK_PIP_VERSION_SCRIPT"; then + # Upgrade pip to at least 1.2 which is when we can start using "python -m pip" + echodebug "Running '${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} pip>=${_MINIMUM_PIP_VERSION}'" + ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} -v "pip>=${_MINIMUM_PIP_VERSION}" + sleep 1 + echodebug "PATH: ${PATH}" + _pip_cmd="pip${_py_version}" + if ! __check_command_exists "${_pip_cmd}"; then + echodebug "The pip binary '${_pip_cmd}' was not found in PATH" + _pip_cmd="pip$(echo "${_py_version}" | cut -c -1)" + if ! __check_command_exists "${_pip_cmd}"; then + echodebug "The pip binary '${_pip_cmd}' was not found in PATH" + _pip_cmd="pip" + if ! __check_command_exists "${_pip_cmd}"; then + echoerror "Unable to find a pip binary" + return 1 + fi + fi + fi + echodebug "Installed pip version: $(${_pip_cmd} --version)" + fi + + # We also lock setuptools to <45 which is the latest release to support both py2 and py3 + echodebug "Running '${_pip_cmd} install wheel setuptools>=${_MINIMUM_SETUPTOOLS_VERSION},<45'" + ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} wheel "setuptools>=${_MINIMUM_SETUPTOOLS_VERSION},<45" + + echoinfo "Installing salt using ${_py_exe}" + cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 + + mkdir /tmp/git/deps + echoinfo "Downloading Salt Dependencies from PyPi" + echodebug "Running '${_pip_cmd} download -d /tmp/git/deps .'" + ${_pip_cmd} download -d /tmp/git/deps . 
|| (echo "Failed to download salt dependencies" && return 1) + + echoinfo "Installing Downloaded Salt Dependencies" + echodebug "Running '${_pip_cmd} install --ignore-installed ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/*'" + ${_pip_cmd} install --ignore-installed ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/* || return 1 + rm -f /tmp/git/deps/* + + echoinfo "Building Salt Python Wheel" + + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + SETUP_PY_INSTALL_ARGS="-v" + fi + + echodebug "Running '${_py_exe} setup.py --salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} bdist_wheel'" + ${_py_exe} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} bdist_wheel || return 1 + mv dist/salt*.whl /tmp/git/deps/ || return 1 + + cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" || return 1 + + echoinfo "Installing Built Salt Wheel" + ${_pip_cmd} uninstall --yes salt 2>/dev/null || true + echodebug "Running '${_pip_cmd} install --no-deps --force-reinstall ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/salt*.whl'" + ${_pip_cmd} install --no-deps --force-reinstall \ + ${_POST_NEON_PIP_INSTALL_ARGS} \ + --global-option="--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \ + /tmp/git/deps/salt*.whl || return 1 + + echoinfo "Checking if Salt can be imported using ${_py_exe}" + CHECK_SALT_SCRIPT=$(cat << EOM +import os +import sys +try: + import salt + import salt.version + print('\nInstalled Salt Version: {}'.format(salt.version.__version__)) + print('Installed Salt Package Path: {}\n'.format(os.path.dirname(salt.__file__))) + sys.exit(0) +except ImportError: + print('\nFailed to import salt\n') + sys.exit(1) +EOM +) + if ! ${_py_exe} -c "$CHECK_SALT_SCRIPT"; then + return 1 + fi + return 0 +} # ---------- end of function __install_salt_from_repo_post_neon ---------- + + +if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + if [ "x${_PY_MAJOR_VERSION}" = "x" ]; then + # Default to python 2 for pre Neon installs + _PY_MAJOR_VERSION=2 + fi +else + if [ "x${_PY_MAJOR_VERSION}" = "x" ]; then + # Default to python 3 for post Neon install + _PY_MAJOR_VERSION=3 + fi +fi + +####################################################################################################################### +# +# Distribution install functions +# +# In order to install salt for a distribution you need to define: +# +# To Install Dependencies, which is required, one of: +# 1. install____deps +# 2. install_____deps +# 3. install___deps +# 4 install____deps +# 5. install___deps +# 6. install__deps +# +# Optionally, define a salt configuration function, which will be called if +# the -c (config-dir) option is passed. One of: +# 1. config____salt +# 2. config_____salt +# 3. config___salt +# 4 config____salt +# 5. config___salt +# 6. config__salt +# 7. config_salt [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] +# +# Optionally, define a salt master pre-seed function, which will be called if +# the -k (pre-seed master keys) option is passed. One of: +# 1. preseed____master +# 2. preseed_____master +# 3. preseed___master +# 4 preseed____master +# 5. preseed___master +# 6. preseed__master +# 7. preseed_master [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] +# +# To install salt, which, of course, is required, one of: +# 1. install___ +# 2. install____ +# 3. install__ +# +# Optionally, define a post install function, one of: +# 1. install____post +# 2. install_____post +# 3. install___post +# 4 install____post +# 5. 
install___post +# 6. install__post +# +# Optionally, define a start daemons function, one of: +# 1. install____restart_daemons +# 2. install_____restart_daemons +# 3. install___restart_daemons +# 4 install____restart_daemons +# 5. install___restart_daemons +# 6. install__restart_daemons +# +# NOTE: The start daemons function should be able to restart any daemons +# which are running, or start if they're not running. +# +# Optionally, define a daemons running function, one of: +# 1. daemons_running___ +# 2. daemons_running____ +# 3. daemons_running__ +# 4 daemons_running___ +# 5. daemons_running__ +# 6. daemons_running_ +# 7. daemons_running [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] +# +# Optionally, check enabled Services: +# 1. install____check_services +# 2. install_____check_services +# 3. install___check_services +# 4 install____check_services +# 5. install___check_services +# 6. install__check_services +# +####################################################################################################################### + + +####################################################################################################################### +# +# Ubuntu Install Functions +# +__enable_universe_repository() { + if [ "$(grep -R universe /etc/apt/sources.list /etc/apt/sources.list.d/ | grep -v '#')" != "" ]; then + # The universe repository is already enabled + return 0 + fi + + echodebug "Enabling the universe repository" + + add-apt-repository -y "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) universe" || return 1 + + return 0 +} + +__install_saltstack_ubuntu_repository() { + # Workaround for latest non-LTS ubuntu + if [ "$DISTRO_MAJOR_VERSION" -eq 19 ] || \ + { [ "$DISTRO_MAJOR_VERSION" -eq 18 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; }; then + echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages for previous LTS release. You may experience problems." + UBUNTU_VERSION=18.04 + UBUNTU_CODENAME="bionic" + else + UBUNTU_VERSION=${DISTRO_VERSION} + UBUNTU_CODENAME=${DISTRO_CODENAME} + fi + + # Install downloader backend for GPG keys fetching + __PACKAGES='wget' + + # Required as it is not installed by default on Ubuntu 18+ + if [ "$DISTRO_MAJOR_VERSION" -ge 18 ]; then + __PACKAGES="${__PACKAGES} gnupg" + fi + + # Make sure https transport is available + if [ "$HTTP_VAL" = "https" ] ; then + __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" + fi + + # shellcheck disable=SC2086,SC2090 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + __PY_VERSION_REPO="apt" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # SaltStack's stable Ubuntu repository: + SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/${STABLE_REV}" + echo "$__REPO_ARCH_DEB $SALTSTACK_UBUNTU_URL $UBUNTU_CODENAME main" > /etc/apt/sources.list.d/saltstack.list + + __apt_key_fetch "$SALTSTACK_UBUNTU_URL/SALTSTACK-GPG-KEY.pub" || return 1 + + __wait_for_apt apt-get update || return 1 +} + +install_ubuntu_deps() { + if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then + # Install add-apt-repository + if ! 
__check_command_exists add-apt-repository; then + __apt_get_install_noinput software-properties-common || return 1 + fi + + __enable_universe_repository || return 1 + + __wait_for_apt apt-get update || return 1 + fi + + __PACKAGES='' + + if [ "$DISTRO_MAJOR_VERSION" -lt 16 ]; then + # Minimal systems might not have upstart installed, install it + __PACKAGES="upstart" + fi + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 16 ] && [ -z "$_PY_EXE" ]; then + __PACKAGES="${__PACKAGES} python2.7" + fi + + if [ "$_VIRTUALENV_DIR" != "null" ]; then + __PACKAGES="${__PACKAGES} python-virtualenv" + fi + # Need python-apt for managing packages via Salt + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-apt" + + # requests is still used by many salt modules + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-requests" + + # YAML module is used for generating custom master/minion configs + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-yaml" + + # Additionally install procps and pciutils which allows for Docker bootstraps. See 366#issuecomment-39666813 + __PACKAGES="${__PACKAGES} procps pciutils" + + # shellcheck disable=SC2086,SC2090 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_ubuntu_stable_deps() { + if [ "${_SLEEP}" -eq "${__DEFAULT_SLEEP}" ] && [ "$DISTRO_MAJOR_VERSION" -lt 16 ]; then + # The user did not pass a custom sleep value as an argument, let's increase the default value + echodebug "On Ubuntu systems we increase the default sleep value to 10." + echodebug "See https://github.com/saltstack/salt/issues/12248 for more info." + _SLEEP=10 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 20 ]; then + # Default Ubuntu 20.04 to Py3 + if [ "x${_PY_EXE}" = "x" ]; then + _PY_EXE=python3 + _PY_MAJOR_VERSION=3 + PY_PKG_VER=3 + fi + fi + + if [ $_START_DAEMONS -eq $BS_FALSE ]; then + echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." + fi + + # No user interaction, libc6 restart services for example + export DEBIAN_FRONTEND=noninteractive + + __wait_for_apt apt-get update || return 1 + + if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then + if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && + apt-key update && apt-get update || return 1 + fi + + __apt_get_upgrade_noinput || return 1 + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __check_dpkg_architecture || return 1 + __install_saltstack_ubuntu_repository || return 1 + fi + + install_ubuntu_deps || return 1 +} + +install_ubuntu_git_deps() { + __wait_for_apt apt-get update || return 1 + + if ! 
__check_command_exists git; then
+        __apt_get_install_noinput git-core || return 1
+    fi
+
+    if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then
+        __apt_get_install_noinput ca-certificates
+    fi
+
+    __git_clone_and_checkout || return 1
+
+    if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
+        PY_PKG_VER=3
+    else
+        PY_PKG_VER=""
+    fi
+
+    if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then
+
+        __PACKAGES=""
+
+        # See how we are installing packages
+        if [ "${_PIP_ALL}" -eq $BS_TRUE ]; then
+            __PACKAGES="${__PACKAGES} python-dev swig libssl-dev libzmq3 libzmq3-dev"
+
+            if ! __check_command_exists pip; then
+                __PACKAGES="${__PACKAGES} python-setuptools python-pip"
+            fi
+
+            # Get just the apt packages that are required to build all the pythons
+            # shellcheck disable=SC2086
+            __apt_get_install_noinput ${__PACKAGES} || return 1
+            # Install the pythons from requirements (only zmq for now)
+            __install_pip_deps "${_SALT_GIT_CHECKOUT_DIR}/requirements/zeromq.txt" || return 1
+        else
+            install_ubuntu_stable_deps || return 1
+
+            if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
+                __PACKAGES="${__PACKAGES} python3-setuptools"
+            else
+                # There is no m2crypto package for Py3 at this time - only install for Py2
+                __PACKAGES="${__PACKAGES} python-m2crypto"
+            fi
+
+            __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2"
+            __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests"
+            __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-yaml"
+            __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-zmq"
+            __PACKAGES="${__PACKAGES} python-concurrent.futures"
+
+            if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
+                # Install python-libcloud if asked to
+                __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud"
+            fi
+
+            # shellcheck disable=SC2086
+            __apt_get_install_noinput ${__PACKAGES} || return 1
+        fi
+    else
+        __PACKAGES="python${PY_PKG_VER}-dev python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc"
+        # shellcheck disable=SC2086
+        __apt_get_install_noinput ${__PACKAGES} || return 1
+    fi
+
+    # Let's trigger config_salt()
+    if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
+        _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/"
+        CONFIG_SALT_FUNC="config_salt"
+    fi
+
+    return 0
+}
+
+install_ubuntu_stable() {
+    __PACKAGES=""
+
+    if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
+        __PACKAGES="${__PACKAGES} salt-cloud"
+    fi
+    if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then
+        __PACKAGES="${__PACKAGES} salt-master"
+    fi
+    if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then
+        __PACKAGES="${__PACKAGES} salt-minion"
+    fi
+    if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then
+        __PACKAGES="${__PACKAGES} salt-syndic"
+    fi
+
+    # shellcheck disable=SC2086
+    __apt_get_install_noinput ${__PACKAGES} || return 1
+
+    return 0
+}
+
+install_ubuntu_git() {
+    # Activate virtualenv before install
+    if [ "${_VIRTUALENV_DIR}" != "null" ]; then
+        __activate_virtualenv || return 1
+    fi
+
+    if [ -n "$_PY_EXE" ]; then
+        _PYEXE=${_PY_EXE}
+    else
+        _PYEXE=python2.7
+    fi
+
+    if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then
+        # We can use --prefix on debian based distributions
+        if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
+            _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python3/dist-packages --install-option=--install-scripts=/usr/bin"
+        else
+            _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python2.7/dist-packages --install-option=--install-scripts=/usr/bin"
+        fi
+        _POST_NEON_PIP_INSTALL_ARGS=""
+        __install_salt_from_repo_post_neon "${_PY_EXE}" || return
1 + cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 + sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + # shellcheck disable=SC2086 + "${_PYEXE}" setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 + else + # shellcheck disable=SC2086 + "${_PYEXE}" setup.py ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 + fi + + return 0 +} + +install_ubuntu_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + # Using systemd + /bin/systemctl is-enabled salt-$fname.service > /dev/null 2>&1 || ( + /bin/systemctl preset salt-$fname.service > /dev/null 2>&1 && + /bin/systemctl enable salt-$fname.service > /dev/null 2>&1 + ) + sleep 1 + /bin/systemctl daemon-reload + elif [ -f /etc/init.d/salt-$fname ]; then + update-rc.d salt-$fname defaults + fi + done + + return 0 +} + +install_ubuntu_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + elif [ -f /sbin/initctl ]; then + _upstart_conf="/etc/init/salt-$fname.conf" + # We have upstart support + echodebug "There's upstart support" + if [ ! -f $_upstart_conf ]; then + # upstart does not know about our service, let's copy the proper file + echowarn "Upstart does not appear to know about salt-$fname" + echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-$fname.upstart to $_upstart_conf" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.upstart" "$_upstart_conf" + # Set service to know about virtualenv + if [ "${_VIRTUALENV_DIR}" != "null" ]; then + echo "SALT_USE_VIRTUALENV=${_VIRTUALENV_DIR}" > /etc/default/salt-${fname} + fi + /sbin/initctl reload-configuration || return 1 + fi + # No upstart support in Ubuntu!? + elif [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init" ]; then + echodebug "There's NO upstart support!?" 
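+            # Neither systemd (Ubuntu >= 16) nor upstart is handling services
+            # here, so fall back to the SysV init script from the git checkout.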
+ echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init to /etc/init.d/salt-$fname" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init" "/etc/init.d/salt-$fname" + chmod +x /etc/init.d/salt-$fname + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + update-rc.d salt-$fname defaults + else + echoerror "Neither upstart nor init.d was setup for salt-$fname" + fi + done + + return 0 +} + +install_ubuntu_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + # Ensure upstart configs / systemd units are loaded + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then + systemctl daemon-reload + elif [ -f /sbin/initctl ]; then + /sbin/initctl reload-configuration + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then + echodebug "There's systemd support while checking salt-$fname" + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + # We failed to start the service, let's test the SysV code below + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + fi + + if [ -f /sbin/initctl ]; then + echodebug "There's upstart support while checking salt-$fname" + + if status salt-$fname 2>/dev/null | grep -q running; then + stop salt-$fname || (echodebug "Failed to stop salt-$fname" && return 1) + fi + + start salt-$fname && continue + # We failed to start the service, let's test the SysV code below + echodebug "Failed to start salt-$fname using Upstart" + fi + + if [ ! 
-f /etc/init.d/salt-$fname ]; then + echoerror "No init.d support for salt-$fname was found" + return 1 + fi + + /etc/init.d/salt-$fname stop > /dev/null 2>&1 + /etc/init.d/salt-$fname start + done + + return 0 +} + +install_ubuntu_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then + __check_services_systemd salt-$fname || return 1 + elif [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then + __check_services_upstart salt-$fname || return 1 + elif [ -f /etc/init.d/salt-$fname ]; then + __check_services_debian salt-$fname || return 1 + fi + done + + return 0 +} +# +# End of Ubuntu Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Debian Install Functions +# +__install_saltstack_debian_repository() { + DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" + DEBIAN_CODENAME="$DISTRO_CODENAME" + + __PY_VERSION_REPO="apt" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # Install downloader backend for GPG keys fetching + __PACKAGES='wget' + + # Required as it is not installed by default on Debian 9+ + if [ "$DISTRO_MAJOR_VERSION" -ge 9 ]; then + __PACKAGES="${__PACKAGES} gnupg2" + fi + + # Make sure https transport is available + if [ "$HTTP_VAL" = "https" ] ; then + __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" + fi + + # shellcheck disable=SC2086,SC2090 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location + SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/debian/${DEBIAN_RELEASE}/${__REPO_ARCH}/${STABLE_REV}" + echo "$__REPO_ARCH_DEB $SALTSTACK_DEBIAN_URL $DEBIAN_CODENAME main" > "/etc/apt/sources.list.d/saltstack.list" + + __apt_key_fetch "$SALTSTACK_DEBIAN_URL/SALTSTACK-GPG-KEY.pub" || return 1 + + __wait_for_apt apt-get update || return 1 +} + +install_debian_deps() { + if [ $_START_DAEMONS -eq $BS_FALSE ]; then + echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." 
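+        # Debian's packages enable and start their units during installation,
+        # which is why a request to skip starting daemons cannot be fully honored.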
+ fi + + # No user interaction, libc6 restart services for example + export DEBIAN_FRONTEND=noninteractive + + __wait_for_apt apt-get update || return 1 + + if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then + # Try to update GPG keys first if allowed + if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && + apt-key update && apt-get update || return 1 + fi + + __apt_get_upgrade_noinput || return 1 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 10 ]; then + # Default Debian 10 to Py3 + if [ "x${_PY_EXE}" = "x" ]; then + _PY_EXE=python3 + _PY_MAJOR_VERSION=3 + PY_PKG_VER=3 + fi + fi + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + fi + + # Additionally install procps and pciutils which allows for Docker bootstraps. See 366#issuecomment-39666813 + __PACKAGES='procps pciutils' + + # YAML module is used for generating custom master/minion configs + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-yaml" + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __check_dpkg_architecture || return 1 + __install_saltstack_debian_repository || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_debian_git_pre() { + if ! __check_command_exists git; then + __apt_get_install_noinput git || return 1 + fi + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __apt_get_install_noinput ca-certificates + fi + + __git_clone_and_checkout || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi +} + +install_debian_git_deps() { + install_debian_deps || return 1 + install_debian_git_pre || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + __PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-backports.ssl-match-hostname" + __PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-msgpack python-m2crypto" + __PACKAGES="${__PACKAGES} python-requests python-tornado python-yaml python-zmq" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # Install python-libcloud if asked to + __PACKAGES="${__PACKAGES} python-libcloud" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + else + __PACKAGES="python${PY_PKG_VER}-dev python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + echodebug "install_debian_git_deps() Installing ${__PACKAGES}" + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + fi + + return 0 +} + +install_debian_7_git_deps() { + install_debian_deps || return 1 + install_debian_git_deps || return 1 + + return 0 +} + +install_debian_8_git_deps() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + echodebug "CALLING install_debian_git_deps" + install_debian_git_deps || return 1 + return 0 + fi + + install_debian_deps || return 1 + + if ! 
__check_command_exists git; then + __apt_get_install_noinput git || return 1 + fi + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __apt_get_install_noinput ca-certificates + fi + + __git_clone_and_checkout || return 1 + + __PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-crypto python-jinja2" + __PACKAGES="${__PACKAGES} python-m2crypto python-msgpack python-requests python-systemd" + __PACKAGES="${__PACKAGES} python-yaml python-zmq python-concurrent.futures" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # Install python-libcloud if asked to + __PACKAGES="${__PACKAGES} python-libcloud" + fi + + __PIP_PACKAGES='' + if (__check_pip_allowed >/dev/null 2>&1); then + __PIP_PACKAGES='tornado<5.0' + # Install development environment for building tornado Python module + __PACKAGES="${__PACKAGES} build-essential python-dev" + + if ! __check_command_exists pip; then + __PACKAGES="${__PACKAGES} python-pip" + fi + # Attempt to configure backports repo on non-x86_64 system + elif [ $_DISABLE_REPOS -eq $BS_FALSE ] && [ "$DPKG_ARCHITECTURE" != "amd64" ]; then + # Check if Debian Backports repo already configured + if ! apt-cache policy | grep -q 'Debian Backports'; then + echo 'deb http://httpredir.debian.org/debian jessie-backports main' > \ + /etc/apt/sources.list.d/backports.list + fi + + __wait_for_apt apt-get update || return 1 + + # python-tornado package should be installed from backports repo + __PACKAGES="${__PACKAGES} python-backports.ssl-match-hostname python-tornado/jessie-backports" + else + __PACKAGES="${__PACKAGES} python-backports.ssl-match-hostname python-tornado" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + if [ "${__PIP_PACKAGES}" != "" ]; then + # shellcheck disable=SC2086,SC2090 + pip install -U ${__PIP_PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_debian_9_git_deps() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + install_debian_git_deps || return 1 + return 0 + fi + + install_debian_deps || return 1 + install_debian_git_pre || return 1 + + __PACKAGES="libzmq5 lsb-release" + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + + # These packages are PY2-ONLY + __PACKAGES="${__PACKAGES} python-backports-abc python-m2crypto python-concurrent.futures" + fi + + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-apt python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-systemd" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-yaml python${PY_PKG_VER}-zmq" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # Install python-libcloud if asked to + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_debian_10_git_deps() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + install_debian_git_deps || return 1 + return 0 + fi + + install_debian_deps || return 1 + install_debian_git_pre || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + _py=${_PY_EXE} + PY_PKG_VER=3 + __PACKAGES="python${PY_PKG_VER}-distutils" + else + _py="python" + PY_PKG_VER="" + __PACKAGES="" + fi + + 
__install_tornado_pip ${_py} || return 1
+    __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-jinja2"
+    __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-yaml python${PY_PKG_VER}-zmq"
+
+    # shellcheck disable=SC2086
+    __apt_get_install_noinput ${__PACKAGES} || return 1
+
+    return 0
+}
+
+install_debian_stable() {
+    __PACKAGES=""
+
+    if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
+        __PACKAGES="${__PACKAGES} salt-cloud"
+    fi
+    if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then
+        __PACKAGES="${__PACKAGES} salt-master"
+    fi
+    if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then
+        __PACKAGES="${__PACKAGES} salt-minion"
+    fi
+    if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then
+        __PACKAGES="${__PACKAGES} salt-syndic"
+    fi
+
+    # shellcheck disable=SC2086
+    __apt_get_install_noinput ${__PACKAGES} || return 1
+
+    return 0
+}
+
+install_debian_7_stable() {
+    install_debian_stable || return 1
+    return 0
+}
+
+install_debian_8_stable() {
+    install_debian_stable || return 1
+    return 0
+}
+
+install_debian_9_stable() {
+    install_debian_stable || return 1
+    return 0
+}
+
+install_debian_git() {
+    if [ -n "$_PY_EXE" ]; then
+        _PYEXE=${_PY_EXE}
+    else
+        _PYEXE=python
+    fi
+
+    if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then
+        # We can use --prefix on debian based distributions
+        if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
+            _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python3/dist-packages --install-option=--install-scripts=/usr/bin"
+        else
+            _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python2.7/dist-packages --install-option=--install-scripts=/usr/bin"
+        fi
+        _POST_NEON_PIP_INSTALL_ARGS=""
+        __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1
+        cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1
+        sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service
+        return 0
+    fi
+
+    if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then
+        # shellcheck disable=SC2086
+        "${_PYEXE}" setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1
+    else
+        # shellcheck disable=SC2086
+        "${_PYEXE}" setup.py ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1
+    fi
+}
+
+install_debian_7_git() {
+    install_debian_git || return 1
+    return 0
+}
+
+install_debian_8_git() {
+    install_debian_git || return 1
+    return 0
+}
+
+install_debian_9_git() {
+    install_debian_git || return 1
+    return 0
+}
+
+install_debian_git_post() {
+    for fname in api master minion syndic; do
+        # Skip if not meant to be installed
+        [ "$fname" = "api" ] && \
+            ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue
+        [ "$fname" = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
+        [ "$fname" = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
+        [ "$fname" = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
+
+        # Configure SystemD for Debian 8 "Jessie" and later
+        if [ -f /bin/systemctl ]; then
+            if [ !
-f /lib/systemd/system/salt-${fname}.service ] || \ + { [ -f /lib/systemd/system/salt-${fname}.service ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]; }; then + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.environment" "/etc/default/salt-${fname}" + else + # workaround before adding Debian-specific unit files to the Salt main repo + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system + sed -i -e '/^Type/ s/notify/simple/' /lib/systemd/system/salt-${fname}.service + fi + fi + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ "$fname" = "api" ] && continue + + /bin/systemctl enable "salt-${fname}.service" + SYSTEMD_RELOAD=$BS_TRUE + + # Install initscripts for Debian 7 "Wheezy" + elif [ ! -f "/etc/init.d/salt-$fname" ] || \ + { [ -f "/etc/init.d/salt-$fname" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.init" "/etc/init.d/salt-${fname}" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.environment" "/etc/default/salt-${fname}" + + if [ ! -f "/etc/init.d/salt-${fname}" ]; then + echowarn "The init script for salt-${fname} was not found, skipping it..." + continue + fi + + chmod +x "/etc/init.d/salt-${fname}" + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ "$fname" = "api" ] && continue + + update-rc.d "salt-${fname}" defaults + fi + done +} + +install_debian_restart_daemons() { + [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + # Debian 8 uses systemd + /bin/systemctl stop salt-$fname > /dev/null 2>&1 + /bin/systemctl start salt-$fname.service && continue + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + elif [ -f /etc/init.d/salt-$fname ]; then + # Still in SysV init + /etc/init.d/salt-$fname stop > /dev/null 2>&1 + /etc/init.d/salt-$fname start + fi + done +} + +install_debian_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + __check_services_systemd salt-$fname || return 1 + elif [ -f /etc/init.d/salt-$fname ]; then + __check_services_debian salt-$fname || return 1 + fi + done + return 0 +} +# +# Ended Debian Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Fedora 
Install Functions +# + +install_fedora_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + dnf -y update || return 1 + fi + + __PACKAGES="${__PACKAGES:=}" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -lt 3 ]; then + echoerror "There are no Python 2 stable packages for Fedora, only Py3 packages" + return 1 + fi + + # Salt on Fedora is Py3 + PY_PKG_VER=3 + + __PACKAGES="${__PACKAGES} dnf-utils libyaml procps-ng python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-zmq" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pip python${PY_PKG_VER}-m2crypto python${PY_PKG_VER}-pyyaml" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-systemd" + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + fi + + # shellcheck disable=SC2086 + __dnf_install_noinput ${__PACKAGES} ${_EXTRA_PACKAGES} || return 1 + + return 0 +} + +install_fedora_stable() { + if [ "$STABLE_REV" = "latest" ]; then + __SALT_VERSION="" + else + __SALT_VERSION="$(dnf list --showduplicates salt | grep "$STABLE_REV" | head -n 1 | awk '{print $2}')" + if [ "x${__SALT_VERSION}" = "x" ]; then + echoerror "Could not find a stable install for Salt ${STABLE_REV}" + exit 1 + fi + echoinfo "Installing Stable Package Version ${__SALT_VERSION}" + __SALT_VERSION="-${__SALT_VERSION}" + fi + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud${__SALT_VERSION}" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master${__SALT_VERSION}" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion${__SALT_VERSION}" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic${__SALT_VERSION}" + fi + + # shellcheck disable=SC2086 + __dnf_install_noinput ${__PACKAGES} || return 1 + + __python="python3" + if ! __check_command_exists python3; then + echoerror "Could not find a python3 binary?!" 
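+        # install_fedora_deps above already refuses Py2 installs: the stable
+        # Fedora packages are Python 3 only, so a missing python3 is fatal.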
+ return 1 + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + __check_pip_allowed "You need to allow pip based installations (-P) for Tornado <5.0 in order to install Salt" + __installed_tornado_rpm=$(rpm -qa | grep python${PY_PKG_VER}-tornado) + if [ -n "${__installed_tornado_rpm}" ]; then + echodebug "Removing system package ${__installed_tornado_rpm}" + rpm -e --nodeps "${__installed_tornado_rpm}" || return 1 + fi + __get_site_packages_dir_code=$(cat << EOM +import site +print([d for d in site.getsitepackages() if d.startswith('/usr/lib/python')][0]) +EOM +) + __target_path=$(${__python} -c "${__get_site_packages_dir_code}") + echodebug "Running '${__python}' -m pip install --target ${__target_path} 'tornado<5.0'" + "${__python}" -m pip install --target "${__target_path}" "tornado<5" || return 1 + fi + + return 0 +} + +install_fedora_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + done +} + +install_fedora_git_deps() { + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + # Packages are named python3- + PY_PKG_VER=3 + else + PY_PKG_VER=2 + fi + + __PACKAGES="" + if ! __check_command_exists ps; then + __PACKAGES="${__PACKAGES} procps-ng" + fi + if ! __check_command_exists git; then + __PACKAGES="${__PACKAGES} git" + fi + + if [ -n "${__PACKAGES}" ]; then + # shellcheck disable=SC2086 + __dnf_install_noinput ${__PACKAGES} || return 1 + __PACKAGES="" + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __PACKAGES="${__PACKAGES} ca-certificates" + fi + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud python${PY_PKG_VER}-netaddr" + fi + + install_fedora_deps || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + if __check_command_exists python3; then + __python="python3" + fi + elif [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + if __check_command_exists python2; then + __python="python2" + fi + else + if ! __check_command_exists python; then + echoerror "Unable to find a python binary?!" 
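+            # No interpreter was chosen via _PY_EXE and no plain "python" binary
+            # exists, so there is nothing sensible to fall back to.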
+ return 1 + fi + # Let's hope it's the right one + __python="python" + fi + + grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" | while IFS=' + ' read -r dep; do + echodebug "Running '${__python}' -m pip install '${dep}'" + "${__python}" -m pip install "${dep}" || return 1 + done + else + __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + # shellcheck disable=SC2086 + __dnf_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_fedora_git() { + if [ "${_PY_EXE}" != "" ]; then + _PYEXE=${_PY_EXE} + echoinfo "Using the following python version: ${_PY_EXE} to install salt" + else + _PYEXE='python2' + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + ${_PYEXE} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + else + ${_PYEXE} setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + fi + return 0 +} + +install_fedora_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + done +} + +install_fedora_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + done +} + +install_fedora_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || return 1 + done + + return 0 +} +# +# Ended Fedora Install 
Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# CentOS Install Functions +# +__install_epel_repository() { + if [ ${_EPEL_REPOS_INSTALLED} -eq $BS_TRUE ]; then + return 0 + fi + + # Check if epel repo is already enabled and flag it accordingly + if yum repolist | grep -q "^[!]\\?${_EPEL_REPO}/"; then + _EPEL_REPOS_INSTALLED=$BS_TRUE + return 0 + fi + + # Download latest 'epel-release' package for the distro version directly + epel_repo_url="${HTTP_VAL}://dl.fedoraproject.org/pub/epel/epel-release-latest-${DISTRO_MAJOR_VERSION}.noarch.rpm" + rpm -Uvh --force "$epel_repo_url" || return 1 + + _EPEL_REPOS_INSTALLED=$BS_TRUE + + return 0 +} + +__install_saltstack_rhel_repository() { + if [ "$ITYPE" = "stable" ]; then + repo_rev="$STABLE_REV" + else + repo_rev="latest" + fi + + __PY_VERSION_REPO="yum" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # Avoid using '$releasever' variable for yum. + # Instead, this should work correctly on all RHEL variants. + base_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/${repo_rev}/" + gpg_key="SALTSTACK-GPG-KEY.pub" + repo_file="/etc/yum.repos.d/saltstack.repo" + + if [ ! -s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then + cat <<_eof > "$repo_file" +[saltstack] +name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever +baseurl=${base_url} +skip_if_unavailable=True +gpgcheck=1 +gpgkey=${base_url}${gpg_key} +enabled=1 +enabled_metadata=1 +_eof + + fetch_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${repo_rev}/" + __rpm_import_gpg "${fetch_url}${gpg_key}" || return 1 + yum clean metadata || return 1 + elif [ "$repo_rev" != "latest" ]; then + echowarn "saltstack.repo already exists, ignoring salt version argument." + echowarn "Use -F (forced overwrite) to install $repo_rev." + fi + + return 0 +} + +install_centos_stable_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + # CentOS/RHEL 8 Default to Py3 + if [ "x${_PY_EXE}" = "x" ]; then + _PY_EXE=python3 + _PY_MAJOR_VERSION=3 + fi + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + echowarn "Detected -r or -R option while installing Salt packages for Python 3." + echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." + echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then + __install_epel_repository || return 1 + __install_saltstack_rhel_repository || return 1 + fi + + # If -R was passed, we need to configure custom repo url with rsync-ed packages + # Which is still handled in __install_saltstack_rhel_repository. This call has + # its own check in case -r was passed without -R. 
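+    # e.g. a hypothetical "-r -R repo.example.com" invocation ends up here: the
+    # default repos are skipped above, but the custom one is still configured.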
+ if [ "$_CUSTOM_REPO_URL" != "null" ]; then + __install_saltstack_rhel_repository || return 1 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __PACKAGES="dnf-utils chkconfig" + else + __PACKAGES="yum-utils chkconfig" + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + # YAML module is used for generating custom master/minion configs + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PACKAGES="${__PACKAGES} python3-pyyaml" + else + __PACKAGES="${__PACKAGES} python2-pyyaml" + fi + elif [ "$DISTRO_MAJOR_VERSION" -eq 7 ]; then + # YAML module is used for generating custom master/minion configs + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PACKAGES="${__PACKAGES} python36-PyYAML" + else + __PACKAGES="${__PACKAGES} PyYAML" + fi + else + # YAML module is used for generating custom master/minion configs + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PACKAGES="${__PACKAGES} python34-PyYAML" + else + __PACKAGES="${__PACKAGES} PyYAML" + fi + fi + fi + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + + return 0 +} + +install_centos_stable() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_centos_stable_post() { + SYSTEMD_RELOAD=$BS_FALSE + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + /bin/systemctl is-enabled salt-${fname}.service > /dev/null 2>&1 || ( + /bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 && + /bin/systemctl enable salt-${fname}.service > /dev/null 2>&1 + ) + + SYSTEMD_RELOAD=$BS_TRUE + elif [ -f "/etc/init.d/salt-${fname}" ]; then + /sbin/chkconfig salt-${fname} on + fi + done + + if [ "$SYSTEMD_RELOAD" -eq $BS_TRUE ]; then + /bin/systemctl daemon-reload + fi + + return 0 +} + +install_centos_git_deps() { + install_centos_stable_deps || return 1 + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __yum_install_noinput ca-certificates || return 1 + fi + + if ! 
__check_command_exists git; then + __yum_install_noinput git || return 1 + fi + + __git_clone_and_checkout || return 1 + + __PACKAGES="" + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + # Packages are named python3- + PY_PKG_VER=3 + __PACKAGES="${__PACKAGES} python3" + else + # Packages are named python36- + PY_PKG_VER=36 + __PACKAGES="${__PACKAGES} python36" + fi + else + PY_PKG_VER="" + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __PACKAGES="${__PACKAGES} python2" + elif [ "$DISTRO_MAJOR_VERSION" -eq 6 ]; then + PY_PKG_VER=27 + __PACKAGES="${__PACKAGES} python27" + else + __PACKAGES="${__PACKAGES} python" + fi + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + _install_m2crypto_req=false + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + _py=${_PY_EXE} + if [ "$DISTRO_MAJOR_VERSION" -gt 6 ]; then + _install_m2crypto_req=true + fi + else + if [ "$DISTRO_MAJOR_VERSION" -eq 6 ]; then + _install_m2crypto_req=true + fi + _py="python" + + # Only Py2 needs python-futures + __PACKAGES="${__PACKAGES} python-futures" + + # There is no systemd-python3 package as of this writing + if [ "$DISTRO_MAJOR_VERSION" -ge 7 ]; then + __PACKAGES="${__PACKAGES} systemd-python" + fi + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __install_tornado_pip ${_py} || return 1 + __PACKAGES="${__PACKAGES} python3-m2crypto" + else + __PACKAGES="${__PACKAGES} m2crypto python${PY_PKG_VER}-crypto" + fi + + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-zmq" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud" + fi + + if [ "${_INSTALL_PY}" -eq "${BS_TRUE}" ]; then + # Install Python if "-y" was passed in. + __install_python || return 1 + fi + + if [ "${_PY_EXE}" != "" ] && [ "$_PIP_ALLOWED" -eq "$BS_TRUE" ]; then + # If "-x" is defined, install dependencies with pip based on the Python version given. + _PIP_PACKAGES="m2crypto!=0.33.0 jinja2 msgpack-python pycrypto PyYAML tornado<5.0 zmq futures>=2.0" + + # install swig and openssl on cent6 + if $_install_m2crypto_req; then + __yum_install_noinput openssl-devel swig || return 1 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # Filter out any commented lines from the requirements file + _REQ_LINES="$(grep '^[^#]' "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + for SINGLE_PACKAGE in ${_PIP_PACKAGES}; do + __REQUIRED_VERSION="$(grep "${SINGLE_PACKAGE}" "${_REQ_LINES}")" + if [ "${__REQUIRED_VERSION}" != "" ]; then + _PIP_PACKAGES=$(echo "$_PIP_PACKAGES" | sed "s/${SINGLE_PACKAGE}/${__REQUIRED_VERSION}/") + fi + done + fi + + if [ "$_INSTALL_CLOUD" -eq "${BS_TRUE}" ]; then + _PIP_PACKAGES="${_PIP_PACKAGES} apache-libcloud" + fi + + __install_pip_pkgs "${_PIP_PACKAGES}" "${_PY_EXE}" || return 1 + else + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + else + if [ "${_INSTALL_PY}" -eq "${BS_TRUE}" ] && [ "$DISTRO_MAJOR_VERSION" -lt 8 ]; then + # Install Python if "-y" was passed in. 
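+            # ("-y" sets _INSTALL_PY; __install_python provides the interpreter
+            # before the pip/setuptools toolchain below is pulled in.)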
+ __install_python || return 1 + fi + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_centos_git() { + if [ "${_PY_EXE}" != "" ]; then + _PYEXE=${_PY_EXE} + echoinfo "Using the following python version: ${_PY_EXE} to install salt" + else + _PYEXE='python2' + fi + + echodebug "_PY_EXE: $_PY_EXE" + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + $_PYEXE setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + else + $_PYEXE setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + fi + + return 0 +} + +install_centos_git_post() { + SYSTEMD_RELOAD=$BS_FALSE + + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + if [ ! -f "/usr/lib/systemd/system/salt-${fname}.service" ] || \ + { [ -f "/usr/lib/systemd/system/salt-${fname}.service" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" /usr/lib/systemd/system + fi + + SYSTEMD_RELOAD=$BS_TRUE + elif [ ! -f "/etc/init.d/salt-$fname" ] || \ + { [ -f "/etc/init.d/salt-$fname" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}" /etc/init.d + chmod +x /etc/init.d/salt-${fname} + fi + done + + if [ "$SYSTEMD_RELOAD" -eq $BS_TRUE ]; then + /bin/systemctl daemon-reload + fi + + install_centos_stable_post || return 1 + + return 0 +} + +install_centos_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then + # We have upstart support and upstart knows about our service + if ! /sbin/initctl status salt-$fname > /dev/null 2>&1; then + # Everything is in place and upstart gave us an error code? Fail! + return 1 + fi + + # upstart knows about this service. + # Let's try to stop it, and then start it + /sbin/initctl stop salt-$fname > /dev/null 2>&1 + # Restart service + if ! /sbin/initctl start salt-$fname > /dev/null 2>&1; then + # Failed the restart?! 
+ return 1 + fi + elif [ -f /etc/init.d/salt-$fname ]; then + # Disable stdin to fix shell session hang on killing tee pipe + service salt-$fname stop < /dev/null > /dev/null 2>&1 + service salt-$fname start < /dev/null + elif [ -f /usr/bin/systemctl ]; then + # CentOS 7 uses systemd + /usr/bin/systemctl stop salt-$fname > /dev/null 2>&1 + /usr/bin/systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + fi + done +} + +install_centos_testing_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_centos_testing() { + install_centos_stable || return 1 + return 0 +} + +install_centos_testing_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_centos_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then + __check_services_upstart salt-$fname || return 1 + elif [ -f /etc/init.d/salt-$fname ]; then + __check_services_sysvinit salt-$fname || return 1 + elif [ -f /usr/bin/systemctl ]; then + __check_services_systemd salt-$fname || return 1 + fi + done + + return 0 +} +# +# Ended CentOS Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# RedHat Install Functions +# +install_red_hat_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_red_hat_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_red_hat_enterprise_linux_stable_deps() { + install_red_hat_linux_stable_deps || return 1 + return 0 +} + +install_red_hat_enterprise_linux_git_deps() { + install_red_hat_linux_git_deps || return 1 + return 0 +} + +install_red_hat_enterprise_server_stable_deps() { + install_red_hat_linux_stable_deps || return 1 + return 0 +} + +install_red_hat_enterprise_server_git_deps() { + install_red_hat_linux_git_deps || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_stable_deps() { + install_red_hat_linux_stable_deps || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_git_deps() { + install_red_hat_linux_git_deps || return 1 + return 0 +} + +install_red_hat_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_red_hat_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_red_hat_enterprise_linux_stable() { + install_red_hat_linux_stable || return 1 + return 0 +} + +install_red_hat_enterprise_linux_git() { + install_red_hat_linux_git || return 1 + return 0 +} + +install_red_hat_enterprise_server_stable() { + install_red_hat_linux_stable || return 1 + return 0 +} + +install_red_hat_enterprise_server_git() { + install_red_hat_linux_git || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_stable() { + install_red_hat_linux_stable || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_git() 
{ + install_red_hat_linux_git || return 1 + return 0 +} + +install_red_hat_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_red_hat_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_red_hat_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_red_hat_enterprise_linux_stable_post() { + install_red_hat_linux_stable_post || return 1 + return 0 +} + +install_red_hat_enterprise_linux_restart_daemons() { + install_red_hat_linux_restart_daemons || return 1 + return 0 +} + +install_red_hat_enterprise_linux_git_post() { + install_red_hat_linux_git_post || return 1 + return 0 +} + +install_red_hat_enterprise_server_stable_post() { + install_red_hat_linux_stable_post || return 1 + return 0 +} + +install_red_hat_enterprise_server_restart_daemons() { + install_red_hat_linux_restart_daemons || return 1 + return 0 +} + +install_red_hat_enterprise_server_git_post() { + install_red_hat_linux_git_post || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_stable_post() { + install_red_hat_linux_stable_post || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_restart_daemons() { + install_red_hat_linux_restart_daemons || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_git_post() { + install_red_hat_linux_git_post || return 1 + return 0 +} + +install_red_hat_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_red_hat_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_red_hat_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_red_hat_enterprise_server_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_red_hat_enterprise_server_testing() { + install_centos_testing || return 1 + return 0 +} + +install_red_hat_enterprise_server_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_testing() { + install_centos_testing || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_testing_post() { + install_centos_testing_post || return 1 + return 0 +} +# +# Ended RedHat Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Oracle Linux Install Functions +# +install_oracle_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_oracle_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_oracle_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_oracle_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_oracle_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_oracle_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_oracle_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_oracle_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_oracle_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_oracle_linux_restart_daemons() { + install_centos_restart_daemons || 
return 1 + return 0 +} + +install_oracle_linux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# Ended Oracle Linux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Scientific Linux Install Functions +# +install_scientific_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_scientific_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_scientific_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_scientific_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_scientific_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_scientific_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_scientific_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_scientific_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_scientific_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_scientific_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_scientific_linux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# Ended Scientific Linux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# CloudLinux Install Functions +# +install_cloud_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_cloud_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_cloud_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_cloud_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_cloud_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_cloud_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_cloud_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_cloud_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_cloud_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_cloud_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_cloud_linux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# End of CloudLinux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Alpine Linux Install Functions +# +install_alpine_linux_stable_deps() { + if ! 
grep -q '^[^#].\+alpine/.\+/community' /etc/apk/repositories; then + # Add community repository entry based on the "main" repo URL + __REPO=$(grep '^[^#].\+alpine/.\+/main\>' /etc/apk/repositories) + echo "${__REPO}" | sed -e 's/main/community/' >> /etc/apk/repositories + fi + + apk update + + # Get latest root CA certs + apk -U add ca-certificates + + if ! __check_command_exists openssl; then + # Install OpenSSL to be able to pull from https:// URLs + apk -U add openssl + fi +} + +install_alpine_linux_git_deps() { + install_alpine_linux_stable_deps || return 1 + + if ! __check_command_exists git; then + apk -U add git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + apk -U add python2 py-virtualenv py2-crypto py2-m2crypto py2-setuptools \ + py2-jinja2 py2-yaml py2-markupsafe py2-msgpack py2-psutil \ + py2-zmq zeromq py2-requests || return 1 + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + apk -U add py2-tornado || return 1 + fi + fi + else + apk -U add python2 py2-pip py2-setuptools || return 1 + _PY_EXE=python2 + return 0 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi +} + +install_alpine_linux_stable() { + __PACKAGES="salt" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + apk -U add ${__PACKAGES} || return 1 + return 0 +} + +install_alpine_linux_git() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + python2 setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install || return 1 + else + python2 setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 + fi +} + +install_alpine_linux_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /sbin/rc-update ]; then + script_url="${_SALTSTACK_REPO_URL%.git}/raw/master/pkg/alpine/salt-$fname" + [ -f "/etc/init.d/salt-$fname" ] || __fetch_url "/etc/init.d/salt-$fname" "$script_url" + + # shellcheck disable=SC2181 + if [ $? -eq 0 ]; then + chmod +x "/etc/init.d/salt-$fname" + else + echoerror "Failed to get OpenRC init script for $OS_NAME from $script_url." 
+ return 1 + fi + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + /sbin/rc-update add "salt-$fname" > /dev/null 2>&1 || return 1 + fi + done +} + +install_alpine_linux_restart_daemons() { + [ "${_START_DAEMONS}" -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # Disable stdin to fix shell session hang on killing tee pipe + /sbin/rc-service salt-$fname stop < /dev/null > /dev/null 2>&1 + /sbin/rc-service salt-$fname start < /dev/null || return 1 + done +} + +install_alpine_linux_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_alpine salt-$fname || return 1 + done + + return 0 +} + +daemons_running_alpine_linux() { + [ "${_START_DAEMONS}" -eq $BS_FALSE ] && return + + FAILED_DAEMONS=0 + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # shellcheck disable=SC2009 + if [ "$(ps wwwaux | grep -v grep | grep salt-$fname)" = "" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + done + + return $FAILED_DAEMONS +} + +# +# Ended Alpine Linux Install Functions +# +####################################################################################################################### + + +####################################################################################################################### +# +# Amazon Linux AMI Install Functions +# + +install_amazon_linux_ami_deps() { + # Shim to figure out if we're using old (rhel) or new (aws) rpms. + _USEAWS=$BS_FALSE + pkg_append="python" + + if [ "$ITYPE" = "stable" ]; then + repo_rev="$STABLE_REV" + else + repo_rev="latest" + fi + + if echo $repo_rev | grep -E -q '^archive'; then + year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4) + else + year=$(echo "$repo_rev" | cut -c1-4) + fi + + if echo "$repo_rev" | grep -E -q '^(latest|2016\.11)$' || \ + [ "$year" -gt 2016 ]; then + _USEAWS=$BS_TRUE + pkg_append="python27" + fi + + # We need to install yum-utils before doing anything else when installing on + # Amazon Linux ECS-optimized images. See issue #974. 
+ __yum_install_noinput yum-utils + + # Do upgrade early + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __REPO_FILENAME="saltstack-repo.repo" + + # Set a few vars to make life easier. + if [ $_USEAWS -eq $BS_TRUE ]; then + base_url="$HTTP_VAL://${_REPO_URL}/yum/amazon/latest/\$basearch/$repo_rev/" + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" + repo_name="SaltStack repo for Amazon Linux" + else + base_url="$HTTP_VAL://${_REPO_URL}/yum/redhat/6/\$basearch/$repo_rev/" + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" + repo_name="SaltStack repo for RHEL/CentOS 6" + fi + + # This should prob be refactored to use __install_saltstack_rhel_repository() + # With args passed in to do the right thing. Reformatted to be more like the + # amazon linux yum file. + if [ ! -s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then + cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}" +[saltstack-repo] +name=$repo_name +failovermethod=priority +priority=10 +gpgcheck=1 +gpgkey=$gpg_key +baseurl=$base_url +_eof + fi + + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + # Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 + # which is already installed + __PACKAGES="m2crypto ${pkg_append}-crypto ${pkg_append}-jinja2 ${pkg_append}-PyYAML" + __PACKAGES="${__PACKAGES} ${pkg_append}-msgpack ${pkg_append}-requests ${pkg_append}-zmq" + __PACKAGES="${__PACKAGES} ${pkg_append}-futures" + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi +} + +install_amazon_linux_ami_git_deps() { + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + yum -y install ca-certificates || return 1 + fi + + PIP_EXE='pip' + if __check_command_exists python2.7; then + if ! __check_command_exists pip2.7; then + if ! __check_command_exists easy_install-2.7; then + __yum_install_noinput python27-setuptools + fi + /usr/bin/easy_install-2.7 pip || return 1 + fi + PIP_EXE='/usr/local/bin/pip2.7' + _PY_EXE='python2.7' + fi + + install_amazon_linux_ami_deps || return 1 + + if ! 
__check_command_exists git; then + __yum_install_noinput git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + __PACKAGES="" + __PIP_PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __check_pip_allowed "You need to allow pip based installations (-P) in order to install apache-libcloud" + __PACKAGES="${__PACKAGES} python27-pip" + __PIP_PACKAGES="${__PIP_PACKAGES} apache-libcloud>=$_LIBCLOUD_MIN_VERSION" + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} ${pkg_append}-tornado" + fi + fi + + if [ "${__PACKAGES}" != "" ]; then + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${__PIP_PACKAGES}" != "" ]; then + # shellcheck disable=SC2086 + ${PIP_EXE} install ${__PIP_PACKAGES} || return 1 + fi + else + __PACKAGES="python27-pip python27-setuptools python27-devel gcc" + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_amazon_linux_ami_2_git_deps() { + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + yum -y install ca-certificates || return 1 + fi + + install_amazon_linux_ami_2_deps || return 1 + + if [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + PIP_EXE='/bin/pip' + else + PY_PKG_VER=3 + PIP_EXE='/bin/pip3' + fi + __PACKAGES="python${PY_PKG_VER}-pip" + + if ! __check_command_exists "${PIP_EXE}"; then + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if ! 
__check_command_exists git; then + __yum_install_noinput git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + __PACKAGES="" + __PIP_PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq "$BS_TRUE" ]; then + __check_pip_allowed "You need to allow pip based installations (-P) in order to install apache-libcloud" + if [ "$PARSED_VERSION" -eq "2" ]; then + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq "3" ]; then + __PACKAGES="${__PACKAGES} python3-pip" + __PIP_PACKAGES="${__PIP_PACKAGES} tornado<$_TORNADO_MAX_PY3_VERSION" + else + __PACKAGES="${__PACKAGES} python2-pip" + fi + else + __PACKAGES="${__PACKAGES} python27-pip" + fi + __PIP_PACKAGES="${__PIP_PACKAGES} apache-libcloud>=$_LIBCLOUD_MIN_VERSION" + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq "3" ]; then + __PIP_PACKAGES="${__PIP_PACKAGES} tornado<$_TORNADO_MAX_PY3_VERSION" + else + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-tornado" + fi + fi + fi + + if [ "${__PIP_PACKAGES}" != "" ]; then + __check_pip_allowed "You need to allow pip based installations (-P) in order to install ${__PIP_PACKAGES}" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pip" + fi + + if [ "${__PACKAGES}" != "" ]; then + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${__PIP_PACKAGES}" != "" ]; then + # shellcheck disable=SC2086 + ${PIP_EXE} install ${__PIP_PACKAGES} || return 1 + fi + else + __PACKAGES="python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools python${PY_PKG_VER}-devel gcc" + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_amazon_linux_ami_2_deps() { + # Shim to figure out if we're using old (rhel) or new (aws) rpms. + _USEAWS=$BS_FALSE + pkg_append="python" + + if [ "$ITYPE" = "stable" ]; then + repo_rev="$STABLE_REV" + else + repo_rev="latest" + fi + + if echo $repo_rev | grep -E -q '^archive'; then + year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4) + else + year=$(echo "$repo_rev" | cut -c1-4) + fi + + if echo "$repo_rev" | grep -E -q '^(latest|2016\.11)$' || \ + [ "$year" -gt 2016 ]; then + _USEAWS=$BS_TRUE + pkg_append="python" + fi + + # We need to install yum-utils before doing anything else when installing on + # Amazon Linux ECS-optimized images. See issue #974. 
+ __yum_install_noinput yum-utils + + # Do upgrade early + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __REPO_FILENAME="saltstack-repo.repo" + __PY_VERSION_REPO="yum" + PY_PKG_VER="" + repo_label="saltstack-repo" + repo_name="SaltStack repo for Amazon Linux 2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __REPO_FILENAME="saltstack-py3-repo.repo" + __PY_VERSION_REPO="py3" + PY_PKG_VER=3 + repo_label="saltstack-py3-repo" + repo_name="SaltStack Python 3 repo for Amazon Linux 2" + fi + + base_url="$HTTP_VAL://${_REPO_URL}/${__PY_VERSION_REPO}/amazon/2/\$basearch/$repo_rev/" + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub,${base_url}base/RPM-GPG-KEY-CentOS-7" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" + fi + + # This should prob be refactored to use __install_saltstack_rhel_repository() + # With args passed in to do the right thing. Reformatted to be more like the + # amazon linux yum file. + if [ ! -s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then + cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}" +[$repo_label] +name=$repo_name +failovermethod=priority +priority=10 +gpgcheck=1 +gpgkey=$gpg_key +baseurl=$base_url +_eof + fi + + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + # Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 + # which is already installed + if [ -n "${PY_PKG_VER}" ] && [ "${PY_PKG_VER}" -eq 3 ]; then + __PACKAGES="${pkg_append}${PY_PKG_VER}-m2crypto ${pkg_append}${PY_PKG_VER}-pyyaml" + else + __PACKAGES="m2crypto PyYAML ${pkg_append}-futures" + fi + + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-crypto ${pkg_append}${PY_PKG_VER}-jinja2 procps-ng" + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-msgpack ${pkg_append}${PY_PKG_VER}-requests ${pkg_append}${PY_PKG_VER}-zmq" + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi +} + +install_amazon_linux_ami_stable() { + install_centos_stable || return 1 + return 0 +} + +install_amazon_linux_ami_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_amazon_linux_ami_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_amazon_linux_ami_git() { + install_centos_git || return 1 + return 0 +} + +install_amazon_linux_ami_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_amazon_linux_ami_testing() { + install_centos_testing || return 1 + return 0 +} + +install_amazon_linux_ami_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_amazon_linux_ami_2_stable() { + install_centos_stable || return 1 + return 0 +} + +install_amazon_linux_ami_2_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_amazon_linux_ami_2_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_amazon_linux_ami_2_git() { + install_centos_git || return 1 + return 0 +} + +install_amazon_linux_ami_2_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_amazon_linux_ami_2_testing() { + install_centos_testing || return 1 + return 0 +} + 
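+# NOTE: The one-line wrappers above and below are deliberate, not dead code.
+# The bootstrap entry point (elsewhere in this script) appears to build
+# function names from the detected distro and install type, e.g.
+# install_amazon_linux_ami_2_stable_post, and runs a phase only when a
+# function with that exact name is defined; aliasing the shared CentOS
+# implementations per distro is what makes each phase discoverable.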
+install_amazon_linux_ami_2_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_amazon_linux_ami_2_check_services() { + install_centos_check_services || return 1 + return 0 +} + +# +# Ended Amazon Linux AMI Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Arch Install Functions +# +install_arch_linux_stable_deps() { + if [ ! -f /etc/pacman.d/gnupg ]; then + pacman-key --init && pacman-key --populate archlinux || return 1 + fi + + # Pacman does not resolve dependencies on outdated versions + # They always need to be updated + pacman -Syy --noconfirm + + pacman -S --noconfirm --needed archlinux-keyring || return 1 + + pacman -Su --noconfirm --needed pacman || return 1 + + if __check_command_exists pacman-db-upgrade; then + pacman-db-upgrade || return 1 + fi + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + else + PY_PKG_VER="" + fi + + # YAML module is used for generating custom master/minion configs + # shellcheck disable=SC2086 + pacman -Su --noconfirm --needed python${PY_PKG_VER}-yaml + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # shellcheck disable=SC2086 + pacman -Su --noconfirm --needed python${PY_PKG_VER}-apache-libcloud || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + pacman -Su --noconfirm --needed ${_EXTRA_PACKAGES} || return 1 + fi +} + +install_arch_linux_git_deps() { + install_arch_linux_stable_deps + + # Don't fail if un-installing python2-distribute threw an error + if ! __check_command_exists git; then + pacman -Sy --noconfirm --needed git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + pacman -R --noconfirm python2-distribute + pacman -Su --noconfirm --needed python2-crypto python2-setuptools python2-jinja \ + python2-m2crypto python2-futures python2-markupsafe python2-msgpack python2-psutil \ + python2-pyzmq zeromq python2-requests python2-systemd || return 1 + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + pacman -Su --noconfirm --needed python2-tornado + fi + fi + else + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + else + PY_PKG_VER="" + fi + __PACKAGES="python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + # shellcheck disable=SC2086 + pacman -Su --noconfirm --needed ${__PACKAGES} + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_arch_linux_stable() { + # Pacman does not resolve dependencies on outdated versions + # They always need to be updated + pacman -Syy --noconfirm + + pacman -Su --noconfirm --needed pacman || return 1 + # See https://mailman.archlinux.org/pipermail/arch-dev-public/2013-June/025043.html + # to know why we're ignoring below. 
+ pacman -Syu --noconfirm --ignore filesystem,bash || return 1 + pacman -S --noconfirm --needed bash || return 1 + pacman -Su --noconfirm || return 1 + # We can now resume regular salt update + pacman -Syu --noconfirm salt python2-futures || return 1 + return 0 +} + +install_arch_linux_git() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + python2 setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install || return 1 + else + python2 setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 + fi + return 0 +} + +install_arch_linux_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # Since Arch's pacman renames configuration files + if [ "$_TEMP_CONFIG_DIR" != "null" ] && [ -f "$_SALT_ETC_DIR/$fname.pacorig" ]; then + # Since a configuration directory was provided, it also means that any + # configuration file copied was renamed by Arch, see: + # https://wiki.archlinux.org/index.php/Pacnew_and_Pacsave_Files#.pacorig + __copyfile "$_SALT_ETC_DIR/$fname.pacorig" "$_SALT_ETC_DIR/$fname" $BS_TRUE + fi + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + if [ -f /usr/bin/systemctl ]; then + # Using systemd + /usr/bin/systemctl is-enabled salt-$fname.service > /dev/null 2>&1 || ( + /usr/bin/systemctl preset salt-$fname.service > /dev/null 2>&1 && + /usr/bin/systemctl enable salt-$fname.service > /dev/null 2>&1 + ) + sleep 1 + /usr/bin/systemctl daemon-reload + continue + fi + + # XXX: How do we enable old Arch init.d scripts? + done +} + +install_arch_linux_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /usr/bin/systemctl ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + /usr/bin/systemctl is-enabled salt-${fname}.service > /dev/null 2>&1 || ( + /usr/bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 && + /usr/bin/systemctl enable salt-${fname}.service > /dev/null 2>&1 + ) + sleep 1 + /usr/bin/systemctl daemon-reload + continue + fi + + # SysV init!? 
+ __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-$fname" "/etc/rc.d/init.d/salt-$fname" + chmod +x /etc/rc.d/init.d/salt-$fname + done +} + +install_arch_linux_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /usr/bin/systemctl ]; then + /usr/bin/systemctl stop salt-$fname.service > /dev/null 2>&1 + /usr/bin/systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + fi + + /etc/rc.d/salt-$fname stop > /dev/null 2>&1 + /etc/rc.d/salt-$fname start + done +} + +install_arch_check_services() { + if [ ! -f /usr/bin/systemctl ]; then + # Not running systemd!? Don't check! + return 0 + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || return 1 + done + + return 0 +} +# +# Ended Arch Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# FreeBSD Install Functions +# + +# Using a separate conf step to head for idempotent install... +__configure_freebsd_pkg_details() { + _SALT_ETC_DIR="/usr/local/etc/salt" +} + +install_freebsd_deps() { + __configure_freebsd_pkg_details + pkg install -y pkg +} + +install_freebsd_git_deps() { + install_freebsd_deps || return 1 + + if ! __check_command_exists git; then + /usr/local/sbin/pkg install -y git || return 1 + fi + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + SALT_DEPENDENCIES=$(/usr/local/sbin/pkg rquery %dn py37-salt) + # shellcheck disable=SC2086 + /usr/local/sbin/pkg install -y ${SALT_DEPENDENCIES} python || return 1 + + /usr/local/sbin/pkg install -y py37-requests || return 1 + else + /usr/local/sbin/pkg install -y python python-pip python-setuptools || return 1 + fi + + echodebug "Adapting paths to FreeBSD" + # The list of files was taken from Salt's BSD port Makefile + for file in doc/man/salt-key.1 doc/man/salt-cp.1 doc/man/salt-minion.1 \ + doc/man/salt-syndic.1 doc/man/salt-master.1 doc/man/salt-run.1 \ + doc/man/salt.7 doc/man/salt.1 doc/man/salt-call.1; do + [ ! -f $file ] && continue + echodebug "Patching ${file}" + sed -in -e "s|/etc/salt|${_SALT_ETC_DIR}|" \ + -e "s|/srv/salt|${_SALT_ETC_DIR}/states|" \ + -e "s|/srv/pillar|${_SALT_ETC_DIR}/pillar|" ${file} + done + if [ ! 
-f salt/syspaths.py ]; then + # We still can't provide the system paths, salt 0.16.x + # Let's patch salt's source and adapt paths to what's expected on FreeBSD + echodebug "Replacing occurrences of '/etc/salt' with ${_SALT_ETC_DIR}" + # The list of files was taken from Salt's BSD port Makefile + for file in conf/minion conf/master salt/config.py salt/client.py \ + salt/modules/mysql.py salt/utils/parsers.py salt/modules/tls.py \ + salt/modules/postgres.py salt/utils/migrations.py; do + [ ! -f $file ] && continue + echodebug "Patching ${file}" + sed -in -e "s|/etc/salt|${_SALT_ETC_DIR}|" \ + -e "s|/srv/salt|${_SALT_ETC_DIR}/states|" \ + -e "s|/srv/pillar|${_SALT_ETC_DIR}/pillar|" ${file} + done + fi + echodebug "Finished patching" + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + + fi + + return 0 +} + +install_freebsd_stable() { +# +# installing latest version of salt from FreeBSD CURRENT ports repo +# + # shellcheck disable=SC2086 + /usr/local/sbin/pkg install -y py37-salt || return 1 + + return 0 +} + +install_freebsd_git() { + + # /usr/local/bin/python3 in FreeBSD is a symlink to /usr/local/bin/python3.7 + __PYTHON_PATH=$(readlink -f "$(command -v python3)") + __ESCAPED_PYTHON_PATH=$(echo "${__PYTHON_PATH}" | sed 's/\//\\\//g') + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${__PYTHON_PATH}" || return 1 + return 0 + fi + + # Install from git + if [ ! -f salt/syspaths.py ]; then + # We still can't provide the system paths, salt 0.16.x + ${__PYTHON_PATH} setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 + else + ${__PYTHON_PATH} setup.py \ + --salt-root-dir=/ \ + --salt-config-dir="${_SALT_ETC_DIR}" \ + --salt-cache-dir="${_SALT_CACHE_DIR}" \ + --salt-sock-dir=/var/run/salt \ + --salt-srv-root-dir="${_SALT_ETC_DIR}" \ + --salt-base-file-roots-dir="${_SALT_ETC_DIR}/states" \ + --salt-base-pillar-roots-dir="${_SALT_ETC_DIR}/pillar" \ + --salt-base-master-roots-dir="${_SALT_ETC_DIR}/salt-master" \ + --salt-logs-dir=/var/log/salt \ + --salt-pidfile-dir=/var/run \ + ${SETUP_PY_INSTALL_ARGS} install \ + || return 1 + fi + + for script in salt_api salt_master salt_minion salt_proxy salt_syndic; do + __fetch_url "/usr/local/etc/rc.d/${script}" "https://raw.githubusercontent.com/freebsd/freebsd-ports/master/sysutils/py-salt/files/${script}.in" || return 1 + sed -i '' 's/%%PREFIX%%/\/usr\/local/g' /usr/local/etc/rc.d/${script} + sed -i '' "s/%%PYTHON_CMD%%/${__ESCAPED_PYTHON_PATH}/g" /usr/local/etc/rc.d/${script} + chmod +x /usr/local/etc/rc.d/${script} || return 1 + done + + # And we're good to go + return 0 +} + +install_freebsd_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + enable_string="salt_${fname}_enable=YES" + grep "$enable_string" /etc/rc.conf >/dev/null 2>&1 + [ $? 
-eq 1 ] && sysrc $enable_string + + done +} + +install_freebsd_git_post() { + install_freebsd_stable_post || return 1 + return 0 +} + +install_freebsd_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + service salt_$fname stop > /dev/null 2>&1 + service salt_$fname start + done +} +# +# Ended FreeBSD Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# OpenBSD Install Functions +# + +install_openbsd_deps() { + if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then + OPENBSD_REPO='https://cdn.openbsd.org/pub/OpenBSD' + echoinfo "setting package repository to $OPENBSD_REPO" + echo "${OPENBSD_REPO}" >/etc/installurl || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + pkg_add -I -v ${_EXTRA_PACKAGES} || return 1 + fi + return 0 +} + +install_openbsd_git_deps() { + install_openbsd_deps || return 1 + + if ! __check_command_exists git; then + pkg_add -I -v git || return 1 + fi + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + pkg_add -I -v py-pip py-setuptools + fi + + # + # Let's trigger config_salt() + # + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_openbsd_git() { + # + # Install from git + # + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ ! 
-f salt/syspaths.py ]; then
+        # We still can't provide the system paths, salt 0.16.x
+        /usr/local/bin/python2.7 setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1
+    fi
+    return 0
+}
+
+install_openbsd_stable() {
+    pkg_add -r -I -v salt || return 1
+    return 0
+}
+
+install_openbsd_post() {
+    for fname in api master minion syndic; do
+        [ $fname = "api" ] && continue
+        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
+        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
+        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
+
+        rcctl enable salt_$fname
+    done
+
+    return 0
+}
+
+install_openbsd_check_services() {
+    for fname in api master minion syndic; do
+        # Skip salt-api since the service should be opt-in and not necessarily started on boot
+        [ $fname = "api" ] && continue
+
+        # Skip if not meant to be installed
+        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
+        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
+        [ $fname = "syndic" ] && continue
+
+        if [ -f /etc/rc.d/salt_${fname} ]; then
+            __check_services_openbsd salt_${fname} || return 1
+        fi
+    done
+
+    return 0
+}
+
+install_openbsd_restart_daemons() {
+    [ $_START_DAEMONS -eq $BS_FALSE ] && return
+
+    for fname in api master minion syndic; do
+        # Skip salt-api since the service should be opt-in and not necessarily started on boot
+        [ $fname = "api" ] && continue
+
+        # Skip if not meant to be installed
+        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
+        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
+        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
+
+        rcctl restart salt_${fname}
+    done
+
+    return 0
+}
+
+#
+# Ended OpenBSD Install Functions
+#
+#######################################################################################################################
+
+#######################################################################################################################
+#
+# SmartOS Install Functions
+#
+install_smartos_deps() {
+    smartos_deps="$(pkgin show-deps salt | grep '^\s' | grep -v '\snot' | xargs) py27-m2crypto"
+    # Leave ${smartos_deps} unquoted: it is a space-separated package list and
+    # pkgin needs each package as its own argument.
+    # shellcheck disable=SC2086
+    pkgin -y install ${smartos_deps} || return 1
+
+    # Set _SALT_ETC_DIR to SmartOS default if they didn't specify
+    _SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/opt/local/etc/salt}
+    # We also need to redefine the PKI directory
+    _PKI_DIR=${_SALT_ETC_DIR}/pki
+
+    # Let's trigger config_salt()
+    if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
+        # Let's set the configuration directory to /tmp
+        _TEMP_CONFIG_DIR="/tmp"
+        CONFIG_SALT_FUNC="config_salt"
+
+        # Let's download, since they were not provided, the default configuration files
+        if [ ! -f "$_SALT_ETC_DIR/minion" ] && [ ! -f "$_TEMP_CONFIG_DIR/minion" ]; then
+            # shellcheck disable=SC2086
+            curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/minion" -L \
+                https://raw.githubusercontent.com/saltstack/salt/master/conf/minion || return 1
+        fi
+        if [ ! -f "$_SALT_ETC_DIR/master" ] && [ ! 
-f $_TEMP_CONFIG_DIR/master ]; then + # shellcheck disable=SC2086 + curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/master" -L \ + https://raw.githubusercontent.com/saltstack/salt/master/conf/master || return 1 + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + pkgin -y install py27-apache-libcloud || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + pkgin -y install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_smartos_git_deps() { + install_smartos_deps || return 1 + + if ! __check_command_exists git; then + pkgin -y install git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # Install whichever tornado is in the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + __check_pip_allowed "You need to allow pip based installations (-P) in order to install the python package '${__REQUIRED_TORNADO}'" + + # Install whichever futures is in the requirements file + __REQUIRED_FUTURES="$(grep futures "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + __check_pip_allowed "You need to allow pip based installations (-P) in order to install the python package '${__REQUIRED_FUTURES}'" + + if [ "${__REQUIRED_TORNADO}" != "" ]; then + if ! __check_command_exists pip; then + pkgin -y install py27-pip + fi + pip install -U "${__REQUIRED_TORNADO}" + fi + + if [ "${__REQUIRED_FUTURES}" != "" ]; then + if ! __check_command_exists pip; then + pkgin -y install py27-pip + fi + pip install -U "${__REQUIRED_FUTURES}" + fi + fi + else + if ! __check_command_exists pip; then + pkgin -y install py27-pip + fi + pkgin -y install py27-setuptools + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_smartos_stable() { + pkgin -y install salt || return 1 + return 0 +} + +install_smartos_git() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + # Use setuptools in order to also install dependencies + # lets force our config path on the setup for now, since salt/syspaths.py only got fixed in 2015.5.0 + USE_SETUPTOOLS=1 /opt/local/bin/python setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install || return 1 + return 0 +} + +install_smartos_post() { + smf_dir="/opt/custom/smf" + + # Install manifest files if needed. + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + svcs network/salt-$fname > /dev/null 2>&1 + if [ $? -eq 1 ]; then + if [ ! 
-f "$_TEMP_CONFIG_DIR/salt-$fname.xml" ]; then + # shellcheck disable=SC2086 + curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/salt-$fname.xml" -L \ + "https://raw.githubusercontent.com/saltstack/salt/master/pkg/smartos/salt-$fname.xml" + fi + svccfg import "$_TEMP_CONFIG_DIR/salt-$fname.xml" + if [ "${VIRTUAL_TYPE}" = "global" ]; then + if [ ! -d "$smf_dir" ]; then + mkdir -p "$smf_dir" || return 1 + fi + if [ ! -f "$smf_dir/salt-$fname.xml" ]; then + __copyfile "$_TEMP_CONFIG_DIR/salt-$fname.xml" "$smf_dir/" || return 1 + fi + fi + fi + done + + return 0 +} + +install_smartos_git_post() { + smf_dir="/opt/custom/smf" + + # Install manifest files if needed. + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + svcs "network/salt-$fname" > /dev/null 2>&1 + if [ $? -eq 1 ]; then + svccfg import "${_SALT_GIT_CHECKOUT_DIR}/pkg/smartos/salt-$fname.xml" + if [ "${VIRTUAL_TYPE}" = "global" ]; then + if [ ! -d $smf_dir ]; then + mkdir -p "$smf_dir" + fi + if [ ! -f "$smf_dir/salt-$fname.xml" ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/smartos/salt-$fname.xml" "$smf_dir/" + fi + fi + fi + done + + return 0 +} + +install_smartos_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # Stop if running && Start service + svcadm disable salt-$fname > /dev/null 2>&1 + svcadm enable salt-$fname + done + + return 0 +} +# +# Ended SmartOS Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# openSUSE Install Functions. +# +__ZYPPER_REQUIRES_REPLACE_FILES=-1 + +__set_suse_pkg_repo() { + + # Set distro repo variable + if [ "${DISTRO_MAJOR_VERSION}" -gt 2015 ]; then + DISTRO_REPO="openSUSE_Tumbleweed" + elif [ "${DISTRO_MAJOR_VERSION}" -ge 42 ] || [ "${DISTRO_MAJOR_VERSION}" -eq 15 ]; then + DISTRO_REPO="openSUSE_Leap_${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}" + else + DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}_SP${SUSE_PATCHLEVEL}" + fi + + if [ "$_DOWNSTREAM_PKG_REPO" -eq $BS_TRUE ]; then + suse_pkg_url_base="https://download.opensuse.org/repositories/systemsmanagement:/saltstack" + suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo" + else + suse_pkg_url_base="${HTTP_VAL}://repo.saltstack.com/opensuse" + suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack:products.repo" + fi + SUSE_PKG_URL="$suse_pkg_url_base/$suse_pkg_url_path" +} + +__check_and_refresh_suse_pkg_repo() { + # Check to see if systemsmanagement_saltstack exists + __zypper repos | grep -q systemsmanagement_saltstack + + if [ $? 
-eq 1 ]; then
+        # zypper does not yet know anything about systemsmanagement_saltstack
+        __zypper addrepo --refresh "${SUSE_PKG_URL}" || return 1
+    fi
+}
+
+__version_lte() {
+    if ! __check_command_exists python; then
+        zypper --non-interactive install --replacefiles --auto-agree-with-licenses python || \
+             zypper --non-interactive install --auto-agree-with-licenses python || return 1
+    fi
+
+    # NOTE: the inline comparison below uses a bare Python 2 "print" statement,
+    # so it requires the python binary installed above to be Python 2.
+    if [ "$(python -c 'import sys; V1=tuple([int(i) for i in sys.argv[1].split(".")]); V2=tuple([int(i) for i in sys.argv[2].split(".")]); print V1<=V2' "$1" "$2")" = "True" ]; then
+        __ZYPPER_REQUIRES_REPLACE_FILES=${BS_TRUE}
+    else
+        __ZYPPER_REQUIRES_REPLACE_FILES=${BS_FALSE}
+    fi
+}
+
+__zypper() {
+    # Check if any zypper process is running before calling zypper again.
+    # This is useful when a zypper call is part of a boot process and will
+    # wait until the zypper process is finished, such as on AWS AMIs.
+    while pgrep -l zypper; do
+        sleep 1
+    done
+
+    # Capture zypper's exit code in a variable: testing "$?" twice would make
+    # the second test compare the result of the first test, not zypper's.
+    zypper --non-interactive "${@}"
+    _ZYPPER_EXIT_CODE=$?
+    # Return codes between 100 and 104 are informational only, not errors
+    # https://en.opensuse.org/SDB:Zypper_manual#EXIT_CODES
+    if [ "$_ZYPPER_EXIT_CODE" -gt "99" ] && [ "$_ZYPPER_EXIT_CODE" -le "104" ]; then
+        return 0
+    fi
+    return $_ZYPPER_EXIT_CODE
+}
+
+__zypper_install() {
+    if [ "${__ZYPPER_REQUIRES_REPLACE_FILES}" = "-1" ]; then
+        __version_lte "1.10.4" "$(zypper --version | awk '{ print $2 }')"
+    fi
+    if [ "${__ZYPPER_REQUIRES_REPLACE_FILES}" = "${BS_TRUE}" ]; then
+        # In case of file conflicts replace old files.
+        # Option present in zypper 1.10.4 and newer:
+        # https://github.com/openSUSE/zypper/blob/95655728d26d6d5aef7796b675f4cc69bc0c05c0/package/zypper.changes#L253
+        __zypper install --auto-agree-with-licenses --replacefiles "${@}"; return $?
+    else
+        __zypper install --auto-agree-with-licenses "${@}"; return $?
+    fi
+}
+
+__opensuse_prep_install() {
+    # DRY function for common installation preparatory steps for SUSE
+    if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then
+        # Is the repository already known
+        __set_suse_pkg_repo
+        # Check zypper repos and refresh if necessary
+        __check_and_refresh_suse_pkg_repo
+    fi
+
+    # Capture the refresh exit code before the tests below reset "$?".
+    __zypper --gpg-auto-import-keys refresh
+    _REFRESH_EXIT_CODE=$?
+
+    if [ "$_REFRESH_EXIT_CODE" -ne 0 ] && [ "$_REFRESH_EXIT_CODE" -ne 4 ]; then
+        # If the exit code is not 0, and it's not 4 (failed to update a
+        # repository) return a failure. Otherwise continue.
+        return 1
+    fi
+
+    if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
+        __zypper --gpg-auto-import-keys update || return 1
+    fi
+}
+
+install_opensuse_stable_deps() {
+    __opensuse_prep_install || return 1
+
+    if [ "$DISTRO_MAJOR_VERSION" -eq 12 ] && [ "$DISTRO_MINOR_VERSION" -eq 3 ]; then
+        # Because patterns-openSUSE-minimal_base-conflicts conflicts with python, let's remove the first one
+        __zypper remove patterns-openSUSE-minimal_base-conflicts
+    fi
+
+    # YAML module is used for generating custom master/minion configs
+    # requests is still used by many salt modules
+    # Salt needs python-zypp installed in order to use the zypper module
+    __PACKAGES="python-PyYAML python-requests python-zypp"
+
+    # shellcheck disable=SC2086
+    __zypper_install ${__PACKAGES} || return 1
+
+    if [ "${_EXTRA_PACKAGES}" != "" ]; then
+        echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
+        # shellcheck disable=SC2086
+        __zypper_install ${_EXTRA_PACKAGES} || return 1
+    fi
+
+    return 0
+}
+
+install_opensuse_git_deps() {
+    if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ] && ! 
__check_command_exists update-ca-certificates; then + __zypper_install ca-certificates || return 1 + fi + + install_opensuse_stable_deps || return 1 + + if ! __check_command_exists git; then + __zypper_install git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + __zypper_install patch || return 1 + + __PACKAGES="libzmq5 python-Jinja2 python-m2crypto python-msgpack-python python-pycrypto python-pyzmq python-xml python-futures" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} python-tornado" + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-apache-libcloud" + fi + else + __PACKAGES="python-pip python-setuptools gcc" + fi + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_opensuse_stable() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __zypper_install $__PACKAGES || return 1 + + return 0 +} + +install_opensuse_git() { + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + python setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + return 0 +} + +install_opensuse_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ] || [ -f /usr/bin/systemctl ]; then + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + continue + fi + + /sbin/chkconfig --add salt-$fname + /sbin/chkconfig salt-$fname on + done + + return 0 +} + +install_opensuse_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! 
__check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + use_usr_lib=$BS_FALSE + + if [ "${DISTRO_MAJOR_VERSION}" -ge 15 ]; then + use_usr_lib=$BS_TRUE + fi + + if [ "${DISTRO_MAJOR_VERSION}" -eq 12 ] && [ -d "/usr/lib/systemd/" ]; then + use_usr_lib=$BS_TRUE + fi + + if [ "${use_usr_lib}" -eq $BS_TRUE ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/usr/lib/systemd/system/salt-${fname}.service" + else + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + fi + + continue + fi + + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-$fname" "/etc/init.d/salt-$fname" + chmod +x /etc/init.d/salt-$fname + done + + install_opensuse_stable_post || return 1 + + return 0 +} + +install_opensuse_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + fi + + service salt-$fname stop > /dev/null 2>&1 + service salt-$fname start + done +} + +install_opensuse_check_services() { + if [ ! -f /bin/systemctl ]; then + # Not running systemd!? Don't check! + return 0 + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname > /dev/null 2>&1 || __check_services_systemd salt-$fname.service > /dev/null 2>&1 || return 1 + done + + return 0 +} +# +# End of openSUSE Install Functions. 
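As a worked example of the repo URL that `__set_suse_pkg_repo` above composes, here is a minimal sketch; the Leap version and the downstream-repo choice are assumed values for illustration:

```sh
# Assumed: DISTRO_MAJOR_VERSION=15, DISTRO_MINOR_VERSION=2, _DOWNSTREAM_PKG_REPO enabled
DISTRO_REPO="openSUSE_Leap_15.2"
SUSE_PKG_URL="https://download.opensuse.org/repositories/systemsmanagement:/saltstack/${DISTRO_REPO}/systemsmanagement:saltstack.repo"
# __check_and_refresh_suse_pkg_repo then registers the repo once, if zypper
# does not already know about systemsmanagement_saltstack:
zypper --non-interactive addrepo --refresh "${SUSE_PKG_URL}"
```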
+# +####################################################################################################################### + +####################################################################################################################### +# +# openSUSE Leap 15 +# + +install_opensuse_15_stable_deps() { + __opensuse_prep_install || return 1 + + # SUSE only packages Salt for Python 3 on Leap 15 + # Py3 is the default bootstrap install for Leap 15 + # However, git installs might specify "-x python2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + else + PY_PKG_VER=3 + fi + + # YAML module is used for generating custom master/minion configs + # requests is still used by many salt modules + __PACKAGES="python${PY_PKG_VER}-PyYAML python${PY_PKG_VER}-requests" + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __zypper_install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_opensuse_15_git_deps() { + install_opensuse_15_stable_deps || return 1 + + if ! __check_command_exists git; then + __zypper_install git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + else + PY_PKG_VER=3 + fi + + __PACKAGES="python${PY_PKG_VER}-xml" + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + # Py3 is the default bootstrap install for Leap 15 + # However, git installs might specify "-x python2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + # This is required by some of the python2 packages below + __PACKAGES="${__PACKAGES} libpython2_7-1_0 python2-futures python-ipaddress" + fi + + __PACKAGES="${__PACKAGES} libzmq5 python${PY_PKG_VER}-Jinja2 python${PY_PKG_VER}-msgpack" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pycrypto python${PY_PKG_VER}-pyzmq" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado" + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-apache-libcloud" + fi + else + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + fi + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_opensuse_15_git() { + + # Py3 is the default bootstrap install for Leap 15 + if [ -n "$_PY_EXE" ]; then + _PYEXE=${_PY_EXE} + else + _PYEXE=python3 + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + ${_PYEXE} setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + return 0 +} + +# +# End of openSUSE Leap 15 +# +####################################################################################################################### + +####################################################################################################################### +# +# 
SUSE Enterprise 15 +# + +install_suse_15_stable_deps() { + __opensuse_prep_install || return 1 + install_opensuse_15_stable_deps || return 1 + + return 0 +} + +install_suse_15_git_deps() { + install_suse_15_stable_deps || return 1 + + if ! __check_command_exists git; then + __zypper_install git-core || return 1 + fi + + install_opensuse_15_git_deps || return 1 + + return 0 +} + +install_suse_15_stable() { + install_opensuse_stable || return 1 + return 0 +} + +install_suse_15_git() { + install_opensuse_15_git || return 1 + return 0 +} + +install_suse_15_stable_post() { + install_opensuse_stable_post || return 1 + return 0 +} + +install_suse_15_git_post() { + install_opensuse_git_post || return 1 + return 0 +} + +install_suse_15_restart_daemons() { + install_opensuse_restart_daemons || return 1 + return 0 +} + +# +# End of SUSE Enterprise 15 +# +####################################################################################################################### + +####################################################################################################################### +# +# SUSE Enterprise 12 +# + +install_suse_12_stable_deps() { + __opensuse_prep_install || return 1 + + # YAML module is used for generating custom master/minion configs + # requests is still used by many salt modules + # Salt needs python-zypp installed in order to use the zypper module + __PACKAGES="python-PyYAML python-requests python-zypp" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-apache-libcloud" + fi + + # shellcheck disable=SC2086,SC2090 + __zypper_install ${__PACKAGES} || return 1 + + # SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which + # we want to install, even with --non-interactive. + # Let's try to install the higher version first and then the lower one in case of failure + __zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __zypper_install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_suse_12_git_deps() { + install_suse_12_stable_deps || return 1 + + if ! 
__check_command_exists git; then + __zypper_install git-core || return 1 + fi + + __git_clone_and_checkout || return 1 + + __PACKAGES="" + # shellcheck disable=SC2089 + __PACKAGES="${__PACKAGES} libzmq4 python-Jinja2 python-msgpack-python python-pycrypto" + __PACKAGES="${__PACKAGES} python-pyzmq python-xml" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} python-tornado" + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-apache-libcloud" + fi + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_suse_12_stable() { + install_opensuse_stable || return 1 + return 0 +} + +install_suse_12_git() { + install_opensuse_git || return 1 + return 0 +} + +install_suse_12_stable_post() { + install_opensuse_stable_post || return 1 + return 0 +} + +install_suse_12_git_post() { + install_opensuse_git_post || return 1 + return 0 +} + +install_suse_12_restart_daemons() { + install_opensuse_restart_daemons || return 1 + return 0 +} + +# +# End of SUSE Enterprise 12 +# +####################################################################################################################### + +####################################################################################################################### +# +# SUSE Enterprise 11 +# + +install_suse_11_stable_deps() { + __opensuse_prep_install || return 1 + + # YAML module is used for generating custom master/minion configs + __PACKAGES="python-PyYAML" + + # shellcheck disable=SC2086,SC2090 + __zypper_install ${__PACKAGES} || return 1 + + # SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which + # we want to install, even with --non-interactive. + # Let's try to install the higher version first and then the lower one in case of failure + __zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __zypper_install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_suse_11_git_deps() { + install_suse_11_stable_deps || return 1 + + if ! 
__check_command_exists git; then + __zypper_install git || return 1 + fi + + __git_clone_and_checkout || return 1 + + __PACKAGES="" + # shellcheck disable=SC2089 + __PACKAGES="${__PACKAGES} libzmq4 python-Jinja2 python-msgpack-python python-pycrypto" + __PACKAGES="${__PACKAGES} python-pyzmq python-xml python-zypp" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} python-tornado" + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-apache-libcloud" + fi + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_suse_11_stable() { + install_opensuse_stable || return 1 + return 0 +} + +install_suse_11_git() { + install_opensuse_git || return 1 + return 0 +} + +install_suse_11_stable_post() { + install_opensuse_stable_post || return 1 + return 0 +} + +install_suse_11_git_post() { + install_opensuse_git_post || return 1 + return 0 +} + +install_suse_11_restart_daemons() { + install_opensuse_restart_daemons || return 1 + return 0 +} + + +# +# End of SUSE Enterprise 11 +# +####################################################################################################################### + +####################################################################################################################### +# +# SUSE Enterprise General Functions +# + +# Used for both SLE 11 and 12 +install_suse_check_services() { + if [ ! -f /bin/systemctl ]; then + # Not running systemd!? Don't check! + return 0 + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || return 1 + done + + return 0 +} + +# +# End of SUSE Enterprise General Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Gentoo Install Functions. +# +__autounmask() { + emerge --autounmask-write --autounmask-only "${@}"; return $? +} + +__emerge() { + if [ "$_GENTOO_USE_BINHOST" -eq $BS_TRUE ]; then + emerge --getbinpkg "${@}"; return $? + fi + emerge "${@}"; return $? 
+} + +__gentoo_config_protection() { + # usually it's a good thing to have config files protected by portage, but + # in this case this would require to interrupt the bootstrapping script at + # this point, manually merge the changes using etc-update/dispatch-conf/ + # cfg-update and then restart the bootstrapping script, so instead we allow + # at this point to modify certain config files directly + export CONFIG_PROTECT_MASK="${CONFIG_PROTECT_MASK:-} /etc/portage/package.accept_keywords /etc/portage/package.keywords /etc/portage/package.license /etc/portage/package.unmask /etc/portage/package.use" + + # emerge currently won't write to files that aren't there, so we need to ensure their presence + touch /etc/portage/package.accept_keywords /etc/portage/package.keywords /etc/portage/package.license /etc/portage/package.unmask /etc/portage/package.use +} + +__gentoo_pre_dep() { + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + if __check_command_exists eix; then + eix-sync + else + emerge --sync + fi + else + if __check_command_exists eix; then + eix-sync -q + else + emerge --sync --quiet + fi + fi + if [ ! -d /etc/portage ]; then + mkdir /etc/portage + fi +} + +__gentoo_post_dep() { + # ensures dev-lib/crypto++ compiles happily + __emerge --oneshot 'sys-devel/libtool' + # the -o option asks it to emerge the deps but not the package. + __gentoo_config_protection + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __autounmask 'dev-python/libcloud' + __emerge -v 'dev-python/libcloud' + fi + + __autounmask 'dev-python/requests' + __autounmask 'app-admin/salt' + + __emerge -vo 'dev-python/requests' + __emerge -vo 'app-admin/salt' + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __autounmask ${_EXTRA_PACKAGES} || return 1 + # shellcheck disable=SC2086 + __emerge -v ${_EXTRA_PACKAGES} || return 1 + fi +} + +install_gentoo_deps() { + __gentoo_pre_dep || return 1 + __gentoo_post_dep || return 1 +} + +install_gentoo_git_deps() { + __gentoo_pre_dep || return 1 + __gentoo_post_dep || return 1 +} + +install_gentoo_stable() { + __gentoo_config_protection + __emerge -v 'app-admin/salt' || return 1 +} + +install_gentoo_git() { + __gentoo_config_protection + __emerge -v '=app-admin/salt-9999' || return 1 +} + +install_gentoo_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -d "/run/systemd/system" ]; then + systemctl enable salt-$fname.service + systemctl start salt-$fname.service + else + rc-update add salt-$fname default + /etc/init.d/salt-$fname start + fi + done +} + +install_gentoo_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -d 
"/run/systemd/system" ]; then + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + else + /etc/init.d/salt-$fname stop > /dev/null 2>&1 + /etc/init.d/salt-$fname start + fi + done +} + +install_gentoo_check_services() { + if [ ! -d "/run/systemd/system" ]; then + # Not running systemd!? Don't check! + return 0 + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || return 1 + done + + return 0 +} +# +# End of Gentoo Install Functions. +# +####################################################################################################################### + +####################################################################################################################### +# +# VoidLinux Install Functions +# +install_voidlinux_stable_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + xbps-install -Suy || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + xbps-install -Suy "${_EXTRA_PACKAGES}" || return 1 + fi + + return 0 +} + +install_voidlinux_stable() { + xbps-install -Suy salt || return 1 + return 0 +} + +install_voidlinux_stable_post() { + for fname in master minion syndic; do + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + ln -s /etc/sv/salt-$fname /var/service/. 
+ done +} + +install_voidlinux_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in master minion syndic; do + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + sv restart salt-$fname + done +} + +install_voidlinux_check_services() { + for fname in master minion syndic; do + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + [ -e /var/service/salt-$fname ] || return 1 + done + + return 0 +} + +daemons_running_voidlinux() { + [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 + + FAILED_DAEMONS=0 + for fname in master minion syndic; do + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ "$(sv status salt-$fname | grep run)" = "" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + done + + return $FAILED_DAEMONS +} +# +# Ended VoidLinux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# OS X / Darwin Install Functions +# + +__macosx_get_packagesite() { + DARWIN_ARCH="x86_64" + + __PY_VERSION_REPO="py2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + PKG="salt-${STABLE_REV}-${__PY_VERSION_REPO}-${DARWIN_ARCH}.pkg" + SALTPKGCONFURL="https://repo.saltstack.com/osx/${PKG}" +} + +# Using a separate conf step to head for idempotent install... +__configure_macosx_pkg_details() { + __macosx_get_packagesite || return 1 + return 0 +} + +install_macosx_stable_deps() { + __configure_macosx_pkg_details || return 1 + return 0 +} + +install_macosx_git_deps() { + install_macosx_stable_deps || return 1 + + if ! echo "$PATH" | grep -q /usr/local/bin; then + echowarn "/usr/local/bin was not found in \$PATH. Adding it for the duration of the script execution." 
+        export PATH=/usr/local/bin:$PATH
+    fi
+
+    __fetch_url "/tmp/get-pip.py" "https://bootstrap.pypa.io/get-pip.py" || return 1
+
+    if [ -n "$_PY_EXE" ]; then
+        _PYEXE=${_PY_EXE}
+    else
+        _PYEXE=python2.7
+    fi
+
+    # Install PIP
+    $_PYEXE /tmp/get-pip.py || return 1
+
+    __git_clone_and_checkout || return 1
+
+    if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then
+        return 0
+    fi
+
+    __PIP_REQUIREMENTS="dev_python27.txt"
+    if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
+        __PIP_REQUIREMENTS="dev_python34.txt"
+    fi
+
+    requirements_file="${_SALT_GIT_CHECKOUT_DIR}/requirements/${__PIP_REQUIREMENTS}"
+    pip install -U -r "${requirements_file}" --install-option="--prefix=/opt/salt" || return 1
+
+    return 0
+}
+
+install_macosx_stable() {
+    install_macosx_stable_deps || return 1
+
+    /usr/bin/curl "${SALTPKGCONFURL}" > "/tmp/${PKG}" || return 1
+
+    /usr/sbin/installer -pkg "/tmp/${PKG}" -target / || return 1
+
+    return 0
+}
+
+install_macosx_git() {
+
+    if [ -n "$_PY_EXE" ]; then
+        _PYEXE=${_PY_EXE}
+    else
+        _PYEXE=python2.7
+    fi
+
+    if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then
+        __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1
+        return 0
+    fi
+
+    if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then
+        $_PYEXE setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/opt/salt || return 1
+    else
+        $_PYEXE setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/opt/salt || return 1
+    fi
+
+    return 0
+}
+
+install_macosx_stable_post() {
+    if [ ! -f /etc/paths.d/salt ]; then
+        # printf (not ksh-style print) so this works in POSIX sh
+        printf "%s\n" "/opt/salt/bin" "/usr/local/sbin" > /etc/paths.d/salt
+    fi
+
+    # Don't fail because of an unknown variable in the next step
+    set +o nounset
+    # shellcheck disable=SC1091
+    . /etc/profile
+    # Revert nounset to its previous state
+    set -o nounset
+
+    return 0
+}
+
+install_macosx_git_post() {
+    install_macosx_stable_post || return 1
+    return 0
+}
+
+install_macosx_restart_daemons() {
+    [ $_START_DAEMONS -eq $BS_FALSE ] && return
+
+    /bin/launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1
+    /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1
+
+    return 0
+}
+#
+# Ended OS X / Darwin Install Functions
+#
+#######################################################################################################################
+
+#######################################################################################################################
+#
+# Default minion configuration function. Matches ANY distribution as long as
+# the -c option is passed.
+#
+config_salt() {
+    # If the configuration directory is not passed, return
+    [ "$_TEMP_CONFIG_DIR" = "null" ] && return
+
+    if [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then
+        echowarn "Passing -C (config only) option implies -F (forced overwrite)."
+
+        if [ "$_FORCE_OVERWRITE" -ne $BS_TRUE ]; then
+            echowarn "Overwriting configs in 11 seconds!"
+ sleep 11 + _FORCE_OVERWRITE=$BS_TRUE + fi + fi + + # Let's create the necessary directories + [ -d "$_SALT_ETC_DIR" ] || mkdir "$_SALT_ETC_DIR" || return 1 + [ -d "$_PKI_DIR" ] || (mkdir -p "$_PKI_DIR" && chmod 700 "$_PKI_DIR") || return 1 + + # If -C or -F was passed, we don't need a .bak file for the config we're updating + # This is used in the custom master/minion config file checks below + CREATE_BAK=$BS_TRUE + if [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then + CREATE_BAK=$BS_FALSE + fi + + CONFIGURED_ANYTHING=$BS_FALSE + + # Copy the grains file if found + if [ -f "$_TEMP_CONFIG_DIR/grains" ]; then + echodebug "Moving provided grains file from $_TEMP_CONFIG_DIR/grains to $_SALT_ETC_DIR/grains" + __movefile "$_TEMP_CONFIG_DIR/grains" "$_SALT_ETC_DIR/grains" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + if [ "$_INSTALL_MINION" -eq $BS_TRUE ] || \ + [ "$_CONFIG_ONLY" -eq $BS_TRUE ] || [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then + # Create the PKI directory + [ -d "$_PKI_DIR/minion" ] || (mkdir -p "$_PKI_DIR/minion" && chmod 700 "$_PKI_DIR/minion") || return 1 + + # Check to see if a custom minion config json dict was provided + if [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then + + # Check if a minion config file already exists and move to .bak if needed + if [ -f "$_SALT_ETC_DIR/minion" ] && [ "$CREATE_BAK" -eq "$BS_TRUE" ]; then + __movefile "$_SALT_ETC_DIR/minion" "$_SALT_ETC_DIR/minion.bak" $BS_TRUE || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Overwrite/create the config file with the yaml string + __overwriteconfig "$_SALT_ETC_DIR/minion" "$_CUSTOM_MINION_CONFIG" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + + # Copy the minions configuration if found + # Explicitly check for custom master config to avoid moving the minion config + elif [ -f "$_TEMP_CONFIG_DIR/minion" ] && [ "$_CUSTOM_MASTER_CONFIG" = "null" ]; then + __movefile "$_TEMP_CONFIG_DIR/minion" "$_SALT_ETC_DIR" "$_FORCE_OVERWRITE" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Copy the minion's keys if found + if [ -f "$_TEMP_CONFIG_DIR/minion.pem" ]; then + __movefile "$_TEMP_CONFIG_DIR/minion.pem" "$_PKI_DIR/minion/" "$_FORCE_OVERWRITE" || return 1 + chmod 400 "$_PKI_DIR/minion/minion.pem" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + if [ -f "$_TEMP_CONFIG_DIR/minion.pub" ]; then + __movefile "$_TEMP_CONFIG_DIR/minion.pub" "$_PKI_DIR/minion/" "$_FORCE_OVERWRITE" || return 1 + chmod 664 "$_PKI_DIR/minion/minion.pub" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + # For multi-master-pki, copy the master_sign public key if found + if [ -f "$_TEMP_CONFIG_DIR/master_sign.pub" ]; then + __movefile "$_TEMP_CONFIG_DIR/master_sign.pub" "$_PKI_DIR/minion/" || return 1 + chmod 664 "$_PKI_DIR/minion/master_sign.pub" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + fi + + # only (re)place master or syndic configs if -M (install master) or -S + # (install syndic) specified + OVERWRITE_MASTER_CONFIGS=$BS_FALSE + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ] && [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then + OVERWRITE_MASTER_CONFIGS=$BS_TRUE + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ] && [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then + OVERWRITE_MASTER_CONFIGS=$BS_TRUE + fi + + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ] || [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ] || [ "$OVERWRITE_MASTER_CONFIGS" -eq $BS_TRUE ] || [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then + # Create the PKI directory + [ -d "$_PKI_DIR/master" ] || (mkdir -p "$_PKI_DIR/master" && chmod 700 "$_PKI_DIR/master") || return 1 + + # Check to see if a custom master config json 
dict was provided + if [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then + + # Check if a master config file already exists and move to .bak if needed + if [ -f "$_SALT_ETC_DIR/master" ] && [ "$CREATE_BAK" -eq "$BS_TRUE" ]; then + __movefile "$_SALT_ETC_DIR/master" "$_SALT_ETC_DIR/master.bak" $BS_TRUE || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Overwrite/create the config file with the yaml string + __overwriteconfig "$_SALT_ETC_DIR/master" "$_CUSTOM_MASTER_CONFIG" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + + # Copy the masters configuration if found + elif [ -f "$_TEMP_CONFIG_DIR/master" ]; then + __movefile "$_TEMP_CONFIG_DIR/master" "$_SALT_ETC_DIR" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Copy the master's keys if found + if [ -f "$_TEMP_CONFIG_DIR/master.pem" ]; then + __movefile "$_TEMP_CONFIG_DIR/master.pem" "$_PKI_DIR/master/" || return 1 + chmod 400 "$_PKI_DIR/master/master.pem" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + if [ -f "$_TEMP_CONFIG_DIR/master.pub" ]; then + __movefile "$_TEMP_CONFIG_DIR/master.pub" "$_PKI_DIR/master/" || return 1 + chmod 664 "$_PKI_DIR/master/master.pub" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # Recursively copy salt-cloud configs with overwriting if necessary + for file in "$_TEMP_CONFIG_DIR"/cloud*; do + if [ -f "$file" ]; then + __copyfile "$file" "$_SALT_ETC_DIR" || return 1 + elif [ -d "$file" ]; then + subdir="$(basename "$file")" + mkdir -p "$_SALT_ETC_DIR/$subdir" + for file_d in "$_TEMP_CONFIG_DIR/$subdir"/*; do + if [ -f "$file_d" ]; then + __copyfile "$file_d" "$_SALT_ETC_DIR/$subdir" || return 1 + fi + done + fi + done + fi + + if [ "$_CONFIG_ONLY" -eq $BS_TRUE ] && [ $CONFIGURED_ANYTHING -eq $BS_FALSE ]; then + echowarn "No configuration or keys were copied over. No configuration was done!" + exit 0 + fi + + return 0 +} +# +# Ended Default Configuration function +# +####################################################################################################################### + +####################################################################################################################### +# +# Default salt master minion keys pre-seed function. Matches ANY distribution +# as long as the -k option is passed. +# +preseed_master() { + # Create the PKI directory + + if [ "$(find "$_TEMP_KEYS_DIR" -maxdepth 1 -type f | wc -l)" -lt 1 ]; then + echoerror "No minion keys were uploaded. Unable to pre-seed master" + return 1 + fi + + SEED_DEST="$_PKI_DIR/master/minions" + [ -d "$SEED_DEST" ] || (mkdir -p "$SEED_DEST" && chmod 700 "$SEED_DEST") || return 1 + + for keyfile in "$_TEMP_KEYS_DIR"/*; do + keyfile=$(basename "${keyfile}") + src_keyfile="${_TEMP_KEYS_DIR}/${keyfile}" + dst_keyfile="${SEED_DEST}/${keyfile}" + + # If it's not a file, skip to the next + [ ! -f "$src_keyfile" ] && continue + + __movefile "$src_keyfile" "$dst_keyfile" || return 1 + chmod 664 "$dst_keyfile" || return 1 + done + + return 0 +} +# +# Ended Default Salt Master Pre-Seed minion keys function +# +####################################################################################################################### + +####################################################################################################################### +# +# This function checks if all of the installed daemons are running or not. 
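Before the daemons-running check defined next, here is a usage sketch of the pre-seed path handled by `preseed_master()` above; the minion id `web01` and the `bootstrap-salt.sh` filename are assumptions for illustration, and `salt-key` ships with the master packages:

```sh
# Generate a keypair for the minion ahead of time.
mkdir -p /tmp/gen /tmp/seed-keys
salt-key --gen-keys=web01 --gen-keys-dir=/tmp/gen
# The seed file must be named after the minion id and contain the public key;
# preseed_master() moves every file in the -k directory into
# $_PKI_DIR/master/minions/ with mode 664, pre-accepting the minion.
cp /tmp/gen/web01.pub /tmp/seed-keys/web01
sh bootstrap-salt.sh -M -k /tmp/seed-keys stable
```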
+# +daemons_running() { + [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 + + FAILED_DAEMONS=0 + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # shellcheck disable=SC2009 + if [ "${DISTRO_NAME}" = "SmartOS" ]; then + if [ "$(svcs -Ho STA salt-$fname)" != "ON" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + elif [ "$(ps wwwaux | grep -v grep | grep salt-$fname)" = "" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + done + + return $FAILED_DAEMONS +} +# +# Ended daemons running check function +# +####################################################################################################################### + +#====================================================================================================================== +# LET'S PROCEED WITH OUR INSTALLATION +#====================================================================================================================== + +# Let's get the dependencies install function +DEP_FUNC_NAMES="" +if [ ${_NO_DEPS} -eq $BS_FALSE ]; then + DEP_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}_deps" +fi + +DEPS_INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$DEP_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + DEPS_INSTALL_FUNC="$FUNC_NAME" + break + fi +done +echodebug "DEPS_INSTALL_FUNC=${DEPS_INSTALL_FUNC}" + +# Let's get the Salt config function +CONFIG_FUNC_NAMES="config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}_${ITYPE}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_salt" + +CONFIG_SALT_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$CONFIG_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + CONFIG_SALT_FUNC="$FUNC_NAME" + break + fi +done +echodebug "CONFIG_SALT_FUNC=${CONFIG_SALT_FUNC}" + +# Let's get the pre-seed master function +PRESEED_FUNC_NAMES="preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES 
preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}_${ITYPE}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_master" + +PRESEED_MASTER_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$PRESEED_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + PRESEED_MASTER_FUNC="$FUNC_NAME" + break + fi +done +echodebug "PRESEED_MASTER_FUNC=${PRESEED_MASTER_FUNC}" + +# Let's get the install function +INSTALL_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}" +INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}" +INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}" + +INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$INSTALL_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + INSTALL_FUNC="$FUNC_NAME" + break + fi +done +echodebug "INSTALL_FUNC=${INSTALL_FUNC}" + +# Let's get the post install function +POST_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}_post" + +POST_INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$POST_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + POST_INSTALL_FUNC="$FUNC_NAME" + break + fi +done +echodebug "POST_INSTALL_FUNC=${POST_INSTALL_FUNC}" + +# Let's get the start daemons install function +STARTDAEMONS_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}_restart_daemons" + +STARTDAEMONS_INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$STARTDAEMONS_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + STARTDAEMONS_INSTALL_FUNC="$FUNC_NAME" + break + fi +done +echodebug "STARTDAEMONS_INSTALL_FUNC=${STARTDAEMONS_INSTALL_FUNC}" + +# Let's get the daemons running check function. 
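To make the name-resolution cascade above concrete, a small self-contained sketch (the distro values are assumed, and `command -v` stands in for the script's `__function_defined` helper):

```sh
#!/bin/sh
# Assumed: DISTRO_NAME_L=centos, PREFIXED_DISTRO_MAJOR_VERSION=_7, ITYPE=git
install_centos_git() { echo "running generic centos git install"; }  # assumed handler

INSTALL_FUNC="null"
for FUNC_NAME in install_centos_7_git install_centos_git; do  # most specific first
    if command -v "$FUNC_NAME" >/dev/null 2>&1; then
        INSTALL_FUNC="$FUNC_NAME"
        break
    fi
done
echo "INSTALL_FUNC=${INSTALL_FUNC}"  # -> INSTALL_FUNC=install_centos_git
```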
+DAEMONS_RUNNING_FUNC_NAMES="daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}"
+DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}"
+DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}"
+DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}"
+DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}_${ITYPE}"
+DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}"
+DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running"
+
+DAEMONS_RUNNING_FUNC="null"
+for FUNC_NAME in $(__strip_duplicates "$DAEMONS_RUNNING_FUNC_NAMES"); do
+    if __function_defined "$FUNC_NAME"; then
+        DAEMONS_RUNNING_FUNC="$FUNC_NAME"
+        break
+    fi
+done
+echodebug "DAEMONS_RUNNING_FUNC=${DAEMONS_RUNNING_FUNC}"
+
+# Let's get the check services function
+if [ ${_DISABLE_SALT_CHECKS} -eq $BS_FALSE ]; then
+    CHECK_SERVICES_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_check_services"
+    CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_check_services"
+    CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_check_services"
+    CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_check_services"
+    CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_check_services"
+    CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_check_services"
+else
+    CHECK_SERVICES_FUNC_NAMES=""
+fi
+
+CHECK_SERVICES_FUNC="null"
+for FUNC_NAME in $(__strip_duplicates "$CHECK_SERVICES_FUNC_NAMES"); do
+    if __function_defined "$FUNC_NAME"; then
+        CHECK_SERVICES_FUNC="$FUNC_NAME"
+        break
+    fi
+done
+echodebug "CHECK_SERVICES_FUNC=${CHECK_SERVICES_FUNC}"
+
+if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ "$DEPS_INSTALL_FUNC" = "null" ]; then
+    echoerror "No dependencies installation function found. Exiting..."
+    exit 1
+fi
+
+if [ "$INSTALL_FUNC" = "null" ]; then
+    echoerror "No installation function found. Exiting..."
+    exit 1
+fi
+
+
+# Install dependencies
+if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ $_CONFIG_ONLY -eq $BS_FALSE ]; then
+    # Only execute the function if not in config-only mode
+    echoinfo "Running ${DEPS_INSTALL_FUNC}()"
+    if ! ${DEPS_INSTALL_FUNC}; then
+        echoerror "Failed to run ${DEPS_INSTALL_FUNC}()!!!"
+        exit 1
+    fi
+fi
+
+
+if [ "${ITYPE}" = "git" ] && [ ${_NO_DEPS} -eq ${BS_TRUE} ]; then
+    if ! __git_clone_and_checkout; then
+        echoerror "Failed to clone and checkout git repository."
+        exit 1
+    fi
+fi
+
+
+# Triggering config_salt() if overwriting master or minion configs
+if [ "$_CUSTOM_MASTER_CONFIG" != "null" ] || [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then
+    if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
+        _TEMP_CONFIG_DIR="$_SALT_ETC_DIR"
+    fi
+
+    if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ $_CONFIG_ONLY -eq $BS_TRUE ]; then
+        # Execute function to satisfy dependencies for configuration step
+        echoinfo "Running ${DEPS_INSTALL_FUNC}()"
+        if ! ${DEPS_INSTALL_FUNC}; then
+            echoerror "Failed to run ${DEPS_INSTALL_FUNC}()!!!"
+            exit 1
+        fi
+    fi
+fi
+
+# Configure Salt
+if [ "$CONFIG_SALT_FUNC" != "null" ] && [ "$_TEMP_CONFIG_DIR" != "null" ]; then
+    echoinfo "Running ${CONFIG_SALT_FUNC}()"
+    if ! ${CONFIG_SALT_FUNC}; then
+        echoerror "Failed to run ${CONFIG_SALT_FUNC}()!!!"
+        exit 1
+    fi
+fi
+
+# Drop the master address if passed
+if [ "$_SALT_MASTER_ADDRESS" != "null" ]; then
+    [ ! -d "$_SALT_ETC_DIR/minion.d" ] && mkdir -p "$_SALT_ETC_DIR/minion.d"
+    cat <<_eof > "$_SALT_ETC_DIR/minion.d/99-master-address.conf"
+master: $_SALT_MASTER_ADDRESS
+_eof
+fi
+
+# Drop the minion id if passed
+if [ "$_SALT_MINION_ID" != "null" ]; then
+    [ ! -d "$_SALT_ETC_DIR" ] && mkdir -p "$_SALT_ETC_DIR"
+    echo "$_SALT_MINION_ID" > "$_SALT_ETC_DIR/minion_id"
+fi
+
+# Pre-seed master keys
+if [ "$PRESEED_MASTER_FUNC" != "null" ] && [ "$_TEMP_KEYS_DIR" != "null" ]; then
+    echoinfo "Running ${PRESEED_MASTER_FUNC}()"
+    if ! ${PRESEED_MASTER_FUNC}; then
+        echoerror "Failed to run ${PRESEED_MASTER_FUNC}()!!!"
+        exit 1
+    fi
+fi
+
+# Install Salt
+if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then
+    # Only execute the function if not in config-only mode
+    echoinfo "Running ${INSTALL_FUNC}()"
+    if ! ${INSTALL_FUNC}; then
+        echoerror "Failed to run ${INSTALL_FUNC}()!!!"
+        exit 1
+    fi
+fi
+
+# Run any post-install function. Only execute the function if not in config-only mode
+if [ "$POST_INSTALL_FUNC" != "null" ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then
+    echoinfo "Running ${POST_INSTALL_FUNC}()"
+    if ! ${POST_INSTALL_FUNC}; then
+        echoerror "Failed to run ${POST_INSTALL_FUNC}()!!!"
+        exit 1
+    fi
+fi
+
+# Run any check-services function. Only execute the function if not in config-only mode
+if [ "$CHECK_SERVICES_FUNC" != "null" ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then
+    echoinfo "Running ${CHECK_SERVICES_FUNC}()"
+    if ! ${CHECK_SERVICES_FUNC}; then
+        echoerror "Failed to run ${CHECK_SERVICES_FUNC}()!!!"
+        exit 1
+    fi
+fi
+
+# Run any start daemons function
+if [ "$STARTDAEMONS_INSTALL_FUNC" != "null" ] && [ ${_START_DAEMONS} -eq $BS_TRUE ]; then
+    echoinfo "Running ${STARTDAEMONS_INSTALL_FUNC}()"
+    echodebug "Waiting ${_SLEEP} seconds for processes to settle before starting them"
+    sleep ${_SLEEP}
+    if ! ${STARTDAEMONS_INSTALL_FUNC}; then
+        echoerror "Failed to run ${STARTDAEMONS_INSTALL_FUNC}()!!!"
+        exit 1
+    fi
+fi
+
+# Check if the installed daemons are running or not
+if [ "$DAEMONS_RUNNING_FUNC" != "null" ] && [ ${_START_DAEMONS} -eq $BS_TRUE ]; then
+    echoinfo "Running ${DAEMONS_RUNNING_FUNC}()"
+    echodebug "Waiting ${_SLEEP} seconds for processes to settle before checking for them"
+    sleep ${_SLEEP}  # Sleep a little bit to let daemons start
+    if ! ${DAEMONS_RUNNING_FUNC}; then
+        echoerror "Failed to run ${DAEMONS_RUNNING_FUNC}()!!!"
+
+        for fname in api master minion syndic; do
+            # Skip salt-api since the service should be opt-in and not necessarily started on boot
+            [ $fname = "api" ] && continue
+
+            # Skip if not meant to be installed
+            [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
+            [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
+            [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue
+
+            if [ "$_ECHO_DEBUG" -eq $BS_FALSE ]; then
+                echoerror "salt-$fname was not found running. Pass '-D' to ${__ScriptName} when bootstrapping for additional debugging information..."
+                continue
+            fi
+
+            [ !
-f "$_SALT_ETC_DIR/$fname" ] && [ $fname != "syndic" ] && echodebug "$_SALT_ETC_DIR/$fname does not exist" + + echodebug "Running salt-$fname by hand outputs: $(nohup salt-$fname -l debug)" + + [ ! -f /var/log/salt/$fname ] && echodebug "/var/log/salt/$fname does not exist. Can't cat its contents!" && continue + + echodebug "DAEMON LOGS for $fname:" + echodebug "$(cat /var/log/salt/$fname)" + echo + done + + echodebug "Running Processes:" + echodebug "$(ps auxwww)" + + exit 1 + fi +fi + +# Done! +if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Salt installed!" +else + echoinfo "Salt configured!" +fi + +exit 0 + +# vim: set sts=4 ts=4 et diff --git a/salt/soc/files/kratos/kratos.yaml b/salt/soc/files/kratos/kratos.yaml index 2171971bc..2e8a408fd 100644 --- a/salt/soc/files/kratos/kratos.yaml +++ b/salt/soc/files/kratos/kratos.yaml @@ -1,4 +1,4 @@ -{%- set WEBACCESS = salt['pillar.get']('manager:url_base', '') -%} +{%- set WEBACCESS = salt['pillar.get']('global:url_base', '') -%} {%- set KRATOSKEY = salt['pillar.get']('kratos:kratoskey', '') -%} selfservice: diff --git a/salt/soc/files/soc/changes.json b/salt/soc/files/soc/changes.json index 4f359a996..5b0204ae0 100644 --- a/salt/soc/files/soc/changes.json +++ b/salt/soc/files/soc/changes.json @@ -1,43 +1,21 @@ { - "title": "Security Onion 2.0.3 RC1 is here!", + "title": "Security Onion 2.1.0 RC2 is here!", "changes": [ - { "summary": "Resolved an issue with large drives and the ISO install." }, - { "summary": "Modified ISO installation to use Logical Volume Management (LVM) for disk partitioning." }, - { "summary": "Updated Elastic Stack components to version 7.8.1." }, - { "summary": "Updated Zeek to version 3.0.8." }, - { "summary": "Fixed standalone pcap interval issue." }, - { "summary": "Security Fix 1067: variables.txt from ISO install stays on disk for 10 days." }, - { "summary": "Security Fix 1068: Remove user values from static.sls." }, - { "summary": "Issue 1059: Fix distributed deployment sensor interval issue allowing PCAP." }, - { "summary": "Issue 1058: Support for passwords that start with special characters." }, - { "summary": "Minor soup updates." }, - { "summary": "Re-branded 2.0 to give it a fresh look." }, - { "summary": "All documentation has moved to https://docs.securityonion.net/en/2.0" }, - { "summary": "soup is alive! Note: This tool only updates Security Onion components. Please use the built-in OS update process to keep the OS and other components up to date." }, - { "summary": "so-import-pcap is back! See the docs here: http://docs.securityonion.net/en/2.0/so-import-pcap." }, - { "summary": "Fixed issue with so-features-enable." }, - { "summary": "Users can now pivot to PCAP from Suricata alerts." }, - { "summary": "ISO install now prompts users to create an admin/sudo user instead of using a default account name." }, - { "summary": "The web email & password set during setup is now used to create the initial accounts for TheHive, Cortex, and Fleet." }, - { "summary": "Fixed issue with disk cleanup." }, - { "summary": "Changed the default permissions for /opt/so to keep non-priviledged users from accessing salt and related files." }, - { "summary": "Locked down access to certain SSL keys." }, - { "summary": "Suricata logs now compress after they roll over." }, - { "summary": "Users can now easily customize shard counts per index." }, - { "summary": "Improved Elastic ingest parsers including Windows event logs and Sysmon logs shipped with WinLogbeat and Osquery (ECS)." 
}, - { "summary": "Elastic nodes are now HOT by default, making it easier to add a warm node later." }, - { "summary": "so-allow now runs at the end of an install so users can enable access right away." }, - { "summary": "Alert severities across Wazuh, Suricata and Playbook (Sigma) have been standardized and copied to event.severity:
  • 1 = Low
  • 2 = Medium
  • 3 = High
  • 4 = Critical
" }, - { "summary": "Initial implementation of alerting queues:
  • Low & Medium alerts are accessible through Kibana & Hunt.
  • High & Critical alerts are accessible through Kibana, Hunt and TheHive for immediate analysis.
" }, - { "summary": "ATT&CK Navigator is now a statically-hosted site in the nginx container." }, - { "summary": "Playbook updates:
  • All Sigma rules in the community repo (500+) are now imported and kept up to date.
  • Initial implementation of automated testing when a Play's detection logic has been edited (i.e., Unit Testing).
  • Updated UI Theme.
  • Once authenticated through SOC, users can now access Playbook with analyst permissions without login.
" }, - { "summary": "Kolide Launcher has been updated to include the ability to pass arbitrary flags. This new functionality was sponsored by SOS." }, - { "summary": "Fixed issue with Wazuh authd registration service port not being correctly exposed." }, - { "summary": "Added option for exposure of Elasticsearch REST API (port 9200) to so-allow for easier external querying/integration with other tools." }, - { "summary": "Added option to so-allow for external Strelka file uploads (e.g., via strelka-fileshot)." }, - { "summary": "Added default YARA rules for Strelka. Default rules are maintained by Florian Roth and pulled from https://github.com/Neo23x0/signature-base." }, - { "summary": "Added the ability to use custom Zeek scripts." }, - { "summary": "Renamed master server to manager node." }, - { "summary": "Improved unification of Zeek and Strelka file data." } + { "summary": "Known Issues
  • Once you update your grid to RC2, any new nodes that join the grid must also be RC2, so attempting to join a new RC1 node will fail. For best results, use the latest RC2 ISO (or the RC2 installer from GitHub) when joining a node to an RC2 grid.
  • Shipping Windows event logs with Osquery will fail intermittently, with UTF-8 errors logged in the Application log. This is scheduled to be fixed in Osquery 4.5.
  • When running soup to upgrade from RC1 to RC2, a Salt error occurs during the final highstate. This error is related to patch_os_schedule and can safely be ignored, as it will not recur in subsequent highstates.
  • When Search Nodes are upgraded from RC1 to RC2, there is a chance of a race condition in which certificates are missing. This will show errors in the manager log when connecting to the remote node. To fix this, run the following on the affected search node:
    1. Stop Elasticsearch - sudo so-elasticsearch-stop
    2. Run the SSL state - sudo salt-call state.apply ssl
    3. Restart Elasticsearch - sudo so-elasticsearch-restart
" }, + { "summary": "Fixed an issue where the console was timing out, making it appear that the installer was hung." }, + { "summary": "Introduced the Import node, which is ideal for running so-import-pcap to import pcap files and view the resulting logs in Hunt or Kibana." }, + { "summary": "Suricata stats.log now rotates once a day. If you have a number of defunct suriloss processes on nodes that have it, do the following:
  • Stop Suricata - sudo so-suricata-stop
  • Remove the current stats.log - sudo rm /opt/so/log/suricata/stats.log
  • Reboot the machine - shutdown -r now
" }, + { "summary": "Moved static.sls to global.sls to align the name with the functionality." }, + { "summary": "Traffic between nodes in a distributed deployment is now fully encrypted." }, + { "summary": "Playbook
  • Elastalert now runs active Plays every 3 minutes
  • Changed default rule-update config to only import Windows rules from the Sigma Community repo
  • Lots of bug fixes & stability improvements
" }, + { "summary": "Ingest Node parsing updates for Osquery and Winlogbeat - implemented single pipeline for Windows eventlogs & sysmon logs" }, + { "summary": "Upgraded Osquery to 4.4 and re-enabled auto-updates." }, + { "summary": "Upgraded to Salt 3001.1" }, + { "summary": "Upgraded Wazuh to 3.13.1" }, + { "summary": "Hunt interface now shows the timezone being used for the selected date range." }, + { "summary": "Fixed Cortex initialization so that TheHive integration and initial user set is correctly configured." }, + { "summary": "Improved management of TheHive/Cortex credentials." }, + { "summary": "SOC now allows for arbitrary, time-bounded PCAP job creation, with optional filtering by host and port." }, + { "summary": "Historical release notes can be found on our docs website: https://docs.securityonion.net/en/2.1/release-notes.html" } ] -} +} \ No newline at end of file diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json index 31e49fc86..b44733cb1 100644 --- a/salt/soc/files/soc/soc.json +++ b/salt/soc/files/soc/soc.json @@ -1,5 +1,6 @@ -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%} -{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') -%} +{%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') -%} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} { "logFilename": "/opt/sensoroni/logs/sensoroni-server.log", "server": { @@ -33,53 +34,47 @@ "mostRecentlyUsedLimit": 5, "eventFields": { "default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "network.community_id", "event.dataset" ], - "bro_conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "service", "log.id.uid" ], - "bro_dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "endpoint", "named_pipe", "operation", "log.id.uid" ], - "bro_dhcp": ["soc_timestamp", "source.ip", "destination.ip", "domain_name", "hostname", "message_types", "log.id.uid" ], - "bro_dnp3": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "fc_reply", "log.id.uid" ], - "bro_dns": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "query", "query_type_name", "rcode_name", "log.id.uid" ], - "bro_dpd": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ], - "bro_files": ["soc_timestamp", "source.ip", "destination.ip", "log.id.flog.id.uid", "mimetype", "source", "log.id.uid" ], - "bro_ftp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ftp_argument", "ftp_command", "reply_code", "log.id.uid", "username" ], - "bro_http": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "method", "virtual_host", "status_code", "status_message", "log.id.uid" ], - "bro_intel": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "indicator", "indicator_type", "seen_where", "log.id.uid" ], - "bro_irc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "irc_command", "log.id.uid", "value" ], - "bro_kerberos": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "client", "service", "request_type", "log.id.uid" ], - "bro_modbus": ["soc_timestamp", "source.ip", "source.port", "destination.ip", 
"destination.port", "function", "log.id.uid" ], - "bro_mysql": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "mysql_argument", "mysql_command", "mysql_success", "response", "log.id.uid" ], - "bro_notice": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "msg", "log.id.uid" ], - "bro_ntlm": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "hostname", "ntlm_success", "server_dns_computer_name", "server_nb_computer_name", "server_tree_name", "log.id.uid" ], - "bro_pe": ["soc_timestamp", "is_64bit", "is_exe", "machine", "os", "subsystem", "log.id.flog.id.uid" ], - "bro_radius": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "username", "framed_addr", "reply_msg", "result" ], - "bro_rdp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "client_build", "client_name", "cookie", "encryption_level", "encryption_method", "keyboard_layout", "result", "security_protocol", "log.id.uid" ], - "bro_rfb": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "authentication_method", "auth", "share_flag", "desktop_name", "log.id.uid" ], - "bro_signatures" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "signature_id", "event_message", "sub_message", "signature_count", "host_count", "log.id.uid" ], - "bro_sip": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "method", "uri", "request_from", "request_to", "response_from", "response_to", "call_id", "subject", "user_agent", "status_code", "log.id.uid" ], - "bro_smb_files" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.flog.id.uid", "action", "path", "name", "size", "prev_name", "log.id.uid" ], - "bro_smb_mapping" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "path", "service", "share_type", "log.id.uid" ], - "bro_smtp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "helo", "mail_from", "recipient_to", "from", "to", "cc", "reply_to", "subject", "useragent", "log.id.uid" ], - "bro_snmp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "community", "version", "log.id.uid" ], - "bro_socks": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ], - "bro_software": ["soc_timestamp", "source.ip", "name", "software_type" ], - "bro_ssh": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "version", "hassh", "direction", "client", "server", "log.id.uid" ], - "bro_ssl": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "cipher", "curve", "server_name", "log.id.uid", "validation_status", "version" ], - "bro_syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "facility", "protocol", "severity", "syslog-priority", "log.id.uid" ], - "bro_tunnels": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "tunnel_type", "action", "log.id.uid" ], - "bro_weird": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "name", "log.id.uid" ], - "bro_x509": ["soc_timestamp", "certificate_common_name", "certificate_country_code", "certificate_key_length", "issuer_organization", "log.id.id" ], - "cron" : 
["soc_timestamp", "message" ], - "anacron": ["soc_timestamp", "message" ], - "bluetoothd": ["soc_timestamp", "message" ], - "firewall": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "direction", "interface", "action", "reason" ], - "ntpd" : ["soc_timestamp", "message" ], - "ossec": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "alert_level", "classification", "description", "username", "escalated_user", "location", "process" ], - "pulseaudio": ["soc_timestamp", "message" ], - "snort": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "sid", "alert", "category", "classification", "severity" ], - "su" : ["soc_timestamp", "message" ], - "sudo" : ["soc_timestamp", "message" ], - "systemd": ["soc_timestamp", "message" ], - "sysmon": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "host.name", "event.dataset", "parent_image_path", "source_name", "task", "user.name" ], - "wineventlog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "host.name", "event.code", "event.dataset", "source_name", "task" ] + "::conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "network.protocol", "log.id.uid" ], + "::dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "dce_rpc.endpoint", "dce_rpc.named_pipe", "dce_rpc.operation", "log.id.uid" ], + "::dhcp": ["soc_timestamp", "source.ip", "destination.ip", "host.domain", "host.hostname", "dhcp.message_types", "log.id.uid" ], + "::dnp3": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "dnp3.fc_reply", "log.id.uid" ], + "::dns": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "dns.query.name", "dns.query.type_name", "dns.response.code_name", "log.id.uid" ], + "::dpd": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.protocol", "observer.analyser", "error.reason", "log.id.uid" ], + "::files": ["soc_timestamp", "source.ip", "destination.ip", "file.name", "file.mime_type", "file.source", "file.bytes.total", "log.id.fuid", "log.id.uid" ], + "::ftp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ftp.user", "ftp.command", "ftp.argument", "ftp.reply_code", "file.size", "log.id.uid" ], + "::http": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "http.method", "http.virtual_host", "http.status_code", "http.status_message", "http.request.body.length", "http.response.body.length", "log.id.uid" ], + "::intel": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "intel.indicator", "intel.indicator_type", "intel.seen_where", "log.id.uid" ], + "::irc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "irc.username", "irc.nickname", "irc.command.type", "irc.command.value", "irc.command.info", "log.id.uid" ], + "::kerberos": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "kerberos.client", "kerberos.service", "kerberos.request_type", "log.id.uid" ], + "::modbus": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "modbus.function", "log.id.uid" ], + "::mysql": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", 
"mysql.command", "mysql.argument", "mysql.success", "mysql.response", "log.id.uid" ], + "::notice": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "notice.note", "notice.message", "log.id.fuid", "log.id.uid" ], + "::ntlm": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ntlm.name", "ntlm.success", "ntlm.server.dns.name", "ntlm.server.nb.name", "ntlm.server.tree.name", "log.id.uid" ], + "::pe": ["soc_timestamp", "file.is_64bit", "file.is_exe", "file.machine", "file.os", "file.subsystem", "log.id.fuid" ], + "::radius": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "username", "radius.framed_address", "radius.reply_message", "radius.result" ], + "::rdp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rdp.client_build", "client_name", "rdp.cookie", "rdp.encryption_level", "rdp.encryption_method", "rdp.keyboard_layout", "rdp.result", "rdp.security_protocol", "log.id.uid" ], + "::rfb": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rfb.authentication.method", "rfb.authentication.success", "rfb.share_flag", "rfb.desktop.name", "log.id.uid" ], + "::signatures" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "signature_id", "event_message", "sub_message", "signature_count", "host.count", "log.id.uid" ], + "::sip": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "sip.method", "sip.uri", "sip.request.from", "sip.request.to", "sip.response.from", "sip.response.to", "sip.call_id", "sip.subject", "sip.user_agent", "sip.status_code", "log.id.uid" ], + "::smb_files" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.fuid", "file.action", "file.path", "file.name", "file.size", "file.prev_name", "log.id.uid" ], + "::smb_mapping" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "smb.path", "smb.service", "smb.share_type", "log.id.uid" ], + "::smtp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "smtp.helo", "smtp.mail_from", "smtp.recipient_to", "smtp.from", "smtp.to", "smtp.cc", "smtp.reply_to", "smtp.subject", "smtp.useragent", "log.id.uid" ], + "::snmp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "snmp.community", "snmp.version", "log.id.uid" ], + "::socks": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "socks.name", "socks.request.host", "socks.request.port", "socks.status", "log.id.uid" ], + "::software": ["soc_timestamp", "source.ip", "software.name", "software.type" ], + "::ssh": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssh.version", "ssh.hassh_version", "ssh.direction", "ssh.client", "ssh.server", "log.id.uid" ], + "::ssl": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssl.cipher", "ssl.curve", "ssl.certificate.subject", "ssl.validation_status", "ssl.version", "log.id.uid" ], + "::syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "syslog.facility", "network.protocol", "syslog.severity", "log.id.uid" ], + "::tunnels": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "tunnel_type", "action", "log.id.uid" ], + "::weird": ["soc_timestamp", "source.ip", "source.port", 
"destination.ip", "destination.port", "weird.name", "log.id.uid" ], + "::x509": ["soc_timestamp", "x509.certificate.subject", "x509.certificate.key.type", "x509.certificate.key.length", "x509.certificate.issuer", "log.id.id" ], + ":firewall:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "direction", "interface", "action", "reason" ], + ":osquery:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "source.hostname", "event.dataset", "process.executable", "user.name" ], + ":ossec:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rule.name", "rule.level", "rule.category", "process.name", "user.name", "user.escalated", "location", "process.name" ], + ":strelka:file": ["soc_timestamp", "scan.exiftool.OriginalFileName", "file.size", "hash.md5", "scan.exiftool.CompanyName", "scan.exiftool.Description", "scan.exiftool.Directory", "scan.exiftool.FileType", "scan.exiftool.FileOS", "log.id.fuid" ], + ":suricata:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rule.gid", "rule.name", "rule.category", "rule.rev", "event.severity", "event.severity_label" ], + ":sysmon:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "source.hostname", "event.dataset", "process.executable", "user.name" ], + ":windows_eventlog:": ["soc_timestamp", "user.name" ] }, "queries": [ { "name": "Default Query", "description": "Show all events grouped by the origin host", "query": "* | groupby observer.name"}, diff --git a/salt/soc/init.sls b/salt/soc/init.sls index e3fdf538a..1c25f42a1 100644 --- a/salt/soc/init.sls +++ b/salt/soc/init.sls @@ -1,5 +1,5 @@ -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} socdir: diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf index 477113376..39e9c276d 100644 --- a/salt/soctopus/files/SOCtopus.conf +++ b/salt/soctopus/files/SOCtopus.conf @@ -1,6 +1,6 @@ -{%- set MANAGER = salt['pillar.get']('manager:url_base', '') %} -{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %} -{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %} +{%- set MANAGER = salt['pillar.get']('global:url_base', '') %} +{%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') %} +{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %} [es] es_url = http://{{MANAGER}}:9200 diff --git a/salt/soctopus/files/templates/es-generic.template b/salt/soctopus/files/templates/es-generic.template index b56050741..8183a5af4 100644 --- a/salt/soctopus/files/templates/es-generic.template +++ b/salt/soctopus/files/templates/es-generic.template @@ -1,4 +1,4 @@ -{% set ES = salt['pillar.get']('static:managerip', '') %} +{% set ES = salt['pillar.get']('global:managerip', '') %} alert: modules.so.playbook-es.PlaybookESAlerter elasticsearch_host: "{{ ES }}:9200" diff --git a/salt/soctopus/files/templates/generic.template b/salt/soctopus/files/templates/generic.template index 7bb5a969d..2dd2c96c7 100644 --- a/salt/soctopus/files/templates/generic.template +++ b/salt/soctopus/files/templates/generic.template @@ -1,7 +1,9 @@ -{% set es = salt['pillar.get']('static:managerip', '') %} -{% set hivehost = 
salt['pillar.get']('static:managerip', '') %} -{% set hivekey = salt['pillar.get']('static:hivekey', '') %} -alert: hivealerter +{% set es = salt['pillar.get']('global:url_base', '') %} +{% set hivehost = salt['pillar.get']('global:managerip', '') %} +{% set hivekey = salt['pillar.get']('global:hivekey', '') %} +alert: +- "modules.so.playbook-es.PlaybookESAlerter" +- "hivealerter" hive_connection: hive_host: http://{{hivehost}} @@ -13,7 +15,7 @@ hive_proxies: https: '' hive_alert_config: - title: '{rule[name]} - ' + title: "{rule[name]} - " type: 'playbook' source: 'SecurityOnion' description: "`Play:` https://{{es}}/playbook/issues/6000 \n\n `View Event:` \n\n `Raw Data:` {match[message]}" @@ -24,7 +26,6 @@ hive_alert_config: follow: True caseTemplate: '5000' -alert: modules.so.playbook-es.PlaybookESAlerter elasticsearch_host: "{{ es }}:9200" play_title: "" event.module: "playbook" diff --git a/salt/soctopus/files/templates/osquery.template b/salt/soctopus/files/templates/osquery.template index 4fff9a1d5..9c770fc6f 100644 --- a/salt/soctopus/files/templates/osquery.template +++ b/salt/soctopus/files/templates/osquery.template @@ -1,7 +1,9 @@ -{% set es = salt['pillar.get']('static:managerip', '') %} -{% set hivehost = salt['pillar.get']('static:managerip', '') %} -{% set hivekey = salt['pillar.get']('static:hivekey', '') %} -alert: hivealerter +{% set es = salt['pillar.get']('global:url_base', '') %} +{% set hivehost = salt['pillar.get']('global:managerip', '') %} +{% set hivekey = salt['pillar.get']('global:hivekey', '') %} +alert: +- "modules.so.playbook-es.PlaybookESAlerter" +- "hivealerter" hive_connection: hive_host: http://{{hivehost}} @@ -19,7 +21,7 @@ hive_observable_data_mapping: - other: '{match[osquery][hostname]}' hive_alert_config: - title: '{rule[name]} -- {match[osquery][hostname]} -- {match[osquery][name]}' + title: "{rule[name]} -- {match[osquery][hostname]} -- {match[osquery][name]}" type: 'osquery' source: 'SecurityOnion' description: "`Play:` https://{{es}}/playbook/issues/6000 \n\n `View Event:` \n\n `Hostname:` __{match[osquery][hostname]}__ `Live Query:`__[Pivot Link](https://{{es}}/fleet/queries/new?host_uuids={match[osquery][LiveQuery]})__ `Pack:` __{match[osquery][name]}__ `Data:` {match[osquery][columns]}" @@ -31,7 +33,6 @@ hive_alert_config: caseTemplate: '5000' -alert: modules.so.playbook-es.PlaybookESAlerter elasticsearch_host: "{{ es }}:9200" play_title: "" event.module: "playbook" diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls index 3fcdf8717..39768fc42 100644 --- a/salt/soctopus/init.sls +++ b/salt/soctopus/init.sls @@ -1,8 +1,8 @@ -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} -{%- set MANAGER_URL = salt['pillar.get']('manager:url_base', '') %} -{%- set MANAGER_IP = salt['pillar.get']('static:managerip', '') %} +{%- set MANAGER_URL = salt['pillar.get']('global:url_base', '') %} +{%- set MANAGER_IP = salt['pillar.get']('global:managerip', '') %} soctopusdir: file.directory: diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index efa3032dc..70d4c4b6a 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -1,13 +1,13 @@ {% set manager = salt['grains.get']('master') %} -{% set managerip = salt['pillar.get']('static:managerip', '') %} +{% set managerip = 
salt['pillar.get']('global:managerip', '') %} {% set HOSTNAME = salt['grains.get']('host') %} {% set global_ca_text = [] %} {% set global_ca_server = [] %} {% set MAININT = salt['pillar.get']('host:mainint') %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} -{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %} +{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %} -{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone'] %} +{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'import'] %} {% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %} {% set ca_server = grains.id %} {% else %} @@ -37,6 +37,34 @@ m2cryptopkgs: - python-m2crypto {% endif %} +removefbcertdir: + file.absent: + - name: /etc/pki/filebeat.crt + - onlyif: "[ -d /etc/pki/filebeat.crt ]" + +removefbp8dir: + file.absent: + - name: /etc/pki/filebeat.p8 + - onlyif: "[ -d /etc/pki/filebeat.p8 ]" + +removeesp12dir: + file.absent: + - name: /etc/pki/elasticsearch.p12 + - onlyif: "[ -d /etc/pki/elasticsearch.p12 ]" + +/etc/pki/influxdb.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/influxdb.key') -%} + - prereq: + - x509: /etc/pki/influxdb.crt + {%- endif %} + # Create a cert for the talking to influxdb /etc/pki/influxdb.crt: x509.certificate_managed: @@ -47,10 +75,10 @@ m2cryptopkgs: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/influxdb.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/influxdb.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' influxkeyperms: file.managed: @@ -59,7 +87,56 @@ influxkeyperms: - mode: 640 - group: 939 -{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone'] %} +{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %} +# Create a cert for Redis encryption +/etc/pki/redis.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/redis.key') -%} + - prereq: + - x509: /etc/pki/redis.crt + {%- endif %} + +/etc/pki/redis.crt: + x509.certificate_managed: + - ca_server: {{ ca_server }} + - signing_policy: registry + - public_key: /etc/pki/redis.key + - CN: {{ manager }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/redis.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + +rediskeyperms: + file.managed: + - replace: False + - name: /etc/pki/redis.key + - mode: 640 + - group: 939 +{% endif %} + +{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %} +/etc/pki/filebeat.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 
4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/filebeat.key') -%} + - prereq: + - x509: /etc/pki/filebeat.crt + {%- endif %} # Request a cert and drop it where it needs to go to be distributed /etc/pki/filebeat.crt: @@ -68,19 +145,22 @@ influxkeyperms: - signing_policy: filebeat - public_key: /etc/pki/filebeat.key {% if grains.role == 'so-heavynode' %} - - CN: {{grains.id}} + - CN: {{grains.host}} {% else %} - CN: {{manager}} {% endif %} - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/filebeat.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/filebeat.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' cmd.run: - name: "/usr/bin/openssl pkcs8 -in /etc/pki/filebeat.key -topk8 -out /etc/pki/filebeat.p8 -nocrypt" + - onchanges: + - x509: /etc/pki/filebeat.key + fbperms: file.managed: @@ -96,7 +176,8 @@ chownilogstashfilebeatp8: - mode: 640 - user: 931 - group: 939 - + + {% if grains.role != 'so-heavynode' %} # Create Symlinks to the keys so I can distribute it to all the things filebeatdir: file.directory: @@ -107,11 +188,28 @@ fbkeylink: file.symlink: - name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.p8 - target: /etc/pki/filebeat.p8 + - user: socore + - group: socore fbcrtlink: file.symlink: - name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.crt - target: /etc/pki/filebeat.crt + - user: socore + - group: socore + +/etc/pki/registry.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/registry.key') -%} + - prereq: + - x509: /etc/pki/registry.crt + {%- endif %} # Create a cert for the docker registry /etc/pki/registry.crt: @@ -123,10 +221,10 @@ fbcrtlink: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/registry.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/registry.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' regkeyperms: file.managed: @@ -135,6 +233,100 @@ regkeyperms: - mode: 640 - group: 939 +/etc/pki/minio.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/minio.key') -%} + - prereq: + - x509: /etc/pki/minio.crt + {%- endif %} + +# Create a cert for minio +/etc/pki/minio.crt: + x509.certificate_managed: + - ca_server: {{ ca_server }} + - signing_policy: registry + - public_key: /etc/pki/minio.key + - CN: {{ manager }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/minio.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + +miniokeyperms: + file.managed: + - replace: False + - name: /etc/pki/minio.key + - mode: 640 + - group: 939 + 
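The pattern repeated throughout `salt/ssl/init.sls` replaces the deprecated `managed_private_key` argument with a standalone `x509.private_key_managed` state (guarded by a `prereq` so the key only rotates when its certificate is about to be re-issued) plus an `unless` check that re-issues the certificate once it is within five days of expiry, working around the linked Salt issue 52167. A minimal standalone sketch of that guard, assuming a hypothetical certificate at `/etc/pki/example.crt`:

```
#!/bin/bash
# Sketch of the renewal guard used by the x509 states above (path is assumed).
CRT=/etc/pki/example.crt

# notAfter date of the cert, converted to epoch seconds
enddate=$(date -d "$(openssl x509 -in "$CRT" -enddate -noout | cut -d= -f2)" +%s)
now=$(date +%s)
expire_date=$((now + 432000))  # now + 5 days (432000 seconds)

# Succeeds (so the Salt state is skipped) while more than 5 days remain;
# fails near expiry, letting x509.certificate_managed re-issue the cert.
[ "$enddate" -gt "$expire_date" ]
```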
{% endif %} +# Create a cert for elasticsearch +/etc/pki/elasticsearch.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%} + - prereq: + - x509: /etc/pki/elasticsearch.crt + {%- endif %} + +/etc/pki/elasticsearch.crt: + x509.certificate_managed: + - ca_server: {{ ca_server }} + - signing_policy: registry + - public_key: /etc/pki/elasticsearch.key + - CN: {{ manager }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:" + - onchanges: + - x509: /etc/pki/elasticsearch.key + +elastickeyperms: + file.managed: + - replace: False + - name: /etc/pki/elasticsearch.key + - mode: 640 + - group: 930 + +elasticp12perms: + file.managed: + - replace: False + - name: /etc/pki/elasticsearch.p12 + - mode: 640 + - group: 930 + +/etc/pki/managerssl.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/managerssl.key') -%} + - prereq: + - x509: /etc/pki/managerssl.crt + {%- endif %} + # Create a cert for the reverse proxy /etc/pki/managerssl.crt: x509.certificate_managed: @@ -146,10 +338,10 @@ regkeyperms: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/managerssl.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/managerssl.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' msslkeyperms: file.managed: @@ -166,6 +358,11 @@ msslkeyperms: - days_remaining: 0 - days_valid: 820 - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/fleet.key') -%} + - prereq: + - x509: /etc/pki/fleet.crt + {%- endif %} /etc/pki/fleet.crt: x509.certificate_managed: @@ -175,10 +372,10 @@ msslkeyperms: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/fleet.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/fleet.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' fleetkeyperms: file.managed: @@ -188,13 +385,26 @@ fleetkeyperms: - group: 939 {% endif %} -{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone'] %} - +{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-searchnode', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-import'] %} + fbcertdir: file.directory: - name: /opt/so/conf/filebeat/etc/pki - makedirs: True +/opt/so/conf/filebeat/etc/pki/filebeat.key:
+ x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/opt/so/conf/filebeat/etc/pki/filebeat.key') -%} + - prereq: + - x509: /opt/so/conf/filebeat/etc/pki/filebeat.crt + {%- endif %} + # Request a cert and drop it where it needs to go to be distributed /opt/so/conf/filebeat/etc/pki/filebeat.crt: x509.certificate_managed: @@ -209,15 +419,17 @@ fbcertdir: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /opt/so/conf/filebeat/etc/pki/filebeat.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /opt/so/conf/filebeat/etc/pki/filebeat.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' # Convert the key to pkcs#8 so logstash will work correctly. filebeatpkcs: cmd.run: - name: "/usr/bin/openssl pkcs8 -in /opt/so/conf/filebeat/etc/pki/filebeat.key -topk8 -out /opt/so/conf/filebeat/etc/pki/filebeat.p8 -passout pass:" + - onchanges: + - x509: /opt/so/conf/filebeat/etc/pki/filebeat.key filebeatkeyperms: file.managed: @@ -238,6 +450,19 @@ chownfilebeatp8: {% if grains['role'] == 'so-fleet' %} +/etc/pki/managerssl.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/managerssl.key') -%} + - prereq: + - x509: /etc/pki/managerssl.crt + {%- endif %} + # Create a cert for the reverse proxy /etc/pki/managerssl.crt: x509.certificate_managed: @@ -249,10 +474,10 @@ chownfilebeatp8: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/managerssl.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/managerssl.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' msslkeyperms: file.managed: @@ -264,11 +489,16 @@ msslkeyperms: # Create a private key and cert for Fleet /etc/pki/fleet.key: x509.private_key_managed: - - CN: {{ HOSTNAME }} + - CN: {{ manager }} - bits: 4096 - days_remaining: 0 - days_valid: 820 - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/fleet.key') -%} + - prereq: + - x509: /etc/pki/fleet.crt + {%- endif %} /etc/pki/fleet.crt: x509.certificate_managed: @@ -278,10 +508,10 @@ msslkeyperms: - days_remaining: 0 - days_valid: 820 - backup: True - - managed_private_key: - name: /etc/pki/fleet.key - bits: 4096 - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/fleet.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' fleetkeyperms: file.managed: @@ -290,4 +520,53 @@ fleetkeyperms: - mode: 640 - group: 939 -{% endif %} \ No newline at end of file +{% endif %} + +{% if grains['role'] in ['so-node', 'so-searchnode'] %} +# Create a cert for elasticsearch +/etc/pki/elasticsearch.key: + x509.private_key_managed: + - CN: {{ manager }} + - bits: 4096 + - days_remaining: 0 + - days_valid: 820 + - backup: True + - new: True + {% if 
salt['file.file_exists']('/etc/pki/elasticsearch.key') -%} + - prereq: + - x509: /etc/pki/elasticsearch.crt + {%- endif %} + +/etc/pki/elasticsearch.crt: + x509.certificate_managed: + - ca_server: {{ ca_server }} + - signing_policy: registry + - public_key: /etc/pki/elasticsearch.key + - CN: {{ HOSTNAME }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - unless: + # https://github.com/saltstack/salt/issues/52167 + # Will trigger 5 days (432000 sec) from cert expiration + - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]' + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:" + - onchanges: + - x509: /etc/pki/elasticsearch.key + +elasticp12perms: + file.managed: + - replace: False + - name: /etc/pki/elasticsearch.p12 + - mode: 640 + - group: 930 + +elastickeyperms: + file.managed: + - replace: False + - name: /etc/pki/elasticsearch.key + - mode: 640 + - group: 930 + +{%- endif %} diff --git a/salt/strelka/files/backend/backend.yaml b/salt/strelka/files/backend/backend.yaml index b25e5630d..96aa450b7 100644 --- a/salt/strelka/files/backend/backend.yaml +++ b/salt/strelka/files/backend/backend.yaml @@ -1,8 +1,8 @@ -{%- if grains.role == 'so-sensor' -%} +{%- if grains.role in ['so-sensor', 'so-heavynode'] -%} {%- set mainint = salt['pillar.get']('sensor:mainint') %} {%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %} {%- else %} - {%- set ip = salt['pillar.get']('static:managerip') %} + {%- set ip = salt['pillar.get']('global:managerip') %} {%- endif -%} logging_cfg: '/etc/strelka/logging.yaml' limits: diff --git a/salt/strelka/files/filestream/filestream.yaml b/salt/strelka/files/filestream/filestream.yaml index 539e4314c..681aad222 100644 --- a/salt/strelka/files/filestream/filestream.yaml +++ b/salt/strelka/files/filestream/filestream.yaml @@ -1,8 +1,8 @@ -{%- if grains.role == 'so-sensor' -%} +{%- if grains.role in ['so-sensor', 'so-heavynode'] -%} {%- set mainint = salt['pillar.get']('sensor:mainint') %} {%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %} {%- else %} - {%- set ip = salt['pillar.get']('static:managerip') %} + {%- set ip = salt['pillar.get']('global:managerip') %} {%- endif -%} conn: server: '{{ ip }}:57314' diff --git a/salt/strelka/files/frontend/frontend.yaml b/salt/strelka/files/frontend/frontend.yaml index 5d72f1e0d..1233aadad 100644 --- a/salt/strelka/files/frontend/frontend.yaml +++ b/salt/strelka/files/frontend/frontend.yaml @@ -1,8 +1,8 @@ -{%- if grains.role == 'so-sensor' -%} +{%- if grains.role in ['so-sensor', 'so-heavynode'] -%} {%- set mainint = salt['pillar.get']('sensor:mainint') %} {%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %} {%- else %} - {%- set ip = salt['pillar.get']('static:managerip') %} + {%- set ip = salt['pillar.get']('global:managerip') %} {%- endif -%} server: ":57314" coordinator: diff --git a/salt/strelka/files/manager/manager.yaml b/salt/strelka/files/manager/manager.yaml index db9dd7f91..466b94a8a 100644 --- a/salt/strelka/files/manager/manager.yaml +++ b/salt/strelka/files/manager/manager.yaml @@ -1,8 +1,8 @@ -{%- if grains.role == 'so-sensor' -%} +{%- if grains.role in ['so-sensor', 'so-heavynode'] -%} {%- set mainint 
= salt['pillar.get']('sensor:mainint') %} {%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %} {%- else %} - {%- set ip = salt['pillar.get']('static:managerip') %} + {%- set ip = salt['pillar.get']('global:managerip') %} {%- endif -%} coordinator: addr: '{{ ip }}:6380' diff --git a/salt/strelka/init.sls b/salt/strelka/init.sls index c6a900e8e..e85b62f83 100644 --- a/salt/strelka/init.sls +++ b/salt/strelka/init.sls @@ -13,9 +13,9 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. {%- set MANAGER = salt['grains.get']('master') %} -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {%- set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') -%} # Strelka config diff --git a/salt/suricata/cron/surirotate b/salt/suricata/cron/surirotate new file mode 100644 index 000000000..4da651d0e --- /dev/null +++ b/salt/suricata/cron/surirotate @@ -0,0 +1,4 @@ +#!/bin/bash + +# Gzip the eve logs +/usr/sbin/logrotate -f /opt/so/conf/suricata/suri-rotate.conf > /dev/null 2>&1 diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index 3945573a2..a9dccdf46 100644 --- a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -174,7 +174,6 @@ suricata: logging: default-log-level: notice #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- " - default-output-filter: outputs: - console: enabled: "yes" diff --git a/salt/suricata/files/suri-rotate.conf b/salt/suricata/files/suri-rotate.conf new file mode 100644 index 000000000..e8461c48f --- /dev/null +++ b/salt/suricata/files/suri-rotate.conf @@ -0,0 +1,12 @@ +/opt/so/log/suricata/stats.log +{ + daily + rotate 2 + missingok + nocompress + create + sharedscripts + postrotate + docker exec -d so-suricata bash -c 'kill -HUP $(cat /var/run/suricata.pid)' + endscript +} diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index c0677db16..79e06db66 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -14,15 +14,16 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>.
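The new `surirotate` cron entry drives the `suri-rotate.conf` logrotate policy added above, which rotates `stats.log` daily, keeps two generations, and HUPs Suricata inside the `so-suricata` container so it reopens its log file. If you want to exercise the policy by hand, logrotate's debug and force modes make that straightforward (paths as defined in this diff):

```
# Dry run: show what logrotate *would* do with the new Suricata policy
/usr/sbin/logrotate -d /opt/so/conf/suricata/suri-rotate.conf

# Force an immediate rotation, exactly as the hourly surirotate cron job does
/usr/sbin/logrotate -f /opt/so/conf/suricata/suri-rotate.conf
```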
{% set interface = salt['pillar.get']('sensor:interface', 'bond0') %} -{% set ZEEKVER = salt['pillar.get']('static:zeekversion', '') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set ZEEKVER = salt['pillar.get']('global:zeekversion', '') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set BPF_NIDS = salt['pillar.get']('nids:bpf') %} {% set BPF_STATUS = 0 %} {# import_yaml 'suricata/files/defaults2.yaml' as suricata #} {% from 'suricata/suricata_config.map.jinja' import suricata_defaults as suricata_config with context %} +{% from "suricata/map.jinja" import START with context %} # Suricata @@ -78,6 +79,12 @@ surilogscript: - source: salt://suricata/cron/surilogcompress - mode: 755 +surirotatescript: + file.managed: + - name: /usr/local/bin/surirotate + - source: salt://suricata/cron/surirotate + - mode: 755 + /usr/local/bin/surilogcompress: cron.present: - user: suricata @@ -134,6 +141,7 @@ suribpf: so-suricata: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} + - start: {{ START }} - privileged: True - environment: - INTERFACE={{ interface }} @@ -150,3 +158,18 @@ so-suricata: - file: surithresholding - file: /opt/so/conf/suricata/rules/ - file: /opt/so/conf/suricata/bpf + +surilogrotate: + file.managed: + - name: /opt/so/conf/suricata/suri-rotate.conf + - source: salt://suricata/files/suri-rotate.conf + - mode: 644 + +/usr/local/bin/surirotate: + cron.present: + - user: root + - minute: '11' + - hour: '*' + - daymonth: '*' + - month: '*' + - dayweek: '*' diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja new file mode 100644 index 000000000..ad4d70e80 --- /dev/null +++ b/salt/suricata/map.jinja @@ -0,0 +1,6 @@ +# don't start the docker container if it is an import node +{% if grains.id.split('_')|last == 'import' %} + {% set START = False %} +{% else %} + {% set START = True %} +{% endif %} \ No newline at end of file diff --git a/salt/suricata/suricata_config.map.jinja b/salt/suricata/suricata_config.map.jinja index 9fb3c9a7f..a544f6d96 100644 --- a/salt/suricata/suricata_config.map.jinja +++ b/salt/suricata/suricata_config.map.jinja @@ -11,7 +11,7 @@ HOME_NET: "[{{salt['pillar.get']('sensor:hnsensor')}}]" {% endload %} {% else %} {% load_yaml as homenet %} -HOME_NET: "[{{salt['pillar.get']('static:hnmanager', '')}}]" +HOME_NET: "[{{salt['pillar.get']('global:hnmanager', '')}}]" {% endload %} {% endif %} @@ -44,7 +44,7 @@ HOME_NET: "[{{salt['pillar.get']('static:hnmanager', '')}}]" {% endfor %} {% set surimeta_evelog_index = surimeta_evelog_index[0] %} -{% if salt['pillar.get']('static:zeekversion', 'ZEEK') == 'SURICATA' %} +{% if salt['pillar.get']('global:zeekversion', 'ZEEK') == 'SURICATA' %} {% do suricata_defaults.suricata.config.outputs[default_evelog_index]['eve-log'].types.extend(suricata_meta.suricata.config.outputs[surimeta_evelog_index]['eve-log'].types) %} {% endif %} diff --git a/salt/tcpreplay/init.sls b/salt/tcpreplay/init.sls index 7247e4505..a828c72f1 100644 --- a/salt/tcpreplay/init.sls +++ b/salt/tcpreplay/init.sls @@ -1,6 +1,6 @@ {% if grains['role'] == 'so-sensor' or grains['role'] == 'so-eval' %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = 
salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} so-tcpreplay: diff --git a/salt/telegraf/init.sls b/salt/telegraf/init.sls index 668a8839a..c252cdb5b 100644 --- a/salt/telegraf/init.sls +++ b/salt/telegraf/init.sls @@ -1,6 +1,6 @@ {% set MANAGER = salt['grains.get']('master') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} # Add Telegraf to monitor all the things. tgraflogdir: diff --git a/salt/thehive/etc/application.conf b/salt/thehive/etc/application.conf index f06c3f7c6..675c5222c 100644 --- a/salt/thehive/etc/application.conf +++ b/salt/thehive/etc/application.conf @@ -1,10 +1,11 @@ -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %} +{%- set HIVEPLAYSECRET = salt['pillar.get']('global:hiveplaysecret', '') %} # Secret Key # The secret key is used to secure cryptographic functions. # WARNING: If you deploy your application on several servers, make sure to use the same key. -play.http.secret.key="letsdewdis" +play.http.secret.key="{{ HIVEPLAYSECRET }}" play.http.context=/thehive/ search.uri = "http://{{ MANAGERIP }}:9400" # Elasticsearch diff --git a/salt/thehive/etc/cortex-application.conf b/salt/thehive/etc/cortex-application.conf index b9cbe20cc..d84566068 100644 --- a/salt/thehive/etc/cortex-application.conf +++ b/salt/thehive/etc/cortex-application.conf @@ -1,9 +1,10 @@ -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +{%- set CORTEXPLAYSECRET = salt['pillar.get']('global:cortexplaysecret', '') %} # Secret Key # The secret key is used to secure cryptographic functions. # WARNING: If you deploy your application on several servers, make sure to use the same key. 
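Both TheHive and Cortex previously shipped with the hardcoded Play secret `letsdewdis`; these templates now pull per-deployment secrets (`hiveplaysecret`, `cortexplaysecret`) from the global pillar, as the Cortex hunk below shows. A sketch of seeding such a secret by hand, assuming setup normally generates it for you and that the key lives under the same pillar file this diff edits elsewhere (`/opt/so/saltstack/local/pillar/global.sls`):

```
# Generate a random Play secret and append it to the global pillar.
# The key name matches the Jinja lookup above; the two-space indentation
# under the 'global:' mapping is assumed, mirroring the sed cleanups
# elsewhere in this diff.
SECRET=$(head -c 32 /dev/urandom | base64 -w0)
echo "  hiveplaysecret: '$SECRET'" >> /opt/so/saltstack/local/pillar/global.sls
```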
-play.http.secret.key="letsdewdis" +play.http.secret.key="{{ CORTEXPLAYSECRET }}" play.http.context=/cortex/ search.uri = "http://{{ MANAGERIP }}:9400" diff --git a/salt/thehive/init.sls b/salt/thehive/init.sls index 062637855..ffbb50f0c 100644 --- a/salt/thehive/init.sls +++ b/salt/thehive/init.sls @@ -1,6 +1,6 @@ {% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} thehiveconfdir: file.directory: diff --git a/salt/thehive/scripts/cortex_init b/salt/thehive/scripts/cortex_init index 9fc1caf25..6f5d890ae 100644 --- a/salt/thehive/scripts/cortex_init +++ b/salt/thehive/scripts/cortex_init @@ -1,18 +1,18 @@ #!/bin/bash -# {%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -# {%- set CORTEXUSER = salt['pillar.get']('static:cortexuser', 'cortexadmin') %} -# {%- set CORTEXPASSWORD = salt['pillar.get']('static:cortexpassword', 'cortexchangeme') %} -# {%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') %} -# {%- set CORTEXORGNAME = salt['pillar.get']('static:cortexorgname', '') %} -# {%- set CORTEXORGUSER = salt['pillar.get']('static:cortexorguser', '') %} -# {%- set CORTEXORGUSERKEY = salt['pillar.get']('static:cortexorguserkey', '') %} +# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +# {%- set CORTEXUSER = salt['pillar.get']('global:cortexuser', 'cortexadmin') %} +# {%- set CORTEXPASSWORD = salt['pillar.get']('global:cortexpassword', 'cortexchangeme') %} +# {%- set CORTEXKEY = salt['pillar.get']('global:cortexkey', '') %} +# {%- set CORTEXORGNAME = salt['pillar.get']('global:cortexorgname', '') %} +# {%- set CORTEXORGUSER = salt['pillar.get']('global:cortexorguser', 'soadmin') %} +# {%- set CORTEXORGUSERKEY = salt['pillar.get']('global:cortexorguserkey', '') %} default_salt_dir=/opt/so/saltstack/default cortex_clean(){ - sed -i '/^ cortexuser:/d' /opt/so/saltstack/local/pillar/static.sls - sed -i '/^ cortexpassword:/d' /opt/so/saltstack/local/pillar/static.sls - sed -i '/^ cortexorguser:/d' /opt/so/saltstack/local/pillar/static.sls + sed -i '/^ cortexuser:/d' /opt/so/saltstack/local/pillar/global.sls + sed -i '/^ cortexpassword:/d' /opt/so/saltstack/local/pillar/global.sls + sed -i '/^ cortexorguser:/d' /opt/so/saltstack/local/pillar/global.sls } cortex_init(){ diff --git a/salt/thehive/scripts/hive_init b/salt/thehive/scripts/hive_init index 0caff6e2d..c44af6339 100755 --- a/salt/thehive/scripts/hive_init +++ b/salt/thehive/scripts/hive_init @@ -1,12 +1,12 @@ #!/bin/bash -# {%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -# {%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', 'hiveadmin') %} -# {%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', 'hivechangeme') %} -# {%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %} +# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +# {%- set THEHIVEUSER = salt['pillar.get']('global:hiveuser', 'hiveadmin') %} +# {%- set THEHIVEPASSWORD = salt['pillar.get']('global:hivepassword', 'hivechangeme') %} +# {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %} thehive_clean(){ - sed -i '/^ hiveuser:/d' /opt/so/saltstack/local/pillar/static.sls - sed -i '/^ hivepassword:/d' /opt/so/saltstack/local/pillar/static.sls + 
sed -i '/^ hiveuser:/d' /opt/so/saltstack/local/pillar/global.sls + sed -i '/^ hivepassword:/d' /opt/so/saltstack/local/pillar/global.sls } thehive_init(){ diff --git a/salt/top.sls b/salt/top.sls index 5f316dd15..19c1c77dc 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -1,28 +1,36 @@ -{%- set ZEEKVER = salt['pillar.get']('static:zeekversion', '') -%} -{%- set WAZUH = salt['pillar.get']('static:wazuh', '0') -%} +{%- set ZEEKVER = salt['pillar.get']('global:zeekversion', '') -%} +{%- set WAZUH = salt['pillar.get']('global:wazuh', '0') -%} {%- set THEHIVE = salt['pillar.get']('manager:thehive', '0') -%} {%- set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') -%} {%- set FREQSERVER = salt['pillar.get']('manager:freq', '0') -%} {%- set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') -%} -{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%} -{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%} +{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%} +{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%} {%- set STRELKA = salt['pillar.get']('strelka:enabled', '0') -%} +{% import_yaml 'salt/minion.defaults.yaml' as salt %} +{% set saltversion = salt.salt.minion.version %} base: - 'os:CentOS': - - match: grain + 'not G@saltversion:{{saltversion}}': + - match: compound + - salt.minion + + 'G@os:CentOS and G@saltversion:{{saltversion}}': + - match: compound - yum - yum.packages - '*': - - salt + '* and G@saltversion:{{saltversion}}': + - match: compound + - salt.minion - docker - patch.os.schedule - motd - '*_helix': + '*_helix and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - registry @@ -39,7 +47,8 @@ base: - filebeat - schedule - '*_sensor': + '*_sensor and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - common @@ -61,7 +70,8 @@ base: {%- endif %} - schedule - '*_eval': + '*_eval and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - registry @@ -117,7 +127,8 @@ base: {%- endif %} - '*_manager': + '*_manager and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - registry @@ -131,7 +142,6 @@ base: - manager - idstools - suricata.manager - - redis {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %} - mysql {%- endif %} @@ -139,6 +149,8 @@ base: - wazuh {%- endif %} - logstash + - minio + - redis - kibana - elastalert - filebeat @@ -147,6 +159,7 @@ base: {%- if FLEETMANAGER or FLEETNODE %} - fleet - fleet.install_package + - redis {%- endif %} - soctopus {%- if THEHIVE != 0 %} @@ -162,7 +175,8 @@ base: - domainstats {%- endif %} - '*_standalone': + '*_standalone and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - registry @@ -177,7 +191,6 @@ base: - idstools - suricata.manager - healthcheck - - redis {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %} - mysql {%- endif %} @@ -185,6 +198,7 @@ base: - wazuh {%- endif %} - logstash + - minio - kibana - pcap - suricata @@ -220,7 +234,7 @@ base: # Search node logic - '*_node and I@node:node_type:parser': + '*_node and I@node:node_type:parser and G@saltversion:{{saltversion}}': - match: compound - common - firewall @@ -230,7 +244,7 @@ base: {%- endif %} - schedule - '*_node and I@node:node_type:hot': + '*_node and I@node:node_type:hot and G@saltversion:{{saltversion}}': - match: compound - common - firewall @@ -241,7 +255,7 @@ base: {%- endif %} - schedule - '*_node and I@node:node_type:warm': + '*_node and I@node:node_type:warm and G@saltversion:{{saltversion}}': - match: 
compound - common - firewall @@ -251,7 +265,8 @@ {%- endif %} - schedule - '*_searchnode': + '*_searchnode and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - common @@ -269,7 +284,8 @@ {%- endif %} - schedule - '*_managersensor': + '*_managersensor and G@saltversion:{{saltversion}}': + - match: compound - common - nginx - telegraf @@ -283,7 +299,8 @@ {%- endif %} - schedule - '*_managersearch': + '*_managersearch and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - registry @@ -297,7 +314,7 @@ - manager - idstools - suricata.manager - - redis + - minio {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %} - mysql {%- endif %} @@ -313,6 +330,7 @@ - schedule {%- if FLEETMANAGER or FLEETNODE %} - fleet + - redis - fleet.install_package {%- endif %} - soctopus @@ -329,21 +347,27 @@ - domainstats {%- endif %} - '*_heavynode': + '*_heavynode and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - common + - nginx - telegraf - firewall - - redis + - minio {%- if WAZUH != 0 %} - wazuh {%- endif %} - logstash - curator - filebeat + {%- if STRELKA %} + - strelka + {%- endif %} {%- if FLEETMANAGER or FLEETNODE %} - fleet.install_package + - redis {%- endif %} - pcap - suricata @@ -353,7 +377,8 @@ - filebeat - schedule - '*_fleet': + '*_fleet and G@saltversion:{{saltversion}}': + - match: compound - ca - ssl - common @@ -365,3 +390,24 @@ - fleet - fleet.install_package - filebeat + + '*_import and G@saltversion:{{saltversion}}': + - match: compound + - ca + - ssl + - registry + - manager + - common + - nginx + - soc + - firewall + - idstools + - suricata.manager + - pcap + - elasticsearch + - kibana + - filebeat + - utility + - suricata + - zeek + - schedule diff --git a/salt/utility/bin/crossthestreams b/salt/utility/bin/crossthestreams index d21e3c1a4..e67ce9f57 100644 --- a/salt/utility/bin/crossthestreams +++ b/salt/utility/bin/crossthestreams @@ -1,6 +1,8 @@ #!/bin/bash {% set ES = salt['pillar.get']('manager:mainip', '') %} {%- set MANAGER = salt['grains.get']('master') %} +{% set FEATURES = salt['pillar.get']('elastic:features', False) %} + # Wait for ElasticSearch to come up, so that we can query for version information echo -n "Waiting for ElasticSearch..." @@ -35,6 +37,6 @@ echo "Applying cross cluster search config..."
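Every target in the `top.sls` hunks above is now a compound match that also requires the `saltversion` grain to equal the version pinned in `salt/minion.defaults.yaml`; a minion on any other version matches only the first target and receives just `salt.minion`, so it gets upgraded before any other state applies. The release notes above pin Salt 3001.1, so a quick targeting check from the manager might look like this (the version value is assumed from those notes):

```
# What Salt version does this minion report?
salt-call --local grains.get saltversion

# From the manager: which sensors already run the pinned version?
salt -C 'G@saltversion:3001.1 and *_sensor' test.ping
```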
{%- if salt['pillar.get']('nodestab', {}) %} {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} -curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNDATA.ip }}:9300"]}}}}}' +curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}' {%- endfor %} {%- endif %} diff --git a/salt/utility/init.sls b/salt/utility/init.sls index 00899f69a..4779f9c1d 100644 --- a/salt/utility/init.sls +++ b/salt/utility/init.sls @@ -10,7 +10,7 @@ crossclusterson: - template: jinja {% endif %} -{% if grains['role'] == 'so-eval' %} +{% if grains['role'] in ['so-eval', 'so-import'] %} fixsearch: cmd.script: - shell: /bin/bash diff --git a/salt/wazuh/files/agent/ossec.conf b/salt/wazuh/files/agent/ossec.conf index 8d38868ef..7e33f5599 100644 --- a/salt/wazuh/files/agent/ossec.conf +++ b/salt/wazuh/files/agent/ossec.conf @@ -1,5 +1,5 @@ {%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %} - {%- set ip = salt['pillar.get']('static:managerip', '') %} + {%- set ip = salt['pillar.get']('global:managerip', '') %} {%- elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %} {%- set ip = salt['pillar.get']('elasticsearch:mainip', '') %} {%- elif grains['role'] == 'so-sensor' %} diff --git a/salt/wazuh/files/agent/wazuh-register-agent b/salt/wazuh/files/agent/wazuh-register-agent index f2fd8693f..c6411b492 100755 --- a/salt/wazuh/files/agent/wazuh-register-agent +++ b/salt/wazuh/files/agent/wazuh-register-agent @@ -1,5 +1,5 @@ {%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %} - {%- set ip = salt['pillar.get']('static:managerip', '') %} + {%- set ip = salt['pillar.get']('global:managerip', '') %} {%- elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %} {%- set ip = salt['pillar.get']('elasticsearch:mainip', '') %} {%- elif grains['role'] == 'so-sensor' %} @@ -135,7 +135,7 @@ shift $(($OPTIND - 1)) # fi # Default action -> try to register the agent -sleep 10s +sleep 30s STATUS=$(curl -s -k -u $USER:$PASSWORD $PROTOCOL://$API_IP:$API_PORT/agents/$AGENT_ID | jq .data.status | sed s'/"//g') if [[ $STATUS == "Active" ]]; then echo "Agent $AGENT_ID already registered!" diff --git a/salt/wazuh/files/wazuh-manager-whitelist b/salt/wazuh/files/wazuh-manager-whitelist index d39d68e36..c3ecf31a9 100755 --- a/salt/wazuh/files/wazuh-manager-whitelist +++ b/salt/wazuh/files/wazuh-manager-whitelist @@ -1,5 +1,5 @@ -{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %} -{%- set WAZUH_ENABLED = salt['pillar.get']('static:wazuh', '0') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} +{%- set WAZUH_ENABLED = salt['pillar.get']('global:wazuh', '0') %} #!/bin/bash local_salt_dir=/opt/so/saltstack/local @@ -20,7 +20,7 @@ local_salt_dir=/opt/so/saltstack/local # Check if Wazuh enabled if [ {{ WAZUH_ENABLED }} ]; then - WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf" + WAZUH_MGR_CFG="/nsm/wazuh/etc/ossec.conf" if ! 
grep -q "{{ MANAGERIP }}" $WAZUH_MGR_CFG ; then DATE=`date` sed -i 's/<\/ossec_config>//' $WAZUH_MGR_CFG diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls index 2ae4ea715..94b16b199 100644 --- a/salt/wazuh/init.sls +++ b/salt/wazuh/init.sls @@ -1,6 +1,6 @@ {%- set HOSTNAME = salt['grains.get']('host', '') %} -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} # Add ossec group ossecgroup: @@ -13,7 +13,7 @@ ossecm: user.present: - uid: 943 - gid: 945 - - home: /opt/so/conf/wazuh + - home: /nsm/wazuh - createhome: False - allow_uid_change: True - allow_gid_change: True @@ -23,7 +23,7 @@ ossecr: user.present: - uid: 944 - gid: 945 - - home: /opt/so/conf/wazuh + - home: /nsm/wazuh - createhome: False - allow_uid_change: True - allow_gid_change: True @@ -33,7 +33,7 @@ ossec: user.present: - uid: 945 - gid: 945 - - home: /opt/so/conf/wazuh + - home: /nsm/wazuh - createhome: False - allow_uid_change: True - allow_gid_change: True @@ -42,16 +42,18 @@ wazuhpkgs: pkg.installed: - skip_suggestions: False - pkgs: - - wazuh-agent: 3.10.2-1 + - wazuh-agent: 3.13.1-1 - hold: True - update_holds: True -wazuhdir: +wazuhvarossecdir: file.directory: - - name: /opt/so/wazuh - - user: 945 - - group: 945 - - makedirs: True + - name: /var/ossec + - user: ossec + - group: ossec + - recurse: + - user + - group # Add Wazuh agent conf wazuhagentconf: @@ -62,6 +64,13 @@ wazuhagentconf: - group: 945 - template: jinja +wazuhdir: + file.directory: + - name: /nsm/wazuh + - user: 945 + - group: 945 + - makedirs: True + # Wazuh agent registration script wazuhagentregister: file.managed: @@ -94,7 +103,7 @@ so-wazuh: - 0.0.0.0:1515:1515/tcp - 0.0.0.0:55000:55000 - binds: - - /opt/so/wazuh:/var/ossec/data:rw + - /nsm/wazuh:/var/ossec/data:rw # Register the agent registertheagent: @@ -113,3 +122,22 @@ wazuhagentservice: service.running: - name: wazuh-agent - enable: True + +/opt/so/conf/wazuh: + file.symlink: + - target: /nsm/wazuh/etc + +hidsruledir: + file.directory: + - name: /opt/so/rules/hids + - user: 939 + - group: 939 + - makedirs: True + +/opt/so/rules/hids/local_rules.xml: + file.symlink: + - target: /nsm/wazuh/etc/rules/local_rules.xml + +/opt/so/rules/hids/ruleset: + file.symlink: + - target: /nsm/wazuh/ruleset diff --git a/salt/yum/etc/yum.conf.jinja b/salt/yum/etc/yum.conf.jinja index aab63550b..22449083e 100644 --- a/salt/yum/etc/yum.conf.jinja +++ b/salt/yum/etc/yum.conf.jinja @@ -11,6 +11,6 @@ installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }} bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum distroverpkg=centos-release -{% if salt['pillar.get']('static:managerupdate', '0') %} +{% if salt['pillar.get']('global:managerupdate', '0') %} proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142 {% endif %} \ No newline at end of file diff --git a/salt/zeek/init.sls b/salt/zeek/init.sls index 68908a2ce..f6e1e999e 100644 --- a/salt/zeek/init.sls +++ b/salt/zeek/init.sls @@ -1,5 +1,7 @@ -{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %} +{% from "zeek/map.jinja" import START with context %} + +{% set VERSION = 
salt['pillar.get']('global:soversion', 'HH1.2.2') %} +{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set BPF_ZEEK = salt['pillar.get']('zeek:bpf', {}) %} {% set BPF_STATUS = 0 %} @@ -167,6 +169,7 @@ localzeeksync: so-zeek: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-zeek:{{ VERSION }} + - start: {{ START }} - privileged: True - binds: - /nsm/zeek/logs:/nsm/zeek/logs:rw diff --git a/salt/zeek/map.jinja b/salt/zeek/map.jinja new file mode 100644 index 000000000..ad4d70e80 --- /dev/null +++ b/salt/zeek/map.jinja @@ -0,0 +1,6 @@ +# don't start the docker container if it is an import node +{% if grains.id.split('_')|last == 'import' %} + {% set START = False %} +{% else %} + {% set START = True %} +{% endif %} \ No newline at end of file diff --git a/setup/automation/aws_eval_defaults b/setup/automation/aws_eval_defaults new file mode 100644 index 000000000..e038bf29d --- /dev/null +++ b/setup/automation/aws_eval_defaults @@ -0,0 +1,77 @@ +#!/bin/bash + +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>. + +TESTING=true + +address_type=DHCP +ADMINUSER=onionuser +ADMINPASS1=onionuser +ADMINPASS2=onionuser +ALLOW_CIDR=0.0.0.0/0 +ALLOW_ROLE=a +BASICZEEK=7 +BASICSURI=7 +# BLOGS= +BNICS=ens6 +ZEEKVERSION=ZEEK +# CURCLOSEDAYS= +# EVALADVANCED=BASIC +GRAFANA=1 +# HELIXAPIKEY= +HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 +HNSENSOR=inherit +HOSTNAME=eval-aws +install_type=EVAL +# LSINPUTBATCHCOUNT= +# LSINPUTTHREADS= +# LSPIPELINEBATCH= +# LSPIPELINEWORKERS= +MANAGERADV=BASIC +MANAGERUPDATES=1 +# MDNS= +# MGATEWAY= +# MIP= +# MMASK= +MNIC=ens5 +# MSEARCH= +# MSRV= +# MTU= +NIDS=Suricata +# NODE_ES_HEAP_SIZE= +# NODE_LS_HEAP_SIZE= +NODESETUP=NODEBASIC +NSMSETUP=BASIC +NODEUPDATES=MANAGER +# OINKCODE= +OSQUERY=1 +# PATCHSCHEDULEDAYS= +# PATCHSCHEDULEHOURS= +PATCHSCHEDULENAME=auto +PLAYBOOK=1 +# REDIRECTHOST= +REDIRECTINFO=HOSTNAME +RULESETUP=ETOPEN +# SHARDCOUNT= +SKIP_REBOOT=0 +SOREMOTEPASS1=onionuser +SOREMOTEPASS2=onionuser +STRELKA=1 +THEHIVE=1 +WAZUH=1 +WEBUSER=onionuser@somewhere.invalid +WEBPASSWD1=0n10nus3r +WEBPASSWD2=0n10nus3r diff --git a/setup/automation/aws_forwardnode_defaults b/setup/automation/aws_forwardnode_defaults new file mode 100644 index 000000000..99d8f21be --- /dev/null +++ b/setup/automation/aws_forwardnode_defaults @@ -0,0 +1,78 @@ +#!/bin/bash + +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version.
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>. + +TESTING=true + +address_type=DHCP +ADMINUSER=onionuser +ADMINPASS1=onionuser +ADMINPASS2=onionuser +#ALLOW_CIDR=0.0.0.0/0 +#ALLOW_ROLE=a +BASICZEEK=1 +BASICSURI=1 +# BLOGS= +BNICS=ens6 +ZEEKVERSION=ZEEK +# CURCLOSEDAYS= +# EVALADVANCED=BASIC +#GRAFANA=1 +# HELIXAPIKEY= +HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 +HNSENSOR=inherit +HOSTNAME=forwardnode-aws +install_type=SENSOR +# LSINPUTBATCHCOUNT= +# LSINPUTTHREADS= +# LSPIPELINEBATCH= +# LSPIPELINEWORKERS= +#MANAGERADV=BASIC +MANAGERUPDATES=1 +# MDNS= +# MGATEWAY= +# MIP= +# MMASK= +MNIC=ens5 +# MSEARCH= +MSRV=manager-aws +MSRVIP=172.16.163.10 +# MTU= +#NIDS=Suricata +# NODE_ES_HEAP_SIZE= +# NODE_LS_HEAP_SIZE= +#NODESETUP=NODEBASIC +NSMSETUP=BASIC +NODEUPDATES=MANAGER +# OINKCODE= +#OSQUERY=1 +# PATCHSCHEDULEDAYS= +# PATCHSCHEDULEHOURS= +PATCHSCHEDULENAME=auto +#PLAYBOOK=1 +# REDIRECTHOST= +#REDIRECTINFO=HOSTNAME +#RULESETUP=ETOPEN +# SHARDCOUNT= +SKIP_REBOOT=0 +SOREMOTEPASS1=onionuser +SOREMOTEPASS2=onionuser +STRELKA=1 +#THEHIVE=1 +WAZUH=1 +WEBUSER=onionuser@somewhere.invalid +WEBPASSWD1=0n10nus3r +WEBPASSWD2=0n10nus3r diff --git a/setup/automation/aws_manager_defaults b/setup/automation/aws_manager_defaults new file mode 100644 index 000000000..2ca5c2a04 --- /dev/null +++ b/setup/automation/aws_manager_defaults @@ -0,0 +1,77 @@ +#!/bin/bash + +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>.
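The aws_*_defaults files being added here are plain shell variable assignments that so-setup sources for unattended installs (the `source automation/$automation` logic appears later in this diff). A minimal sketch of how such a file is consumed, assuming a hard-coded file name purely for illustration:

```bash
#!/bin/bash
# Illustrative sketch of how so-setup loads an automation defaults file.
automation=aws_manager_defaults   # example name from this diff, not a fixed value
if [[ -f automation/$automation && $(basename "$automation") == "$automation" ]]; then
    # The basename check rejects anything containing a path (e.g. ../evil).
    source "automation/$automation"
    automated=yes
    echo "Automated install: type=$install_type, hostname=$HOSTNAME"
fi
```

Because the file is sourced verbatim, commented-out assignments (e.g. `# MSRV=`) simply leave those values unset, and the installer falls back to its defaults or prompts.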
+ +TESTING=true + +address_type=DHCP +ADMINUSER=onionuser +ADMINPASS1=onionuser +ADMINPASS2=onionuser +ALLOW_CIDR=0.0.0.0/0 +ALLOW_ROLE=a +BASICZEEK=7 +BASICSURI=7 +# BLOGS= +BNICS=ens6 +ZEEKVERSION=ZEEK +# CURCLOSEDAYS= +# EVALADVANCED=BASIC +GRAFANA=1 +# HELIXAPIKEY= +HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 +HNSENSOR=inherit +HOSTNAME=manager-aws +install_type=MANAGER +# LSINPUTBATCHCOUNT= +# LSINPUTTHREADS= +# LSPIPELINEBATCH= +# LSPIPELINEWORKERS= +MANAGERADV=BASIC +MANAGERUPDATES=1 +# MDNS= +# MGATEWAY= +# MIP= +# MMASK= +MNIC=ens5 +# MSEARCH= +# MSRV= +# MTU= +NIDS=Suricata +# NODE_ES_HEAP_SIZE= +# NODE_LS_HEAP_SIZE= +NODESETUP=NODEBASIC +NSMSETUP=BASIC +NODEUPDATES=MANAGER +# OINKCODE= +OSQUERY=1 +# PATCHSCHEDULEDAYS= +# PATCHSCHEDULEHOURS= +PATCHSCHEDULENAME=auto +PLAYBOOK=1 +# REDIRECTHOST= +REDIRECTINFO=HOSTNAME +RULESETUP=ETOPEN +# SHARDCOUNT= +SKIP_REBOOT=0 +SOREMOTEPASS1=onionuser +SOREMOTEPASS2=onionuser +STRELKA=1 +THEHIVE=1 +WAZUH=1 +WEBUSER=onionuser@somewhere.invalid +WEBPASSWD1=0n10nus3r +WEBPASSWD2=0n10nus3r diff --git a/setup/automation/aws_searchnode_defaults b/setup/automation/aws_searchnode_defaults new file mode 100644 index 000000000..3c2ff4df5 --- /dev/null +++ b/setup/automation/aws_searchnode_defaults @@ -0,0 +1,78 @@ +#!/bin/bash + +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>.
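Non-manager defaults such as the search node file below register their Salt minion with the manager named by MSRV. A condensed sketch of that round trip, based on the accept_salt_key_remote hunk later in this diff (so.key and the soremote user are the installer's conventions shown there; the minion ID below is hypothetical):

```bash
#!/bin/bash
# Sketch: remote Salt key acceptance for a distributed node.
MSRV=manager-aws                      # manager hostname from the defaults file
MINION_ID=searchnode-aws_searchnode   # hypothetical hostname_role minion ID
systemctl restart salt-minion
# Delete any stale key on the manager, then accept the freshly presented one.
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -d "$MINION_ID" -y
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -a "$MINION_ID" -y
```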
+ +TESTING=true + +address_type=DHCP +ADMINUSER=onionuser +ADMINPASS1=onionuser +ADMINPASS2=onionuser +#ALLOW_CIDR=0.0.0.0/0 +ALLOW_ROLE=a +#BASICZEEK=7 +#BASICSURI=7 +# BLOGS= +#BNICS=ens6 +#ZEEKVERSION=ZEEK +# CURCLOSEDAYS= +# EVALADVANCED=BASIC +#GRAFANA=1 +# HELIXAPIKEY= +HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 +HNSENSOR=inherit +HOSTNAME=searchnode-aws +install_type=SEARCHNODE +# LSINPUTBATCHCOUNT= +# LSINPUTTHREADS= +# LSPIPELINEBATCH= +# LSPIPELINEWORKERS= +#MANAGERADV=BASIC +MANAGERUPDATES=1 +# MDNS= +# MGATEWAY= +# MIP= +# MMASK= +MNIC=ens5 +# MSEARCH= +MSRV=manager-aws +MSRVIP=172.16.163.10 +# MTU= +#NIDS=Suricata +# NODE_ES_HEAP_SIZE= +# NODE_LS_HEAP_SIZE= +NODESETUP=NODEBASIC +NSMSETUP=BASIC +NODEUPDATES=MANAGER +# OINKCODE= +#OSQUERY=1 +# PATCHSCHEDULEDAYS= +# PATCHSCHEDULEHOURS= +PATCHSCHEDULENAME=auto +#PLAYBOOK=1 +# REDIRECTHOST= +#REDIRECTINFO=HOSTNAME +#RULESETUP=ETOPEN +# SHARDCOUNT= +SKIP_REBOOT=0 +SOREMOTEPASS1=onionuser +SOREMOTEPASS2=onionuser +#STRELKA=1 +#THEHIVE=1 +WAZUH=1 +WEBUSER=onionuser@somewhere.invalid +WEBPASSWD1=0n10nus3r +WEBPASSWD2=0n10nus3r diff --git a/setup/automation/aws_standalone_defaults b/setup/automation/aws_standalone_defaults new file mode 100644 index 000000000..25d3da0e0 --- /dev/null +++ b/setup/automation/aws_standalone_defaults @@ -0,0 +1,77 @@ +#!/bin/bash + +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>.
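The MSRV/MSRVIP pairs in the forward and search node files only help if the manager's name actually resolves on the new node; the set_hostname hunk later in this diff appends a hosts entry when it does not. A sketch of that check (the /dev/null redirect is my addition for quiet output):

```bash
#!/bin/bash
# Make the manager resolvable before the minion first contacts it.
MSRV=manager-aws        # manager hostname from the defaults file
MSRVIP=172.16.163.10    # manager IP from the defaults file
if ! getent hosts "$MSRV" > /dev/null; then
    echo "$MSRVIP $MSRV" >> /etc/hosts
fi
```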
+ +TESTING=true + +address_type=DHCP +ADMINUSER=onionuser +ADMINPASS1=onionuser +ADMINPASS2=onionuser +ALLOW_CIDR=0.0.0.0/0 +ALLOW_ROLE=a +BASICZEEK=7 +BASICSURI=7 +# BLOGS= +BNICS=ens6 +ZEEKVERSION=ZEEK +# CURCLOSEDAYS= +# EVALADVANCED=BASIC +GRAFANA=1 +# HELIXAPIKEY= +HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 +HNSENSOR=inherit +HOSTNAME=standalone-aws +install_type=STANDALONE +# LSINPUTBATCHCOUNT= +# LSINPUTTHREADS= +# LSPIPELINEBATCH= +# LSPIPELINEWORKERS= +MANAGERADV=BASIC +MANAGERUPDATES=1 +# MDNS= +# MGATEWAY= +# MIP= +# MMASK= +MNIC=ens5 +# MSEARCH= +# MSRV= +# MTU= +NIDS=Suricata +# NODE_ES_HEAP_SIZE= +# NODE_LS_HEAP_SIZE= +NODESETUP=NODEBASIC +NSMSETUP=BASIC +NODEUPDATES=MANAGER +# OINKCODE= +OSQUERY=1 +# PATCHSCHEDULEDAYS= +# PATCHSCHEDULEHOURS= +PATCHSCHEDULENAME=auto +PLAYBOOK=1 +# REDIRECTHOST= +REDIRECTINFO=HOSTNAME +RULESETUP=ETOPEN +# SHARDCOUNT= +SKIP_REBOOT=0 +SOREMOTEPASS1=onionuser +SOREMOTEPASS2=onionuser +STRELKA=1 +THEHIVE=1 +WAZUH=1 +WEBUSER=onionuser@somewhere.invalid +WEBPASSWD1=0n10nus3r +WEBPASSWD2=0n10nus3r diff --git a/setup/so-functions b/setup/so-functions index 1f868ae98..be450bad3 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -21,13 +21,48 @@ source ./so-common-functions SOVERSION=$(cat ../VERSION) +log() { + msg=$1 + level=${2:-I} + now=$(TZ=GMT date +"%Y-%m-%dT%H:%M:%SZ") + echo -e "$now | $level | $msg" >> "$setup_log" 2>&1 +} + +error() { + log "$1" "E" +} + +info() { + log "$1" "I" +} + +title() { + echo -e "\n-----------------------------\n $1\n-----------------------------\n" >> "$setup_log" 2>&1 +} + +logCmd() { + cmd=$1 + info "Executing command: $cmd" + $cmd >> "$setup_log" 2>&1 +} + +analyze_system() { + title "System Characteristics" + logCmd "uptime" + logCmd "uname -a" + logCmd "free -h" + logCmd "lscpu" + logCmd "df -h" + logCmd "ip a" +} + accept_salt_key_remote() { systemctl restart salt-minion echo "Accept the key remotely on the manager" >> "$setup_log" 2>&1 # Delete the key just in case. 
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -d "$MINION_ID" -y - salt-call state.apply ca + salt-call state.apply ca >> /dev/null 2>&1 ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -a "$MINION_ID" -y } @@ -142,65 +177,7 @@ secrets_pillar(){ fi } -# Enable Bro Logs -zeek_logs_enabled() { - echo "Enabling Bro Logs" >> "$setup_log" 2>&1 - local zeeklogs_pillar=./pillar/zeeklogs.sls - - printf '%s\n'\ - "zeeklogs:"\ - " enabled:" > "$zeeklogs_pillar" - - if [ "$MANAGERADV" = 'ADVANCED' ]; then - for BLOG in "${BLOGS[@]}"; do - echo " - $BLOG" | tr -d '"' >> "$zeeklogs_pillar" - done - else - printf '%s\n'\ - " - conn"\ - " - dce_rpc"\ - " - dhcp"\ - " - dhcpv6"\ - " - dnp3"\ - " - dns"\ - " - dpd"\ - " - files"\ - " - ftp"\ - " - http"\ - " - intel"\ - " - irc"\ - " - kerberos"\ - " - modbus"\ - " - mqtt"\ - " - notice"\ - " - ntlm"\ - " - openvpn"\ - " - pe"\ - " - radius"\ - " - rfb"\ - " - rdp"\ - " - signatures"\ - " - sip"\ - " - smb_files"\ - " - smb_mapping"\ - " - smtp"\ - " - snmp"\ - " - software"\ - " - ssh"\ - " - ssl"\ - " - syslog"\ - " - telnet"\ - " - tunnel"\ - " - weird"\ - " - mysql"\ - " - socks"\ - " - x509" >> "$zeeklogs_pillar" - fi - - printf '%s\n' '----' >> "$setup_log" 2>&1 - cat "$zeeklogs_pillar" >> "$setup_log" 2>&1 -} check_admin_pass() { check_pass_match "$ADMINPASS1" "$ADMINPASS2" "APMATCH" @@ -253,6 +230,51 @@ check_pass_match() { fi } +check_service_status() { + + local service_name=$1 + echo "Checking service $service_name status" >> "$setup_log" 2>&1 + systemctl status $service_name > /dev/null 2>&1 + local status=$? + #true if there is an issue with the service false if it is running properly + if [ $status -gt 0 ]; then + echo "$service_name is not running" >> "$setup_log" 2>&1 + echo 1; + else + echo "$service_name is running" >> "$setup_log" 2>&1 + echo 0; + fi + +} + +check_salt_master_status() { + echo "Checking if we can talk to the salt master" >> "$setup_log" 2>&1 + salt-call state.show_top > /dev/null 2>&1 + local status=$? + #true if there is an issue talking to salt master + if [ $status -gt 0 ]; then + echo 1; + else + echo "Can talk to salt master" >> "$setup_log" 2>&1 + echo 0; + fi + +} + +check_salt_minion_status() { + echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 + salt "$MINION_ID" test.ping >> "$setup_log" 2>&1 + local status=$? 
+ #true if there is an issue getting a job response from the minion + if [ $status -gt 0 ]; then + echo 1; + else + echo "Received job response from salt minion" >> "$setup_log" 2>&1 + echo 0; + fi + +} + check_soremote_pass() { check_pass_match "$SOREMOTEPASS1" "$SOREMOTEPASS2" "SCMATCH" } @@ -360,7 +382,7 @@ configure_minion() { 'helix') echo "master: $HOSTNAME" >> "$minion_config" ;; - 'manager' | 'eval' | 'managersearch' | 'standalone') + 'manager' | 'eval' | 'managersearch' | 'standalone' | 'import') printf '%s\n'\ "master: $HOSTNAME"\ "mysql.host: '$MAINIP'"\ @@ -411,17 +433,40 @@ check_requirements() { req_mem=12 req_cores=4 req_nics=2 - req_storage=100 elif [[ "$standalone_or_dist" == 'dist' ]]; then req_mem=8 req_cores=4 - req_storage=40 if [[ "$node_type" == 'sensor' ]]; then req_nics=2; else req_nics=1; fi if [[ "$node_type" == 'fleet' ]]; then req_mem=4; fi + elif [[ "$standalone_or_dist" == 'import' ]]; then + req_mem=4 + req_cores=2 + req_nics=1 fi - if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then - whiptail_requirements_error "disk space" "${free_space_root} GB" "${req_storage} GB" + if [[ $setup_type == 'network' ]] ; then + if [[ -n $nsm_mount ]]; then + if [[ "$standalone_or_dist" == 'import' ]]; then + req_storage=50 + else + req_storage=100 + fi + if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then + whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB" + fi + if (( $(echo "$free_space_nsm < $req_storage" | bc -l) )); then + whiptail_storage_requirements "/nsm" "${free_space_nsm} GB" "${req_storage} GB" + fi + else + if [[ "$standalone_or_dist" == 'import' ]]; then + req_storage=50 + else + req_storage=200 + fi + if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then + whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB" + fi + fi fi if [[ $num_nics -lt $req_nics ]]; then @@ -452,7 +497,7 @@ copy_salt_master_config() { copy_minion_tmp_files() { case "$install_type" in - 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE') + 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir" cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1 if [ -d "$temp_install_dir"/salt ] ; then @@ -611,24 +656,27 @@ detect_os() { echo "Installing required packages to run installer..." >> "$setup_log" 2>&1 # Install bind-utils so the host command exists - if ! command -v host > /dev/null 2>&1; then + if [[ ! $is_iso ]]; then + if ! command -v host > /dev/null 2>&1; then yum -y install bind-utils >> "$setup_log" 2>&1 - fi - if ! command -v nmcli > /dev/null 2>&1; then + fi + if ! command -v nmcli > /dev/null 2>&1; then { yum -y install NetworkManager; systemctl enable NetworkManager; systemctl start NetworkManager; } >> "$setup_log" 2>&1 - fi - if ! command -v bc > /dev/null 2>&1; then + fi + if ! command -v bc > /dev/null 2>&1; then yum -y install bc >> "$setup_log" 2>&1 - fi - if ! yum versionlock > /dev/null 2>&1; then + fi + if !
yum versionlock > /dev/null 2>&1; then yum -y install yum-plugin-versionlock >> "$setup_log" 2>&1 - fi - - + fi + else + logCmd "systemctl enable NetworkManager" + logCmd "systemctl start NetworkManager" + fi elif [ -f /etc/os-release ]; then OS=ubuntu if grep -q "UBUNTU_CODENAME=bionic" /etc/os-release; then @@ -663,10 +711,11 @@ detect_os() { disable_auto_start() { # Remove the automated setup script from crontab, if it exists - crontab -u $INSTALLUSERNAME -r + logCmd "crontab -u $INSTALLUSERNAME -r" # Truncate last line of the bash profile - sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile + info "Removing auto-run of setup from bash profile" + sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile >> "$setup_log" 2>&1 } @@ -701,15 +750,19 @@ docker_install() { if [ $OS = 'centos' ]; then { yum clean expire-cache; - yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo; - yum -y install docker-ce-19.03.11-3.el7 containerd.io-1.2.13-3.2.el7; + if [[ ! $is_airgap ]]; then + yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo; + fi + if [[ ! $is_iso ]]; then + yum -y install docker-ce-19.03.11-3.el7 containerd.io-1.2.13-3.2.el7; + fi yum versionlock docker-ce-19.03.11-3.el7; yum versionlock containerd.io-1.2.13-3.2.el7 } >> "$setup_log" 2>&1 else case "$install_type" in - 'MANAGER' | 'EVAL') + 'MANAGER' | 'EVAL' | 'STANDALONE' | 'MANAGERSEARCH' | 'IMPORT') apt-get update >> "$setup_log" 2>&1 ;; *) @@ -753,6 +806,22 @@ docker_seed_registry() { local VERSION="$SOVERSION" if ! [ -f /nsm/docker-registry/docker/registry.tar ]; then + if [ "$install_type" == 'IMPORT' ]; then + local TRUSTED_CONTAINERS=(\ + "so-idstools:$VERSION" \ + "so-nginx:$VERSION" \ + "so-filebeat:$VERSION" \ + "so-suricata:$VERSION" \ + "so-soc:$VERSION" \ + "so-steno:$VERSION" \ + "so-elasticsearch:$VERSION" \ + "so-kibana:$VERSION" \ + "so-kratos:$VERSION" \ + "so-suricata:$VERSION" \ + "so-pcaptools:$VERSION" \ + "so-zeek:$VERSION" + ) + else local TRUSTED_CONTAINERS=(\ "so-nginx:$VERSION" \ "so-filebeat:$VERSION" \ @@ -764,7 +833,8 @@ docker_seed_registry() { "so-telegraf:$VERSION" \ "so-zeek:$VERSION" ) - if [ "$install_type" != 'HELIXSENSOR' ]; then + fi + if [ "$install_type" != 'HELIXSENSOR' ] && [ "$install_type" != 'IMPORT' ]; then TRUSTED_CONTAINERS=("${TRUSTED_CONTAINERS[@]}" \ "so-acng:$VERSION" \ "so-thehive-cortex:$VERSION" \ @@ -778,6 +848,7 @@ docker_seed_registry() { "so-grafana:$VERSION" \ "so-influxdb:$VERSION" \ "so-kibana:$VERSION" \ + "so-minio:$VERSION" \ "so-mysql:$VERSION" \ "so-pcaptools:$VERSION" \ "so-playbook:$VERSION" \ @@ -863,9 +934,13 @@ generate_passwords(){ PLAYBOOKPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) FLEETPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) FLEETJWT=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) - HIVEKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) - CORTEXKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) - CORTEXORGUSERKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + if [[ "$THEHIVE" == "1" ]]; then + HIVEKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + HIVEPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + CORTEXKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + CORTEXORGUSERKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + CORTEXPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + fi 
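All of the credentials above come from one shell idiom: filter /dev/urandom down to alphanumerics, fold the stream into fixed-width lines, and keep the first. A self-contained sketch of the same pipeline (the helper name is mine, not the installer's):

```bash
#!/bin/bash
# Generate an N-character alphanumeric secret using the same
# tr | fold | head pipeline as generate_passwords.
gen_secret() {
    local len="${1:-20}"
    tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w "$len" | head -n 1
}

hive_key=$(gen_secret)          # 20 chars, like HIVEKEY/CORTEXKEY
access_secret=$(gen_secret 40)  # 40 chars, like the MinIO access secret
echo "$hive_key $access_secret"
```

Restricting the character set to a-zA-Z0-9 (which minio_generate_keys below now also does, replacing the earlier [:graph:] set) avoids quoting problems when these values are written into YAML pillar files.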
SENSORONIKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) KRATOSKEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) } @@ -889,7 +964,7 @@ got_root() { get_minion_type() { local minion_type case "$install_type" in - 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE') + 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE' | 'IMPORT') minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]') ;; 'HELIXSENSOR') @@ -922,6 +997,15 @@ install_cleanup() { } +import_registry_docker() { + if [ -f /nsm/docker-registry/docker/registry_image.tar ]; then + logCmd "service docker start" + logCmd "docker load -i /nsm/docker-registry/docker/registry_image.tar" + else + info "Need to download registry" + fi +} + manager_pillar() { local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls @@ -953,7 +1037,6 @@ manager_pillar() { " osquery: $OSQUERY"\ " thehive: $THEHIVE"\ " playbook: $PLAYBOOK"\ - " url_base: $REDIRECTIT"\ ""\ "elasticsearch:"\ " mainip: $MAINIP"\ @@ -993,35 +1076,46 @@ manager_pillar() { cat "$pillar_file" >> "$setup_log" 2>&1 } -manager_static() { - local static_pillar="$local_salt_dir/pillar/static.sls" +manager_global() { + local global_pillar="$local_salt_dir/pillar/global.sls" if [ -z "$SENSOR_CHECKIN_INTERVAL_MS" ]; then SENSOR_CHECKIN_INTERVAL_MS=10000 - if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'STANDALONE' ]; then + if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'STANDALONE' ] || [ "$install_type" = 'IMPORT' ]; then SENSOR_CHECKIN_INTERVAL_MS=1000 fi fi - # Create a static file for global values + # Create a global file for global values + printf '%s\n'\ + "global:"\ + " soversion: $SOVERSION"\ + " hnmanager: $HNMANAGER"\ + " ntpserver: $NTPSERVER"\ + " proxy: $PROXY"\ + " zeekversion: $ZEEKVERSION"\ + " ids: $NIDS"\ + " url_base: $REDIRECTIT"\ + " managerip: $MAINIP" > "$global_pillar" + + # Check if TheHive is enabled. 
If so, add creds and other details + if [[ "$THEHIVE" == "1" ]]; then + printf '%s\n'\ + " hiveuser: $WEBUSER"\ + " hivepassword: '$WEBPASSWD1'"\ + " hivekey: $HIVEKEY"\ + " hiveplaysecret: $HIVEPLAYSECRET"\ + " cortexuser: $WEBUSER"\ + " cortexpassword: '$WEBPASSWD1'"\ + " cortexkey: $CORTEXKEY"\ + " cortexorgname: SecurityOnion"\ + " cortexorguser: soadmin"\ + " cortexorguserkey: $CORTEXORGUSERKEY"\ + " cortexplaysecret: $CORTEXPLAYSECRET" >> "$global_pillar" + fi + + # Continue adding other details printf '%s\n'\ - "static:"\ - " soversion: $SOVERSION"\ - " hnmanager: $HNMANAGER"\ - " ntpserver: $NTPSERVER"\ - " proxy: $PROXY"\ - " zeekversion: $ZEEKVERSION"\ - " ids: $NIDS"\ - " managerip: $MAINIP"\ - " hiveuser: $WEBUSER"\ - " hivepassword: '$WEBPASSWD1'"\ - " hivekey: $HIVEKEY"\ - " cortexuser: $WEBUSER"\ - " cortexpassword: '$WEBPASSWD1'"\ - " cortexkey: $CORTEXKEY"\ - " cortexorgname: SecurityOnion"\ - " cortexorguser: $WEBUSER"\ - " cortexorguserkey: $CORTEXORGUSERKEY"\ " fleet_custom_hostname: "\ " fleet_manager: False"\ " fleet_node: False"\ @@ -1031,8 +1125,9 @@ manager_static() { " fleet_ip: N/A"\ " sensoronikey: $SENSORONIKEY"\ " wazuh: $WAZUH"\ - " managerupdate: $MANAGERUPDATES"\ + " managerupdate: $MANAGERUPDATES"\ " imagerepo: $IMAGEREPO"\ + " pipeline: redis"\ "pcap:"\ " sensor_checkin_interval_ms: $SENSOR_CHECKIN_INTERVAL_MS"\ "strelka:"\ @@ -1049,9 +1144,9 @@ manager_static() { " discovery_nodes: 1"\ " hot_warm_enabled: False"\ " cluster_routing_allocation_disk.threshold_enabled: true"\ - " cluster_routing_allocation_disk_watermark_low: 95%"\ - " cluster_routing_allocation_disk_watermark_high: 98%"\ - " cluster_routing_allocation_disk_watermark_flood_stage: 98%"\ + " cluster_routing_allocation_disk_watermark_low: 95%"\ + " cluster_routing_allocation_disk_watermark_high: 98%"\ + " cluster_routing_allocation_disk_watermark_flood_stage: 98%"\ " index_settings:"\ " so-beats:"\ " shards: 1"\ @@ -1102,18 +1197,26 @@ manager_static() { " shards: 5"\ " warm: 7"\ " close: 365"\ - " delete: 45" > "$static_pillar" - + " delete: 45"\ + "minio:"\ + " access_key: '$ACCESS_KEY'"\ + " access_secret: '$ACCESS_SECRET'"\ + "s3_settings:"\ + " size_file: 2048"\ + " time_file: 1"\ + " upload_queue_size: 4"\ + " encoding: gzip"\ + " interval: 5" >> "$global_pillar" + printf '%s\n' '----' >> "$setup_log" 2>&1 - cat "$static_pillar" >> "$setup_log" 2>&1 } minio_generate_keys() { local charSet="[:graph:]" - ACCESS_KEY=$(tr -cd "$charSet" < /dev/urandom | tr -d \' | tr -d \" | head -c 20) - ACCESS_SECRET=$(tr -cd "$charSet" < /dev/urandom | tr -d \' | tr -d \" | head -c 40) + ACCESS_KEY=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1) + ACCESS_SECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 40 | head -n 1) } @@ -1236,52 +1339,67 @@ reserve_group_ids() { groupadd -g 946 cyberchef } +# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and salt/salt/master.defaults.yaml and salt/salt/minion.defaults.yaml saltify() { # Install updates and Salt if [ $OS = 'centos' ]; then set_progress_str 5 'Installing Salt repo' { - sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/archive/2019.2.5/SALTSTACK-GPG-KEY.pub; - cp ./yum_repos/salt-2019-2-5.repo /etc/yum.repos.d/salt-2019-2-5.repo; + sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001.1/SALTSTACK-GPG-KEY.pub; + cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo; } >> "$setup_log" 2>&1 set_progress_str 6 'Installing various 
dependencies' - yum -y install wget nmap-ncat >> "$setup_log" 2>&1 + if [[ ! $is_iso ]]; then + logCmd "yum -y install wget nmap-ncat" + fi case "$install_type" in - 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE') + 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT') reserve_group_ids >> "$setup_log" 2>&1 - yum -y install epel-release >> "$setup_log" 2>&1 - yum -y install sqlite argon2 curl mariadb-devel >> "$setup_log" 2>&1 + if [[ ! $is_iso ]]; then + logCmd "yum -y install epel-release" + logCmd "yum -y install sqlite argon2 curl mariadb-devel" + fi # Download Ubuntu Keys in case manager updates = 1 mkdir -p /opt/so/gpg >> "$setup_log" 2>&1 - wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 - wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1 - wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1 - cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo >> "$setup_log" 2>&1 + if [[ ! $is_airgap ]]; then + logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub" + logCmd "wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg" + logCmd "wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH" + logCmd "cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo" + fi set_progress_str 7 'Installing salt-master' - yum -y install salt-master-2019.2.5 >> "$setup_log" 2>&1 + if [[ ! $is_iso ]]; then + logCmd "yum -y install salt-master-3001.1" + fi systemctl enable salt-master >> "$setup_log" 2>&1 ;; *) if [ "$MANAGERUPDATES" = '1' ]; then { - # Create the GPG Public Key for the Salt Repo - cp ./public_keys/salt.pem /etc/pki/rpm-gpg/saltstack-signing-key; + if [[ ! $is_airgap ]]; then + # Create the GPG Public Key for the Salt Repo + cp ./public_keys/salt.pem /etc/pki/rpm-gpg/saltstack-signing-key; - # Copy repo files over - cp ./yum_repos/salt-latest.repo /etc/yum.repos.d/salt-latest.repo; - cp ./yum_repos/salt-2019-2-5.repo /etc/yum.repos.d/salt-2019-2-5.repo; + # Copy repo files over + cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo; + else + info "This is airgap" + fi } >> "$setup_log" 2>&1 fi ;; esac - cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo >> "$setup_log" 2>&1 - yum clean expire-cache >> "$setup_log" 2>&1 + if [[ ! $is_airgap ]]; then + cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo >> "$setup_log" 2>&1 + yum clean expire-cache >> "$setup_log" 2>&1 + fi set_progress_str 8 'Installing salt-minion & python modules' { - yum -y install epel-release - yum -y install salt-minion-2019.2.5\ - python3\ + if [[ ! 
$is_iso ]]; then + yum -y install epel-release + yum -y install salt-minion-3001.1\ + python3\ python36-docker\ python36-dateutil\ python36-m2crypto\ @@ -1291,7 +1409,8 @@ saltify() { lvm2\ openssl\ jq; - yum -y update exclude=salt*; + yum -y update --exclude=salt*; + fi systemctl enable salt-minion; } >> "$setup_log" 2>&1 yum versionlock salt* @@ -1320,11 +1439,11 @@ saltify() { 'FLEET') if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-mysqldb >> "$setup_log" 2>&1; else apt-get -y install python-mysqldb >> "$setup_log" 2>&1; fi ;; - 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE') # TODO: should this also be HELIXSENSOR? + 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') # TODO: should this also be HELIXSENSOR? # Add saltstack repo(s) - wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/2019.2.5/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1 - echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/2019.2.5 $OSVER main" > /etc/apt/sources.list.d/saltstack2019.list 2>> "$setup_log" + wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1 + echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001.1 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" # Add Docker repo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1 @@ -1332,7 +1451,7 @@ saltify() { # Get gpg keys mkdir -p /opt/so/gpg >> "$setup_log" 2>&1 - wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/latest/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 + wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1 wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1 @@ -1345,7 +1464,7 @@ saltify() { set_progress_str 6 'Installing various dependencies' apt-get -y install sqlite3 argon2 libssl-dev >> "$setup_log" 2>&1 set_progress_str 7 'Installing salt-master' - apt-get -y install salt-master=2019.2.5+ds-1 >> "$setup_log" 2>&1 + apt-get -y install salt-master=3001.1+ds-1 >> "$setup_log" 2>&1 apt-mark hold salt-master >> "$setup_log" 2>&1 ;; *) @@ -1356,14 +1475,14 @@ saltify() { echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1 apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1 - echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/2019.2.5/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" + echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001.1/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log" ;; esac apt-get update >> "$setup_log" 2>&1 set_progress_str 8 'Installing salt-minion & python modules' - apt-get -y install 
salt-minion=2019.2.5+ds-1\ - salt-common=2019.2.5+ds-1 >> "$setup_log" 2>&1 + apt-get -y install salt-minion=3001.1+ds-1\ + salt-common=3001.1+ds-1 >> "$setup_log" 2>&1 apt-mark hold salt-minion salt-common >> "$setup_log" 2>&1 if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-dateutil python3-m2crypto python3-mysqldb >> "$setup_log" 2>&1 @@ -1377,22 +1496,80 @@ salt_checkin() { case "$install_type" in - 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE') # Fix Mine usage + 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') # Fix Mine usage { echo "Building Certificate Authority"; salt-call state.apply ca; echo " *** Restarting Salt to fix any SSL errors. ***"; - systemctl restart salt-master; + + local SALT_SERVICES=(\ + "salt-master" \ + "salt-minion" + ) + local LOOP_COUNT=0 + for service in "${SALT_SERVICES[@]}"; do + echo "Stopping service $service" >> "$setup_log" 2>&1 + systemctl stop "$service" >> "$setup_log" 2>&1 + LOOP_COUNT=0 + while ! (( $(check_service_status $service) )); do + echo "$service still running" >> "$setup_log" 2>&1 + if [ $LOOP_COUNT -gt 60 ]; then + echo "$service could not be stopped in 60 seconds, exiting" >> "$setup_log" 2>&1 + exit 1 + fi + sleep 1; + ((LOOP_COUNT+=1)) + done + done + sleep 5; - systemctl restart salt-minion; - sleep 15; + + for service in "${SALT_SERVICES[@]}"; do + echo "Starting service $service" >> "$setup_log" 2>&1 + systemctl start "$service" >> "$setup_log" 2>&1 + LOOP_COUNT=0 + while (( $(check_service_status $service) )); do + echo "$service still not running" >> "$setup_log" 2>&1 + if [ $LOOP_COUNT -gt 60 ]; then + echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1 + exit 1 + fi + sleep 1; + ((LOOP_COUNT+=1)) + done + done + + sleep 5; + + LOOP_COUNT=0 + while (( $(check_salt_master_status) )); do + echo "salt minion cannot talk to salt master" >> "$setup_log" 2>&1 + if [ $LOOP_COUNT -gt 30 ]; then + echo "salt minion could not talk to salt master after 30 attempts, exiting" >> "$setup_log" 2>&1 + exit 1 + fi + sleep 1; + ((LOOP_COUNT+=1)) + done + + LOOP_COUNT=0 + while (( $(check_salt_minion_status) )); do + echo "salt master did not get a job response from salt minion" >> "$setup_log" 2>&1 + if [ $LOOP_COUNT -gt 30 ]; then + echo "salt master did not get a job response from salt minion after 30 attempts, exiting" >> "$setup_log" 2>&1 + exit 1 + fi + sleep 1; + ((LOOP_COUNT+=1)) + done + echo " Confirming existence of the CA certificate" cat /etc/pki/ca.crt echo " Applying a mine hack"; - salt '*' mine.send x509.get_pem_entries glob_path=/etc/pki/ca.crt; - salt '*' mine.update; + salt "$MINION_ID" mine.send x509.get_pem_entries glob_path=/etc/pki/ca.crt; + salt "$MINION_ID" mine.update; echo " Confirming salt mine now contains the certificate"; - salt '*' mine.get '*' x509.get_pem_entries; + salt "$MINION_ID" mine.get '*' x509.get_pem_entries; echo " Applying SSL state"; salt-call state.apply ssl; } >> "$setup_log" 2>&1 @@ -1499,19 +1676,12 @@ sensor_pillar() { echo " suriprocs: $BASICSURI" >> "$pillar_file" fi printf '%s\n'\ - " zeekbpf:"\ - " pcapbpf:"\ - " nidsbpf:"\ " manager: $MSRV"\ " mtu: $MTU"\ " uniqueid: $(date '+%s')" >> "$pillar_file" if [ "$HNSENSOR" != 'inherit' ]; then echo " hnsensor: $HNSENSOR" >> "$pillar_file" fi - printf '%s\n'\ - " access_key: $ACCESS_KEY"\ - " access_secret: $ACCESS_SECRET"\ - "" >> "$pillar_file" printf '%s\n' '----' >> "$setup_log" 2>&1 cat "$pillar_file" >> "$setup_log" 2>&1 @@ -1548,7 +1718,7
@@ set_hostname() { set_hostname_iso - if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE)$ ]]; then + if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE|IMPORT)$ ]]; then if ! getent hosts "$MSRV"; then echo "$MSRVIP $MSRV" >> /etc/hosts fi @@ -1580,20 +1750,20 @@ set_initial_firewall_policy() { $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost minion "$MAINIP" $default_salt_dir/pillar/data/addtotab.sh managertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" ;; - 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE') + 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP" $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP" $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP" $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP" case "$install_type" in 'EVAL') - $default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE True + $default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE" True ;; 'MANAGERSEARCH') $default_salt_dir/pillar/data/addtotab.sh managersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" ;; 'STANDALONE') - $default_salt_dir/pillar/data/addtotab.sh standalonetab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE + $default_salt_dir/pillar/data/addtotab.sh standalonetab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE" ;; esac ;; @@ -1607,7 +1777,7 @@ set_initial_firewall_policy() { case "$install_type" in 'SENSOR') ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP" - ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE + ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE" ;; 'SEARCHNODE') ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP" @@ -1616,7 +1786,7 @@ set_initial_firewall_policy() { 'HEAVYNODE') ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP" ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP" - ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE + ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE" ssh -i /root/.ssh/so.key 
soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" ;; 'FLEET') @@ -1785,8 +1955,68 @@ es_heapsize() { fi export ES_HEAP_SIZE - if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then + if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE|IMPORT)$ ]]; then NODE_ES_HEAP_SIZE=ES_HEAP_SIZE export NODE_ES_HEAP_SIZE fi } + +# Enable Bro Logs +zeek_logs_enabled() { + echo "Enabling Bro Logs" >> "$setup_log" 2>&1 + + local zeeklogs_pillar=./pillar/zeeklogs.sls + + printf '%s\n'\ + "zeeklogs:"\ + " enabled:" > "$zeeklogs_pillar" + + if [ "$MANAGERADV" = 'ADVANCED' ]; then + for BLOG in "${BLOGS[@]}"; do + echo " - $BLOG" | tr -d '"' >> "$zeeklogs_pillar" + done + else + printf '%s\n'\ + " - conn"\ + " - dce_rpc"\ + " - dhcp"\ + " - dhcpv6"\ + " - dnp3"\ + " - dns"\ + " - dpd"\ + " - files"\ + " - ftp"\ + " - http"\ + " - intel"\ + " - irc"\ + " - kerberos"\ + " - modbus"\ + " - mqtt"\ + " - notice"\ + " - ntlm"\ + " - openvpn"\ + " - pe"\ + " - radius"\ + " - rfb"\ + " - rdp"\ + " - signatures"\ + " - sip"\ + " - smb_files"\ + " - smb_mapping"\ + " - smtp"\ + " - snmp"\ + " - software"\ + " - ssh"\ + " - ssl"\ + " - syslog"\ + " - telnet"\ + " - tunnel"\ + " - weird"\ + " - mysql"\ + " - socks"\ + " - x509" >> "$zeeklogs_pillar" + fi + + printf '%s\n' '----' >> "$setup_log" 2>&1 + cat "$zeeklogs_pillar" >> "$setup_log" 2>&1 +} diff --git a/setup/so-setup b/setup/so-setup index 6a432fc9d..e89ed38d4 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -48,9 +48,11 @@ done # Begin Installation pre-processing parse_install_username -echo "Installing as the $INSTALLUSERNAME user." >> $setup_log 2>&1 -echo "---- Starting setup at $(date -u) ----" >> $setup_log 2>&1 +title "Initializing Setup" +info "Installing as the $INSTALLUSERNAME user" + +analyze_system automated=no function progress() { @@ -76,9 +78,6 @@ if [[ -f automation/$automation && $(basename $automation) == $automation ]]; th source automation/$automation automated=yes - echo "Checking network configuration" >> $setup_log 2>&1 - ip a >> $setup_log 2>&1 - attempt=1 attempts=60 ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1 @@ -118,8 +117,21 @@ if [ "$OS" == ubuntu ]; then update-alternatives --set newt-palette /etc/newt/palette.original >> $setup_log 2>&1 fi -if [ $automated == no ]; then - setterm -blank 0 >> $setup_log 2>&1 +# Kernel consoleblank is causing whiptail progress screen to appear to hang #1084 +# https://github.com/Security-Onion-Solutions/securityonion/issues/1084 +if [ "$automated" == no ]; then + TTY=$(tty) + echo "Setup is running on TTY $TTY" >> $setup_log 2>&1 + if echo $TTY | grep -q "/dev/tty"; then + CONSOLEBLANK=$(cat /sys/module/kernel/parameters/consoleblank) + echo "Kernel consoleblank value before: $CONSOLEBLANK" >> $setup_log 2>&1 + if [ $CONSOLEBLANK -gt 0 ]; then + echo "Running 'setterm -blank 0' for TTY $TTY" >> $setup_log 2>&1 + TERM=linux setterm -blank 0 >$TTY <$TTY + CONSOLEBLANK=$(cat /sys/module/kernel/parameters/consoleblank) + echo "Kernel consoleblank value after: $CONSOLEBLANK" >> $setup_log 2>&1 + fi + fi fi if [ "$setup_type" == 'iso' ] || (whiptail_you_sure); then @@ -136,6 +148,7 @@ if [ "$install_type" = 'EVAL' ]; then is_manager=true is_sensor=true is_eval=true + STRELKARULES=1 elif [ "$install_type" = 'STANDALONE' ]; then is_manager=true is_distmanager=true @@ -165,19 +178,41 @@ elif [ "$install_type" = 'FLEET' ]; then OSQUERY=1 elif [ 
"$install_type" = 'HELIXSENSOR' ]; then is_helix=true +elif [ "$install_type" = 'IMPORT' ]; then + is_import=true fi +# Say yes to the dress if its an ISO install +if [[ "$setup_type" == 'iso' ]]; then + is_iso=true +fi + +#Check if this is an airgap install + +#if [[ $is_manager ]]; then +# if [[ $is_iso ]]; then +# whiptail_airgap +# if [[ "$INTERWEBS" == 'AIRGAP' ]]; then +# is_airgap=true +# fi +# fi +#fi + if [[ $is_manager && $is_sensor ]]; then check_requirements "standalone" elif [[ $is_fleet_standalone ]]; then check_requirements "dist" "fleet" elif [[ $is_sensor && ! $is_eval ]]; then check_requirements "dist" "sensor" -elif [[ $is_distmanager || $is_minion ]]; then +elif [[ $is_distmanager || $is_minion ]] && [[ ! $is_import ]]; then check_requirements "dist" +elif [[ $is_import ]]; then + check_requirements "import" fi -whiptail_patch_schedule +if [[ ! $is_import ]]; then + whiptail_patch_schedule +fi case "$setup_type" in 'iso') @@ -239,17 +274,44 @@ if [[ $is_node ]]; then CURCLOSEDAYS=30 fi +if [[ $is_import ]]; then + PATCHSCHEDULENAME=auto + MTU=1500 + RULESETUP=ETOPEN + NSMSETUP=BASIC + HNSENSOR=inherit + MANAGERUPDATES=0 + MANAGERADV=BASIC + INTERFACE=bond0 + ZEEKVERSION=ZEEK + NIDS=Suricata + RULESETUP=ETOPEN + GRAFANA=0 + OSQUERY=0 + WAZUH=0 + THEHIVE=0 + PLAYBOOK=0 + STRELKA=0 + +fi + + # Start user prompts + + if [[ $is_helix || $is_sensor ]]; then whiptail_sensor_nics +fi + +if [[ $is_helix || $is_sensor || $is_import ]]; then calculate_useable_cores fi -if [[ $is_helix || $is_manager ]]; then +if [[ $is_helix || $is_manager || $is_import ]]; then whiptail_homenet_manager fi -if [[ $is_helix || $is_manager || $is_node ]]; then +if [[ $is_helix || $is_manager || $is_node || $is_import ]]; then set_base_heapsizes fi @@ -263,6 +325,10 @@ if [[ $is_manager && ! 
$is_eval ]]; then whiptail_oinkcode fi + if [[ $STRELKA == 1 ]]; then + STRELKARULES=1 + fi + if [ "$MANAGERADV" = 'ADVANCED' ] && [ "$ZEEKVERSION" != 'SURICATA' ]; then whiptail_manager_adv_service_zeeklogs fi @@ -271,9 +337,9 @@ fi if [[ $is_manager ]]; then whiptail_components_adv_warning whiptail_enable_components - if [[ $STRELKA == 1 ]]; then - whiptail_strelka_rules - fi +fi + +if [[ $is_manager || $is_import ]]; then collect_webuser_inputs get_redirect fi @@ -335,7 +401,7 @@ else FLEETNODEPASSWD1=$WEBPASSWD1 fi -if [[ $is_manager ]]; then whiptail_so_allow; fi +if [[ $is_manager || $is_import ]]; then whiptail_so_allow; fi whiptail_make_changes @@ -359,7 +425,7 @@ fi } >> $setup_log 2>&1 -if [[ $is_manager ]]; then +if [[ $is_manager || $is_import ]]; then { generate_passwords; secrets_pillar; @@ -378,9 +444,9 @@ fi host_pillar >> $setup_log 2>&1 -if [[ $is_minion ]]; then +if [[ $is_minion || $is_import ]]; then set_updates >> $setup_log 2>&1 - copy_ssh_key >> $setup_log 2>&1 + [ "$automated" == no ] && copy_ssh_key >> $setup_log 2>&1 fi # Begin install @@ -399,6 +465,9 @@ fi if [[ $is_sensor || $is_helix ]]; then set_progress_str 3 'Configuring sensor interface' configure_network_sensor >> $setup_log 2>&1 + fi + + if [[ $is_sensor || $is_helix || $is_import ]]; then set_progress_str 4 'Generating sensor pillar' sensor_pillar >> $setup_log 2>&1 fi @@ -415,7 +484,7 @@ fi set_progress_str 9 'Initializing Salt minion' configure_minion "$minion_type" >> $setup_log 2>&1 - if [[ $is_manager || $is_helix ]]; then + if [[ $is_manager || $is_helix || $is_import ]]; then set_progress_str 10 'Configuring Salt master' { create_local_directories; @@ -428,8 +497,9 @@ fi set_progress_str 11 'Updating sudoers file for soremote user' update_sudoers >> $setup_log 2>&1 - set_progress_str 12 'Generating manager static pillar' - manager_static >> $setup_log 2>&1 + set_progress_str 12 'Generating manager global pillar' + minio_generate_keys + manager_global >> $setup_log 2>&1 set_progress_str 13 'Generating manager pillar' manager_pillar >> $setup_log 2>&1 @@ -459,7 +529,7 @@ fi accept_salt_key_remote >> $setup_log 2>&1 fi - if [[ $is_manager ]]; then + if [[ $is_manager || $is_import ]]; then set_progress_str 20 'Accepting Salt key' salt-key -ya "$MINION_ID" >> $setup_log 2>&1 fi @@ -467,10 +537,15 @@ fi set_progress_str 21 'Copying minion pillars to manager' copy_minion_tmp_files >> $setup_log 2>&1 - set_progress_str 22 'Generating CA and checking in' + if [[ $is_minion ]]; then + set_progress_str 22 'Checking if the Salt Minion needs to be updated' + salt-call state.apply salt.minion -l info >> $setup_log 2>&1 + fi + + set_progress_str 23 'Generating CA and checking in' salt_checkin >> $setup_log 2>&1 - if [[ $is_manager || $is_helix ]]; then + if [[ $is_manager || $is_helix || $is_import ]]; then set_progress_str 25 'Configuring firewall' set_initial_firewall_policy >> $setup_log 2>&1 @@ -479,7 +554,7 @@ fi else set_progress_str 26 'Downloading containers from the internet' fi - + import_registry_docker >> $setup_log 2>&1 salt-call state.apply -l info registry >> $setup_log 2>&1 docker_seed_registry 2>> "$setup_log" # ~ 60% when finished @@ -508,15 +583,17 @@ fi set_progress_str 64 "$(print_salt_state_apply 'nginx')" salt-call state.apply -l info nginx >> $setup_log 2>&1 - if [[ $is_manager || $is_node ]]; then + if [[ $is_manager || $is_node || $is_import ]]; then set_progress_str 64 "$(print_salt_state_apply 'elasticsearch')" salt-call state.apply -l info elasticsearch >> $setup_log 2>&1 fi - if 
[[ $is_sensor ]]; then + if [[ $is_sensor || $is_import ]]; then set_progress_str 65 "$(print_salt_state_apply 'pcap')" salt-call state.apply -l info pcap >> $setup_log 2>&1 + fi + if [[ $is_sensor || $is_import ]]; then set_progress_str 66 "$(print_salt_state_apply 'suricata')" salt-call state.apply -l info suricata >> $setup_log 2>&1 @@ -529,13 +606,15 @@ fi salt-call state.apply -l info curator >> $setup_log 2>&1 fi - if [[ $is_manager ]]; then + if [[ $is_manager || $is_import ]]; then set_progress_str 69 "$(print_salt_state_apply 'soc')" salt-call state.apply -l info soc >> $setup_log 2>&1 set_progress_str 70 "$(print_salt_state_apply 'kibana')" salt-call state.apply -l info kibana >> $setup_log 2>&1 + fi + if [[ $is_manager ]]; then set_progress_str 71 "$(print_salt_state_apply 'elastalert')" salt-call state.apply -l info elastalert >> $setup_log 2>&1 @@ -566,7 +645,7 @@ fi if [[ $is_fleet_standalone && $FLEETCUSTOMHOSTNAME != '' ]]; then set_progress_str 77 "$(print_salt_state_apply 'fleet.event_update-custom-hostname')" - pillar_override="{\"static\":{\"fleet_custom_hostname\": \"$FLEETCUSTOMHOSTNAME\"}}" + pillar_override="{\"global\":{\"fleet_custom_hostname\": \"$FLEETCUSTOMHOSTNAME\"}}" salt-call state.apply -l info fleet.event_update-custom-hostname pillar="$pillar_override" >> $setup_log 2>&1 fi @@ -593,7 +672,7 @@ fi fi fi - if [[ $is_manager || $is_helix ]]; then + if [[ $is_manager || $is_helix || $is_import ]]; then set_progress_str 81 "$(print_salt_state_apply 'utility')" salt-call state.apply -l info utility >> $setup_log 2>&1 fi @@ -610,7 +689,7 @@ fi filter_unused_nics >> $setup_log 2>&1 network_setup >> $setup_log 2>&1 - if [[ $is_manager ]]; then + if [[ $is_manager || $is_import ]]; then set_progress_str 87 'Adding user to SOC' add_web_user >> $setup_log 2>&1 fi @@ -626,12 +705,15 @@ fi success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}') if [[ $success != 0 ]]; then SO_ERROR=1; fi +# Check entire setup log for errors or unexpected salt states and ensure cron jobs are not reporting errors to root's mailbox +if grep -q -E "ERROR|Result: False" $setup_log || [[ -s /var/spool/mail/root && "$setup_type" == "iso" ]]; then SO_ERROR=1; fi if [[ -n $SO_ERROR ]]; then echo "Errors detected during setup; skipping post-setup steps to allow for analysis of failures." >> $setup_log 2>&1 SKIP_REBOOT=1 whiptail_setup_failed else + echo "Successfully completed setup! Continuing with post-installation steps" >> $setup_log 2>&1 { export percentage=95 # set to last percentage used in previous subshell if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then @@ -639,12 +721,14 @@ else IP=$ALLOW_CIDR so-allow -$ALLOW_ROLE >> $setup_log 2>&1 fi - set_progress_str 99 'Waiting for TheHive to start up' - if [[ $THEHIVE == 1 ]]; then check_hive_init >> $setup_log 2>&1; fi - + if [[ $THEHIVE == 1 ]]; then + set_progress_str 99 'Waiting for TheHive to start up' + check_hive_init >> $setup_log 2>&1 + fi } | whiptail_gauge_post_setup "Running post-installation steps..." whiptail_setup_complete + echo "Post-installation steps have completed." 
>> $setup_log 2>&1 fi install_cleanup >> $setup_log 2>&1 diff --git a/setup/so-variables b/setup/so-variables index 07f7aa71b..057c67ff2 100644 --- a/setup/so-variables +++ b/setup/so-variables @@ -28,9 +28,24 @@ mkdir -p /nsm filesystem_nsm=$(df /nsm | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }') export filesystem_nsm -free_space_root=$(df -Pk /nsm | sed 1d | grep -v used | awk '{ print $4 / 1048576 }' | awk '{ printf("%.0f", $1) }') +free_space_nsm=$(df -Pk /nsm | sed 1d | grep -v used | awk '{ print $4 / 1048576 }' | awk '{ printf("%.0f", $1) }') +export free_space_nsm + +free_space_root=$(df -Pk / | sed 1d | grep -v used | awk '{ print $4 / 1048576 }' | awk '{ printf("%.0f", $1) }') export free_space_root +readarray -t mountpoints <<< "$(lsblk -nlo MOUNTPOINT)" +readarray -t partitions <<< "$(lsblk -nlo NAME)" +index=0 +for item in "${mountpoints[@]}"; do + if [[ $item == '/' ]]; then export root_part="${partitions[$index]}" + elif [[ $item == '/nsm' ]]; then + export nsm_mount=1 + export nsm_part="${partitions[$index]}" + fi + ((index++)) +done + mkdir -p /root/installtmp/pillar/minions export temp_install_dir=/root/installtmp diff --git a/setup/so-whiptail b/setup/so-whiptail index f0f1fb7b5..7ed300939 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -18,6 +18,18 @@ source ./so-variables source ./so-common-functions +whiptail_airgap() { + + [ -n "$TESTING" ] && return + + INTERWEBS=$(whiptail --title "Security Onion Setup" --radiolist \ + "Choose your install conditions:" 20 75 4 \ + "STANDARD" "This manager has internet access" ON \ + "AIRGAP" "This manager does not have internet access" OFF 3>&1 1>&2 2>&3 ) + + local exitstatus=$? + whiptail_check_exitstatus $exitstatus +} whiptail_basic_zeek() { @@ -315,6 +327,32 @@ whiptail_requirements_error() { whiptail_check_exitstatus $exitstatus } +whiptail_storage_requirements() { + local mount=$1 + local current_val=$2 + local needed_val=$3 + + [ -n "$TESTING" ] && return + + read -r -d '' message <<- EOM + Free space on mount point '${mount}' is currently ${current_val}. + + You need ${needed_val} to meet minimum requirements. + + Visit https://docs.securityonion.net/en/2.1/hardware.html for more information. + + Press YES to continue anyway, or press NO to cancel. + EOM + + whiptail \ + --title "Security Onion Setup" \ + --yesno "$message" \ + 14 75 + + local exitstatus=$? + whiptail_check_exitstatus $exitstatus +} + whiptail_invalid_pass_warning() { [ -n "$TESTING" ] && return @@ -445,10 +483,11 @@ whiptail_install_type() { # What kind of install are we doing? install_type=$(whiptail --title "Security Onion Setup" --radiolist \ - "Choose install type:" 10 65 3 \ + "Choose install type:" 10 65 4 \ "EVAL" "Evaluation mode (not for production) " ON \ "STANDALONE" "Standalone production install " OFF \ "DISTRIBUTED" "Distributed install submenu " OFF \ + "IMPORT" "Standalone to import PCAP or log files " OFF \ 3>&1 1>&2 2>&3 ) @@ -1037,17 +1076,21 @@ whiptail_setup_complete() { [ -n "$TESTING" ] && return - if [[ -n $ALLOW_CIDR ]]; then - local sentence_prefix="Access" + if [[ -n "$REDIRECTIT" && $is_manager = true ]]; then + if [[ -n $ALLOW_CIDR ]]; then + local sentence_prefix="Access" + else + local sentence_prefix="Run so-allow after reboot to access" + fi + local accessMessage="\n${sentence_prefix} the web interface at: https://${REDIRECTIT}\n" else - local sentence_prefix="Run so-allow after reboot to access" + local accessMessage="" fi + read -r -d '' message <<- EOM Finished ${install_type} installation.
- - ${sentence_prefix} the web interface at: https://${REDIRECTIT} - + $accessMessage Press ENTER to reboot. EOM @@ -1092,28 +1135,28 @@ whiptail_so_allow() { export ALLOW_ROLE='a' export ALLOW_CIDR fi - - whiptail_check_exitstatus $exitstatus } whiptail_gauge_post_setup() { - [ -n "$TESTING" ] && return - - local msg=$1 + if [ -n "$TESTING" ]; then + cat >> $setup_log 2>&1 + else + local msg=$1 - whiptail --title "Security Onion Setup" --gauge "$msg" 6 60 96 + whiptail --title "Security Onion Setup" --gauge "$msg" 6 60 96 + fi } whiptail_strelka_rules() { - [ -n "$TESTING" ] && return + [ -n "$TESTING" ] && return - whiptail --title "Security Onion Setup" --yesno "Do you want to enable the default YARA rules for Strelka?" 8 75 + whiptail --title "Security Onion Setup" --yesno "Do you want to enable the default YARA rules for Strelka?" 8 75 - local exitstatus=$? + local exitstatus=$? - if [[ $exitstatus == 0 ]]; then export STRELKARULES=1; fi + if [[ $exitstatus == 0 ]]; then export STRELKARULES=1; fi } whiptail_suricata_pins() { diff --git a/setup/yum_repos/salt-2019-2-5.repo b/setup/yum_repos/salt-2019-2-5.repo deleted file mode 100644 index e456fdd87..000000000 --- a/setup/yum_repos/salt-2019-2-5.repo +++ /dev/null @@ -1,6 +0,0 @@ -[saltstack-repo] -name=SaltStack repo for RHEL/CentOS $releasever PY3 -baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/2019.2.5/ -enabled=1 -gpgcheck=1 -gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/2019.2.5/SALTSTACK-GPG-KEY.pub diff --git a/setup/yum_repos/salt-latest.repo b/setup/yum_repos/salt-latest.repo deleted file mode 100644 index 709053a9b..000000000 --- a/setup/yum_repos/salt-latest.repo +++ /dev/null @@ -1,7 +0,0 @@ -[salt-latest] -name=SaltStack Latest Release Channel for RHEL/Centos $releasever -baseurl=https://repo.saltstack.com/py3/redhat/7/$basearch/latest -failovermethod=priority -enabled=1 -gpgcheck=1 -gpgkey=https://repo.saltstack.com/py3/redhat/$releasever/$basearch/latest/SALTSTACK-GPG-KEY.pub \ No newline at end of file diff --git a/setup/yum_repos/saltstack.repo b/setup/yum_repos/saltstack.repo new file mode 100644 index 000000000..2e1b425fb --- /dev/null +++ b/setup/yum_repos/saltstack.repo @@ -0,0 +1,6 @@ +[saltstack] +name=SaltStack repo for RHEL/CentOS $releasever PY3 +baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001.1/ +enabled=1 +gpgcheck=1 +gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001.1/SALTSTACK-GPG-KEY.pub \ No newline at end of file diff --git a/sigs/securityonion-2.1.0-rc2.iso.sig b/sigs/securityonion-2.1.0-rc2.iso.sig new file mode 100644 index 000000000..cc03c894d Binary files /dev/null and b/sigs/securityonion-2.1.0-rc2.iso.sig differ
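The new saltstack.repo pins CentOS installs to the Salt 3001.1 archive, and saltify() versionlocks the packages on top of that. A quick way to confirm the pin on a finished install; these are standard yum/salt commands, not part of this diff:

```bash
# Confirm Salt is installed at, and locked to, the expected 3001.1 release.
yum list installed salt-master salt-minion   # versions should show 3001.1
yum versionlock list | grep -i salt          # locks added during setup
salt-call --local test.version               # version reported by the minion
```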