diff --git a/salt/ca/init.sls b/salt/ca/init.sls index 485b01d3e..9c30b5c5e 100644 --- a/salt/ca/init.sls +++ b/salt/ca/init.sls @@ -24,8 +24,9 @@ pki_private_key: - x509: /etc/pki/ca.crt {%- endif %} -/etc/pki/ca.crt: +pki_public_ca_crt: x509.certificate_managed: + - name: /etc/pki/ca.crt - signing_private_key: /etc/pki/ca.key - CN: {{ manager }} - C: US @@ -66,4 +67,4 @@ cakeyperms: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/domainstats/init.sls b/salt/domainstats/init.sls index 72ccf2f76..e2167d161 100644 --- a/salt/domainstats/init.sls +++ b/salt/domainstats/init.sls @@ -45,14 +45,15 @@ so-domainstatsimage: so-domainstats: docker_container.running: - - require: - - so-domainstatsimage - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-domainstats:{{ VERSION }} - hostname: domainstats - name: so-domainstats - user: domainstats - binds: - /opt/so/log/domainstats:/var/log/domain_stats + - require: + - file: dstatslogdir + - cmd: so-domainstatsimage append_so-domainstats_so-status.conf: file.append: @@ -65,4 +66,4 @@ append_so-domainstats_so-status.conf: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/elastalert/init.sls b/salt/elastalert/init.sls index a5c3a3b67..f94e0c1d0 100644 --- a/salt/elastalert/init.sls +++ b/salt/elastalert/init.sls @@ -122,6 +122,10 @@ so-elastalert: - {{MANAGER_URL}}:{{MANAGER_IP}} - require: - cmd: wait_for_elasticsearch + - file: elastarules + - file: elastalogdir + - file: elastacustmodulesdir + - file: elastaconf - watch: - file: elastaconf diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 9f475c2c3..41a9f4504 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -15,7 +15,8 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls in allowed_states %} - +include: + - ssl {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} @@ -280,6 +281,24 @@ so-elasticsearch: - file: esyml - file: esingestconf - file: so-elasticsearch-pipelines-file + - require: + - file: esyml + - file: eslog4jfile + - file: nsmesdir + - file: eslogdir + - file: cacertz + - x509: /etc/pki/elasticsearch.crt + - x509: /etc/pki/elasticsearch.key + - file: elasticp12perms + {% if ismanager %} + - x509: pki_public_ca_crt + {% else %} + - x509: trusttheca + {% endif %} + {% if salt['pillar.get']('elasticsearch:auth:enabled', False) %} + - cmd: auth_users_roles_inode + - cmd: auth_users_inode + {% endif %} append_so-elasticsearch_so-status.conf: file.append: diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls index f03d3dc1a..75beb66c9 100644 --- a/salt/filebeat/init.sls +++ b/salt/filebeat/init.sls @@ -25,9 +25,10 @@ {% from 'filebeat/map.jinja' import SO with context %} {% set ES_INCLUDED_NODES = ['so-eval', 'so-standalone', 'so-managersearch', 'so-node', 'so-heavynode', 'so-import'] %} +include: + - ssl #only include elastic state for certain nodes {% if grains.role in ES_INCLUDED_NODES %} -include: - elasticsearch {% endif %} @@ -66,7 +67,7 @@ fileregistrydir: - makedirs: True # This needs to be owned by root -filebeatconfsync: +filebeatconf: file.managed: - name: /opt/so/conf/filebeat/etc/filebeat.yml - source: salt://filebeat/etc/filebeat.yml @@ -78,7 +79,7 @@ filebeatconfsync: OUTPUT: {{ salt['pillar.get']('filebeat:config:output', {}) }} # Filebeat module config file 
-filebeatmoduleconfsync: +filebeatmoduleconf: file.managed: - name: /opt/so/conf/filebeat/etc/module-setup.yml - source: salt://filebeat/etc/module-setup.yml @@ -135,14 +136,21 @@ so-filebeat: {% endfor %} {% endfor %} - watch: - - file: /opt/so/conf/filebeat/etc/filebeat.yml + - file: filebeatconf + - require: + - file: filebeatconf + - file: filebeatmoduleconf + - file: filebeatmoduledir + - x509: conf_filebeat_crt + - x509: conf_filebeat_key + - x509: trusttheca {% if grains.role in ES_INCLUDED_NODES %} run_module_setup: cmd.run: - name: /usr/sbin/so-filebeat-module-setup - require: - - file: filebeatmoduleconfsync + - file: filebeatmoduleconf - docker_container: so-filebeat - onchanges: - docker_container: so-elasticsearch diff --git a/salt/fleet/init.sls b/salt/fleet/init.sls index 1bb4e73d6..2a0ac540b 100644 --- a/salt/fleet/init.sls +++ b/salt/fleet/init.sls @@ -17,6 +17,7 @@ include: + - ssl - mysql # Fleet Setup @@ -136,10 +137,13 @@ so-fleet: - /opt/so/conf/fleet/packs:/packs - watch: - /opt/so/conf/fleet/etc + - require: + - x509: fleet_key + - x509: fleet_crt append_so-fleet_so-status.conf: file.append: - name: /opt/so/conf/so-status/so-status.conf - text: so-fleet -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/grafana/init.sls b/salt/grafana/init.sls index 42df29af4..9113cdbe1 100644 --- a/salt/grafana/init.sls +++ b/salt/grafana/init.sls @@ -132,6 +132,8 @@ so-grafana: - 0.0.0.0:3000:3000 - watch: - file: /opt/so/conf/grafana/* + - require: + - file: grafana-config append_so-grafana_so-status.conf: file.append: diff --git a/salt/influxdb/init.sls b/salt/influxdb/init.sls index f270c9f73..218d2d18e 100644 --- a/salt/influxdb/init.sls +++ b/salt/influxdb/init.sls @@ -17,6 +17,8 @@ include: - salt.minion - salt.python3-influxdb + - ssl + # Influx DB influxconfdir: file.directory: @@ -60,6 +62,10 @@ so-influxdb: - 0.0.0.0:8086:8086 - watch: - file: influxdbconf + - require: + - file: influxdbconf + - x509: influxdb_key + - x509: influxdb_crt append_so-influxdb_so-status.conf: file.append: diff --git a/salt/learn/logscan.sls b/salt/learn/logscan.sls index cc8bb2996..91f64420a 100644 --- a/salt/learn/logscan.sls +++ b/salt/learn/logscan.sls @@ -51,6 +51,8 @@ so-logscan: - /opt/so/log/logscan:/logscan/output:rw - /opt/so/log:/logscan/logs:ro - cpu_period: {{ logscan_cpu_period }} + - require: + - file: logscan_conf {% else %} - force: true {% endif %} diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index cd6a8918c..069b2f7bd 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -15,36 +15,37 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls in allowed_states %} -{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} -{% set MANAGER = salt['grains.get']('master') %} -{% set MANAGERIP = salt['pillar.get']('global:managerip') %} + {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} + {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} + {% set MANAGER = salt['grains.get']('master') %} + {% set MANAGERIP = salt['pillar.get']('global:managerip') %} -# Logstash Section - Decide which pillar to use -{% set lsheap = salt['pillar.get']('logstash_settings:lsheap', '') %} -{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %} - {% set freq = salt['pillar.get']('manager:freq', '0') %} - {% set dstats = salt['pillar.get']('manager:domainstats', '0') %} - {% set nodetype = 
salt['grains.get']('role', '') %} -{% elif grains['role'] == 'so-helix' %} - {% set freq = salt['pillar.get']('manager:freq', '0') %} - {% set dstats = salt['pillar.get']('manager:domainstats', '0') %} - {% set nodetype = salt['grains.get']('role', '') %} -{% endif %} + # Logstash Section - Decide which pillar to use + {% set lsheap = salt['pillar.get']('logstash_settings:lsheap', '') %} + {% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %} + {% set freq = salt['pillar.get']('manager:freq', '0') %} + {% set dstats = salt['pillar.get']('manager:domainstats', '0') %} + {% set nodetype = salt['grains.get']('role', '') %} + {% elif grains['role'] == 'so-helix' %} + {% set freq = salt['pillar.get']('manager:freq', '0') %} + {% set dstats = salt['pillar.get']('manager:domainstats', '0') %} + {% set nodetype = salt['grains.get']('role', '') %} + {% endif %} -{% set PIPELINES = salt['pillar.get']('logstash:pipelines', {}) %} -{% set DOCKER_OPTIONS = salt['pillar.get']('logstash:docker_options', {}) %} -{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %} + {% set PIPELINES = salt['pillar.get']('logstash:pipelines', {}) %} + {% set DOCKER_OPTIONS = salt['pillar.get']('logstash:docker_options', {}) %} + {% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %} -{% if grains.role in ['so-heavynode'] %} - {% set EXTRAHOSTHOSTNAME = salt['grains.get']('host') %} - {% set EXTRAHOSTIP = salt['pillar.get']('sensor:mainip') %} -{% else %} - {% set EXTRAHOSTHOSTNAME = MANAGER %} - {% set EXTRAHOSTIP = MANAGERIP %} -{% endif %} + {% if grains.role in ['so-heavynode'] %} + {% set EXTRAHOSTHOSTNAME = salt['grains.get']('host') %} + {% set EXTRAHOSTIP = salt['pillar.get']('sensor:mainip') %} + {% else %} + {% set EXTRAHOSTHOSTNAME = MANAGER %} + {% set EXTRAHOSTIP = MANAGERIP %} + {% endif %} include: + - ssl - elasticsearch # Create the logstash group @@ -73,22 +74,22 @@ lspipelinedir: - user: 931 - group: 939 -{% for PL in PIPELINES %} - {% for CONFIGFILE in PIPELINES[PL].config %} + {% for PL in PIPELINES %} + {% for CONFIGFILE in PIPELINES[PL].config %} ls_pipeline_{{PL}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}: file.managed: - source: salt://logstash/pipelines/config/{{CONFIGFILE}} - {% if 'jinja' in CONFIGFILE.split('.')[-1] %} + {% if 'jinja' in CONFIGFILE.split('.')[-1] %} - name: /opt/so/conf/logstash/pipelines/{{PL}}/{{CONFIGFILE.split('/')[1] | replace(".jinja", "")}} - template: jinja - {% else %} + {% else %} - name: /opt/so/conf/logstash/pipelines/{{PL}}/{{CONFIGFILE.split('/')[1]}} - {% endif %} + {% endif %} - user: 931 - group: 939 - mode: 660 - makedirs: True - {% endfor %} + {% endfor %} ls_pipeline_{{PL}}: file.directory: @@ -96,12 +97,12 @@ ls_pipeline_{{PL}}: - user: 931 - group: 939 - require: - {% for CONFIGFILE in PIPELINES[PL].config %} + {% for CONFIGFILE in PIPELINES[PL].config %} - file: ls_pipeline_{{PL}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }} - {% endfor %} + {% endfor %} - clean: True -{% endfor %} + {% endfor %} lspipelinesyml: file.managed: @@ -157,50 +158,60 @@ so-logstash: - environment: - LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }} - port_bindings: -{% for BINDING in DOCKER_OPTIONS.port_bindings %} + {% for BINDING in DOCKER_OPTIONS.port_bindings %} - {{ BINDING }} -{% endfor %} + {% endfor %} - binds: - /opt/so/conf/elasticsearch/templates/:/templates/:ro - - /opt/so/conf/logstash/etc/log4j2.properties:/usr/share/logstash/config/log4j2.properties:ro - - 
/opt/so/conf/logstash/etc/logstash.yml:/usr/share/logstash/config/logstash.yml:ro - - /opt/so/conf/logstash/etc/pipelines.yml:/usr/share/logstash/config/pipelines.yml + - /opt/so/conf/logstash/etc/:/usr/share/logstash/config/:ro - /opt/so/conf/logstash/pipelines:/usr/share/logstash/pipelines:ro - /opt/so/rules:/etc/nsm/rules:ro - /nsm/import:/nsm/import:ro - /nsm/logstash:/usr/share/logstash/data:rw - /opt/so/log/logstash:/var/log/logstash:rw - /sys/fs/cgroup:/sys/fs/cgroup:ro + {% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %} - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro + {% endif %} - /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro - {% if grains['role'] == 'so-heavynode' %} + {% if grains['role'] == 'so-heavynode' %} - /etc/ssl/certs/intca.crt:/usr/share/filebeat/ca.crt:ro - {% else %} + {% else %} - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro - {% endif %} + {% endif %} - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro - - /etc/pki/ca.cer:/ca/ca.crt:ro - {%- if grains['role'] == 'so-eval' %} + {%- if grains['role'] == 'so-eval' %} - /nsm/zeek:/nsm/zeek:ro - /nsm/suricata:/suricata:ro - /nsm/wazuh/logs/alerts:/wazuh/alerts:ro - /nsm/wazuh/logs/archives:/wazuh/archives:ro - /opt/so/log/fleet/:/osquery/logs:ro - /opt/so/log/strelka:/strelka:ro - {%- endif %} + {%- endif %} - watch: - file: lsetcsync -{% for PL in PIPELINES %} + {% for PL in PIPELINES %} - file: ls_pipeline_{{PL}} - {% for CONFIGFILE in PIPELINES[PL].config %} + {% for CONFIGFILE in PIPELINES[PL].config %} - file: ls_pipeline_{{PL}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }} + {% endfor %} {% endfor %} -{% endfor %} -{% for TEMPLATE in TEMPLATES %} + {% for TEMPLATE in TEMPLATES %} - file: es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }} -{% endfor %} + {% endfor %} + - require: + {% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %} + - x509: etc_filebeat_crt + {% endif %} + {% if grains['role'] == 'so-heavynode' %} + - x509: trusttheca + {% else %} + - x509: pki_public_ca_crt + {% endif %} + - file: cacertz + - file: capemz append_so-logstash_so-status.conf: file.append: diff --git a/salt/manager/init.sls b/salt/manager/init.sls index 1d6577e5f..4a3769e1e 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -60,8 +60,7 @@ aptcacherlogdir: - group: 939 - makedirs: true -# Copy the config -acngcopyconf: +acngconf: file.managed: - name: /opt/so/conf/aptcacher-ng/etc/acng.conf - source: salt://manager/files/acng/acng.conf @@ -80,6 +79,8 @@ so-aptcacherng: - /opt/so/conf/aptcacher-ng/cache:/var/cache/apt-cacher-ng:rw - /opt/so/log/aptcacher-ng:/var/log/apt-cacher-ng:rw - /opt/so/conf/aptcacher-ng/etc/acng.conf:/etc/apt-cacher-ng/acng.conf:ro + - require: + - file: acngconf append_so-aptcacherng_so-status.conf: file.append: diff --git a/salt/minio/init.sls b/salt/minio/init.sls index f61209c8b..8e4d548f6 100644 --- a/salt/minio/init.sls +++ b/salt/minio/init.sls @@ -21,6 +21,9 @@ {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} +include: + - ssl + # Minio Setup minioconfdir: file.directory: @@ -59,6 +62,9 @@ so-minio: - /etc/pki/minio.key:/.minio/certs/private.key:ro - 
/etc/pki/minio.crt:/.minio/certs/public.crt:ro - entrypoint: "/usr/bin/docker-entrypoint.sh server --certs-dir /.minio/certs --address :9595 /data" + - require: + - file: minio_key + - file: minio_crt append_so-minio_so-status.conf: file.append: @@ -71,4 +77,4 @@ append_so-minio_so-status.conf: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/mysql/etc/mypass b/salt/mysql/etc/mypass index f5f781c10..b38bf75ec 100644 --- a/salt/mysql/etc/mypass +++ b/salt/mysql/etc/mypass @@ -1,2 +1 @@ -{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%} {{ MYSQLPASS }} diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls index 46e62fcc0..cb9586984 100644 --- a/salt/mysql/init.sls +++ b/salt/mysql/init.sls @@ -45,13 +45,22 @@ mysqlpiddir: - group: 939 - makedirs: True -mysqletcsync: - file.recurse: - - name: /opt/so/conf/mysql/etc - - source: salt://mysql/etc +mysqlcnf: + file.managed: + - name: /opt/so/conf/mysql/etc/my.cnf + - source: salt://mysql/etc/my.cnf + - user: 939 + - group: 939 + +mysqlpass: + file.managed: + - name: /opt/so/conf/mysql/etc/mypass + - source: salt://mysql/etc/mypass - user: 939 - group: 939 - template: jinja + - defaults: + MYSQLPASS: {{ MYSQLPASS }} mysqllogdir: file.directory: @@ -94,6 +103,9 @@ so-mysql: - /opt/so/log/mysql:/var/log/mysql:rw - watch: - /opt/so/conf/mysql/etc + - require: + - file: mysqlcnf + - file: mysqlpass cmd.run: - name: until nc -z {{ MAINIP }} 3306; do sleep 1; done - timeout: 600 @@ -118,4 +130,4 @@ append_so-mysql_so-status.conf: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/nginx/init.sls b/salt/nginx/init.sls index 15c1acc8e..c732db636 100644 --- a/salt/nginx/init.sls +++ b/salt/nginx/init.sls @@ -8,6 +8,9 @@ {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set ISAIRGAP = salt['pillar.get']('global:airgap') %} +include: + - ssl + # Drop the correct nginx config based on role nginxconfdir: file.directory: @@ -73,28 +76,38 @@ so-nginx: - /opt/so/log/nginx/:/var/log/nginx:rw - /opt/so/tmp/nginx/:/var/lib/nginx:rw - /opt/so/tmp/nginx/:/run:rw + - /opt/so/conf/fleet/packages:/opt/socore/html/packages + {% if grains.role in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone', 'so-import'] %} - /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro - /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro - - /opt/so/conf/fleet/packages:/opt/socore/html/packages - {% if ISAIRGAP is sameas true %} - - /nsm/repo:/opt/socore/html/repo:ro - {% endif %} # ATT&CK Navigator binds - /opt/so/conf/navigator/navigator_config.json:/opt/socore/html/navigator/assets/config.json:ro - /opt/so/conf/navigator/nav_layer_playbook.json:/opt/socore/html/navigator/assets/playbook.json:ro + {% endif %} + {% if ISAIRGAP is sameas true %} + - /nsm/repo:/opt/socore/html/repo:ro + {% endif %} - cap_add: NET_BIND_SERVICE - port_bindings: - 80:80 - 443:443 - {% if ISAIRGAP is sameas true %} + {% if ISAIRGAP is sameas true %} - 7788:7788 - {% endif %} - {%- if FLEETMANAGER or FLEETNODE %} + {% endif %} + {%- if FLEETMANAGER or FLEETNODE %} - 8090:8090 - {%- endif %} + {%- endif %} - watch: - file: nginxconf - file: nginxconfdir + - require: + - file: nginxconf + {% if grains.role in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone', 'so-import'] %} + - x509: managerssl_key + - x509: managerssl_crt + - file: navigatorconfig + - file: navigatordefaultlayer + {% endif %} 
append_so-nginx_so-status.conf: file.append: @@ -107,4 +120,4 @@ append_so-nginx_so-status.conf: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/pcap/init.sls b/salt/pcap/init.sls index 44e7323ad..7b56fbd1f 100644 --- a/salt/pcap/init.sls +++ b/salt/pcap/init.sls @@ -125,7 +125,9 @@ so-steno: - /nsm/pcaptmp:/tmp:rw - /opt/so/log/stenographer:/var/log/stenographer:rw - watch: - - file: /opt/so/conf/steno/config + - file: stenoconf + - require: + - file: stenoconf {% else %} {# if stenographer isn't enabled, then stop and remove the container #} - force: True {% endif %} diff --git a/salt/redis/init.sls b/salt/redis/init.sls index a99df219f..d52c49d5b 100644 --- a/salt/redis/init.sls +++ b/salt/redis/init.sls @@ -19,6 +19,9 @@ {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} +include: + - ssl + # Redis Setup redisconfdir: file.directory: @@ -41,10 +44,10 @@ redislogdir: - group: 939 - makedirs: True -redisconfsync: - file.recurse: - - name: /opt/so/conf/redis/etc - - source: salt://redis/etc +redisconf: + file.managed: + - name: /opt/so/conf/redis/etc/redis.conf + - source: salt://redis/etc/redis.conf - user: 939 - group: 939 - template: jinja @@ -67,6 +70,11 @@ so-redis: - entrypoint: "redis-server /usr/local/etc/redis/redis.conf" - watch: - file: /opt/so/conf/redis/etc + - require: + - file: redisconf + - x509: redis_crt + - x509: redis_key + - x509: pki_public_ca_crt append_so-redis_so-status.conf: file.append: @@ -79,4 +87,4 @@ append_so-redis_so-status.conf: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/registry/init.sls b/salt/registry/init.sls index eb0c2df0c..76ccbf070 100644 --- a/salt/registry/init.sls +++ b/salt/registry/init.sls @@ -1,6 +1,9 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls in allowed_states %} +include: + - ssl + # Create the config directory for the docker registry dockerregistryconfdir: file.directory: @@ -47,6 +50,10 @@ so-dockerregistry: - retry: attempts: 5 interval: 30 + - require: + - file: dockerregistryconf + - x509: registry_crt + - x509: registry_key append_so-dockerregistry_so-status.conf: file.append: diff --git a/salt/sensoroni/init.sls b/salt/sensoroni/init.sls index a55049c06..1405c72bf 100644 --- a/salt/sensoroni/init.sls +++ b/salt/sensoroni/init.sls @@ -38,8 +38,10 @@ so-sensoroni: - /opt/so/log/sensoroni:/opt/sensoroni/logs:rw - watch: - file: /opt/so/conf/sensoroni/sensoroni.json + - require: + - file: sensoroniagentconf append_so-sensoroni_so-status.conf: file.append: - name: /opt/so/conf/so-status/so-status.conf - - text: so-sensoroni \ No newline at end of file + - text: so-sensoroni diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls index 724e5a617..576cc573d 100644 --- a/salt/soctopus/init.sls +++ b/salt/soctopus/init.sls @@ -8,6 +8,9 @@ {% set MANAGER_IP = salt['pillar.get']('global:managerip', '') %} {% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %} +include: + - nginx + soctopusdir: file.directory: - name: /opt/so/conf/soctopus/sigma-import @@ -71,6 +74,9 @@ so-soctopus: - 0.0.0.0:7000:7000 - extra_hosts: - {{MANAGER_URL}}:{{MANAGER_IP}} + - require: + - file: soctopusconf + - file: navigatordefaultlayer append_so-soctopus_so-status.conf: file.append: @@ -83,4 +89,4 @@ append_so-soctopus_so-status.conf: test.fail_without_changes: - name: 
{{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 9d4026b56..a9aa66703 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -30,6 +30,9 @@ {% set ca_server = global_ca_server[0] %} {% endif %} +include: + - ca + # Trust the CA trusttheca: x509.pem_managed: @@ -64,8 +67,9 @@ removeesp12dir: - name: /etc/pki/elasticsearch.p12 - onlyif: "[ -d /etc/pki/elasticsearch.p12 ]" -/etc/pki/influxdb.key: +influxdb_key: x509.private_key_managed: + - name: /etc/pki/influxdb.key - CN: {{ manager }} - bits: 4096 - days_remaining: 0 @@ -82,8 +86,9 @@ removeesp12dir: interval: 30 # Create a cert for the talking to influxdb -/etc/pki/influxdb.crt: +influxdb_crt: x509.certificate_managed: + - name: /etc/pki/influxdb.crt - ca_server: {{ ca_server }} - signing_policy: influxdb - public_key: /etc/pki/influxdb.key @@ -112,8 +117,9 @@ influxkeyperms: {% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet'] %} # Create a cert for Redis encryption -/etc/pki/redis.key: +redis_key: x509.private_key_managed: + - name: /etc/pki/redis.key - CN: {{ COMMONNAME }} - bits: 4096 - days_remaining: 0 @@ -129,8 +135,9 @@ influxkeyperms: attempts: 5 interval: 30 -/etc/pki/redis.crt: +redis_crt: x509.certificate_managed: + - name: /etc/pki/redis.crt - ca_server: {{ ca_server }} - signing_policy: registry - public_key: /etc/pki/redis.key @@ -158,8 +165,9 @@ rediskeyperms: {% endif %} {% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %} -/etc/pki/filebeat.key: +etc_filebeat_key: x509.private_key_managed: + - name: /etc/pki/filebeat.key - CN: {{ COMMONNAME }} - bits: 4096 - days_remaining: 0 @@ -168,7 +176,7 @@ rediskeyperms: - new: True {% if salt['file.file_exists']('/etc/pki/filebeat.key') -%} - prereq: - - x509: /etc/pki/filebeat.crt + - x509: etc_filebeat_crt {%- endif %} - timeout: 30 - retry: @@ -176,8 +184,9 @@ rediskeyperms: interval: 30 # Request a cert and drop it where it needs to go to be distributed -/etc/pki/filebeat.crt: +etc_filebeat_crt: x509.certificate_managed: + - name: /etc/pki/filebeat.crt - ca_server: {{ ca_server }} - signing_policy: filebeat - public_key: /etc/pki/filebeat.key @@ -198,7 +207,7 @@ rediskeyperms: cmd.run: - name: "/usr/bin/openssl pkcs8 -in /etc/pki/filebeat.key -topk8 -out /etc/pki/filebeat.p8 -nocrypt" - onchanges: - - x509: /etc/pki/filebeat.key + - x509: etc_filebeat_key fbperms: @@ -237,8 +246,9 @@ fbcrtlink: - user: socore - group: socore -/etc/pki/registry.key: +registry_key: x509.private_key_managed: + - name: /etc/pki/registry.key - CN: {{ manager }} - bits: 4096 - days_remaining: 0 @@ -255,8 +265,9 @@ fbcrtlink: interval: 30 # Create a cert for the docker registry -/etc/pki/registry.crt: +registry_crt: x509.certificate_managed: + - name: /etc/pki/registry.crt - ca_server: {{ ca_server }} - signing_policy: registry - public_key: /etc/pki/registry.key @@ -280,8 +291,9 @@ regkeyperms: - mode: 640 - group: 939 -/etc/pki/minio.key: +minio_key: x509.private_key_managed: + - name: /etc/pki/minio.key - CN: {{ manager }} - bits: 4096 - days_remaining: 0 @@ -298,8 +310,9 @@ regkeyperms: interval: 30 # Create a cert for minio -/etc/pki/minio.crt: +minio_crt: x509.certificate_managed: + - name: /etc/pki/minio.crt - ca_server: {{ ca_server }} - signing_policy: registry - public_key: /etc/pki/minio.key @@ -379,8 +392,9 @@ elasticp12perms: - mode: 
640 - group: 930 -/etc/pki/managerssl.key: +managerssl_key: x509.private_key_managed: + - name: /etc/pki/managerssl.key - CN: {{ manager }} - bits: 4096 - days_remaining: 0 @@ -397,8 +411,9 @@ elasticp12perms: interval: 30 # Create a cert for the reverse proxy -/etc/pki/managerssl.crt: +managerssl_crt: x509.certificate_managed: + - name: /etc/pki/managerssl.crt - ca_server: {{ ca_server }} - signing_policy: managerssl - public_key: /etc/pki/managerssl.key @@ -424,8 +439,9 @@ msslkeyperms: - group: 939 # Create a private key and cert for OSQuery -/etc/pki/fleet.key: +fleet_key: x509.private_key_managed: + - name: /etc/pki/fleet.key - CN: {{ manager }} - bits: 4096 - days_remaining: 0 @@ -441,8 +457,9 @@ msslkeyperms: attempts: 5 interval: 30 -/etc/pki/fleet.crt: +fleet_crt: x509.certificate_managed: + - name: /etc/pki/fleet.crt - signing_private_key: /etc/pki/fleet.key - CN: {{ manager }} - subjectAltName: DNS:{{ manager }},IP:{{ managerip }}{% if CUSTOM_FLEET_HOSTNAME != None %},DNS:{{ CUSTOM_FLEET_HOSTNAME }}{% endif %} @@ -473,8 +490,9 @@ fbcertdir: - name: /opt/so/conf/filebeat/etc/pki - makedirs: True -/opt/so/conf/filebeat/etc/pki/filebeat.key: +conf_filebeat_key: x509.private_key_managed: + - name: /opt/so/conf/filebeat/etc/pki/filebeat.key - CN: {{ COMMONNAME }} - bits: 4096 - days_remaining: 0 @@ -483,7 +501,7 @@ fbcertdir: - new: True {% if salt['file.file_exists']('/opt/so/conf/filebeat/etc/pki/filebeat.key') -%} - prereq: - - x509: /opt/so/conf/filebeat/etc/pki/filebeat.crt + - x509: conf_filebeat_crt {%- endif %} - timeout: 30 - retry: @@ -491,8 +509,9 @@ fbcertdir: interval: 30 # Request a cert and drop it where it needs to go to be distributed -/opt/so/conf/filebeat/etc/pki/filebeat.crt: +conf_filebeat_crt: x509.certificate_managed: + - name: /opt/so/conf/filebeat/etc/pki/filebeat.crt - ca_server: {{ ca_server }} - signing_policy: filebeat - public_key: /opt/so/conf/filebeat/etc/pki/filebeat.key @@ -516,7 +535,7 @@ filebeatpkcs: cmd.run: - name: "/usr/bin/openssl pkcs8 -in /opt/so/conf/filebeat/etc/pki/filebeat.key -topk8 -out /opt/so/conf/filebeat/etc/pki/filebeat.p8 -passout pass:" - onchanges: - - x509: /opt/so/conf/filebeat/etc/pki/filebeat.key + - x509: conf_filebeat_key filebeatkeyperms: file.managed: @@ -537,8 +556,9 @@ chownfilebeatp8: {% if grains['role'] == 'so-fleet' %} -/etc/pki/managerssl.key: +managerssl_key: x509.private_key_managed: + - name: /etc/pki/managerssl.key - CN: {{ manager }} - bits: 4096 - days_remaining: 0 @@ -555,8 +575,9 @@ chownfilebeatp8: interval: 30 # Create a cert for the reverse proxy -/etc/pki/managerssl.crt: +managerssl_crt: x509.certificate_managed: + - name: /etc/pki/managerssl.crt - ca_server: {{ ca_server }} - signing_policy: managerssl - public_key: /etc/pki/managerssl.key @@ -582,8 +603,9 @@ msslkeyperms: - group: 939 # Create a private key and cert for Fleet -/etc/pki/fleet.key: +fleet_key: x509.private_key_managed: + - name: /etc/pki/fleet.key - CN: {{ manager }} - bits: 4096 - days_remaining: 0 @@ -599,8 +621,9 @@ msslkeyperms: attempts: 5 interval: 30 -/etc/pki/fleet.crt: +fleet_crt: x509.certificate_managed: + - name: /etc/pki/fleet.crt - signing_private_key: /etc/pki/fleet.key - CN: {{ HOSTNAME }} - subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }} {% if CUSTOM_FLEET_HOSTNAME != None %},DNS:{{ CUSTOM_FLEET_HOSTNAME }} {% endif %} diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index 73c4d2395..f83f54c5c 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -93,7 +93,7 @@ surilogscript: - month: '*' 
- dayweek: '*' -suriconfigsync: +suriconfig: file.managed: - name: /opt/so/conf/suricata/suricata.yaml - source: salt://suricata/files/suricata.yaml.jinja @@ -155,10 +155,14 @@ so-suricata: - /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro - network_mode: host - watch: - - file: /opt/so/conf/suricata/suricata.yaml + - file: suriconfig - file: surithresholding - file: /opt/so/conf/suricata/rules/ - file: /opt/so/conf/suricata/bpf + - require: + - file: suriconfig + - file: surithresholding + - file: suribpf {% else %} {# if Suricata isn't enabled, then stop and remove the container #} - force: True diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf index 59d806fe0..fa7dabe09 100644 --- a/salt/telegraf/etc/telegraf.conf +++ b/salt/telegraf/etc/telegraf.conf @@ -16,10 +16,13 @@ {%- set MANAGER = salt['grains.get']('master') %} {%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %} {%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %} -{% set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') %} -{% set HELIX_API_KEY = salt['pillar.get']('fireeye:helix:api_key', '') %} -{% set UNIQUEID = salt['pillar.get']('sensor:uniqueid', '') %} +{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') %} +{%- set HELIX_API_KEY = salt['pillar.get']('fireeye:helix:api_key', '') %} +{%- set UNIQUEID = salt['pillar.get']('sensor:uniqueid', '') %} {%- set TRUE_CLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %} +{%- set ZEEK_ENABLED = salt['pillar.get']('zeek:enabled', True) %} +{%- set MDENGINE = salt['pillar.get']('global:mdengine', 'ZEEK') %} + # Global tags can be specified here in key="value" format. [global_tags] @@ -621,23 +624,25 @@ # # Read stats from one or more Elasticsearch servers or clusters -{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %} +{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %} [[inputs.elasticsearch]] servers = ["https://{{ MANAGER }}:9200"] -{% if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %} + cluster_stats = true +{%- if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %} username = "{{ ES_USER }}" password = "{{ ES_PASS }}" -{% endif %} +{%- endif %} insecure_skip_verify = true -{% elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %} +{%- elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %} [[inputs.elasticsearch]] servers = ["https://{{ NODEIP }}:9200"] -{% if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %} + cluster_stats = true +{%- if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %} username = "{{ ES_USER }}" password = "{{ ES_PASS }}" -{% endif %} +{%- endif %} insecure_skip_verify = true -{% endif %} +{%- endif %} # # ## Timeout for HTTP requests to the elastic search server(s) @@ -738,10 +743,10 @@ "/scripts/stenoloss.sh", "/scripts/suriloss.sh", "/scripts/checkfiles.sh", - {% if salt['pillar.get']('global:mdengine', 'ZEEK') == 'ZEEK' %} + {%- if MDENGINE == 'ZEEK' and ZEEK_ENABLED %} "/scripts/zeekloss.sh", "/scripts/zeekcaptureloss.sh", - {% endif %} + {%- endif %} "/scripts/oldpcap.sh", "/scripts/raid.sh", "/scripts/beatseps.sh" @@ -755,10 +760,10 @@ "/scripts/stenoloss.sh", "/scripts/suriloss.sh", "/scripts/checkfiles.sh", - {% if salt['pillar.get']('global:mdengine', 'ZEEK') == 'ZEEK' %} + {%- if MDENGINE == 'ZEEK' and 
ZEEK_ENABLED %} "/scripts/zeekloss.sh", "/scripts/zeekcaptureloss.sh", - {% endif %} + {%- endif %} "/scripts/oldpcap.sh", "/scripts/eps.sh", "/scripts/raid.sh", @@ -774,10 +779,10 @@ "/scripts/stenoloss.sh", "/scripts/suriloss.sh", "/scripts/checkfiles.sh", - {% if salt['pillar.get']('global:mdengine', 'ZEEK') == 'ZEEK' %} + {%- if MDENGINE == 'ZEEK' and ZEEK_ENABLED %} "/scripts/zeekloss.sh", "/scripts/zeekcaptureloss.sh", - {% endif %} + {%- endif %} "/scripts/oldpcap.sh", "/scripts/eps.sh", "/scripts/raid.sh", @@ -792,10 +797,10 @@ "/scripts/stenoloss.sh", "/scripts/suriloss.sh", "/scripts/checkfiles.sh", - {% if salt['pillar.get']('global:mdengine', 'ZEEK') == 'ZEEK' %} + {%- if MDENGINE == 'ZEEK' and ZEEK_ENABLED %} "/scripts/zeekloss.sh", "/scripts/zeekcaptureloss.sh", - {% endif %} + {%- endif %} "/scripts/oldpcap.sh", "/scripts/influxdbsize.sh", "/scripts/raid.sh", @@ -809,10 +814,10 @@ "/scripts/stenoloss.sh", "/scripts/suriloss.sh", "/scripts/checkfiles.sh", - {% if salt['pillar.get']('global:mdengine', 'ZEEK') == 'ZEEK' %} + {%- if MDENGINE == 'ZEEK' and ZEEK_ENABLED %} "/scripts/zeekloss.sh", "/scripts/zeekcaptureloss.sh", - {% endif %} + {%- endif %} "/scripts/oldpcap.sh", "/scripts/helixeps.sh" ] diff --git a/salt/telegraf/init.sls b/salt/telegraf/init.sls index 615cfc237..2c188367b 100644 --- a/salt/telegraf/init.sls +++ b/salt/telegraf/init.sls @@ -5,6 +5,9 @@ {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} +include: + - ssl + # Add Telegraf to monitor all the things. tgraflogdir: file.directory: @@ -88,7 +91,16 @@ so-telegraf: - file: tgrafconf - file: tgrafsyncscripts - file: node_config - + - require: + - file: tgrafconf + - file: node_config + {% if grains['role'] == 'so-manager' or grains['role'] == 'so-eval' or grains['role'] == 'so-managersearch' %} + - x509: pki_public_ca_crt + {% else %} + - x509: trusttheca + {% endif %} + - x509: influxdb_crt + - x509: influxdb_key append_so-telegraf_so-status.conf: file.append: - name: /opt/so/conf/so-status/so-status.conf diff --git a/salt/thehive/init.sls b/salt/thehive/init.sls index d1ee8a4bf..d7050e226 100644 --- a/salt/thehive/init.sls +++ b/salt/thehive/init.sls @@ -73,6 +73,14 @@ thehiveesdata: - user: 939 - group: 939 +thehive_elasticsearch_yml: + file.exists: + - name: /opt/so/conf/thehive/etc/es/elasticsearch.yml + +log4j2_properties: + file.exists: + - name: /opt/so/conf/thehive/etc/es/log4j2.properties + so-thehive-es: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-es:{{ VERSION }} @@ -91,12 +99,23 @@ so-thehive-es: - port_bindings: - 0.0.0.0:9400:9400 - 0.0.0.0:9500:9500 + - require: + - file: thehive_elasticsearch_yml + - file: log4j2_properties append_so-thehive-es_so-status.conf: file.append: - name: /opt/so/conf/so-status/so-status.conf - text: so-thehive-es +cortex_application_conf: + file.exists: + - name: /opt/so/conf/thehive/etc/cortex-application.conf + +application_conf: + file.exists: + - name: /opt/so/conf/thehive/etc/application.conf + # Install Cortex so-cortex: docker_container.running: @@ -110,6 +129,8 @@ so-cortex: - /opt/so/conf/cortex/custom-responders:/custom-responders:ro - port_bindings: - 0.0.0.0:9001:9001 + - require: + - file: cortex_application_conf append_so-cortex_so-status.conf: file.append: @@ -135,6 +156,8 @@ so-thehive: - /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro - port_bindings: - 0.0.0.0:9000:9000 + - require: + - file: 
application_conf append_so-thehive_so-status.conf: file.append: diff --git a/salt/thehive/scripts/cortex_init b/salt/thehive/scripts/cortex_init index c2f00390d..fd0387131 100644 --- a/salt/thehive/scripts/cortex_init +++ b/salt/thehive/scripts/cortex_init @@ -29,7 +29,7 @@ cortex_init(){ CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}" SOCTOPUS_CONFIG="$default_salt_dir/salt/soctopus/files/SOCtopus.conf" - if wait_for_web_response $CORTEX_URL "Cortex"; then + if wait_for_web_response $CORTEX_URL "Cortex" 120; then # Migrate DB curl -sk -XPOST -L "$CORTEX_API_URL/maintenance/migrate" @@ -65,7 +65,7 @@ if [ -f /opt/so/state/cortex.txt ]; then cortex_clean exit 0 else - if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"'; then + if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"' 120; then cortex_init cortex_clean else diff --git a/salt/thehive/scripts/hive_init b/salt/thehive/scripts/hive_init index ca6f7aa2b..7ace6137b 100755 --- a/salt/thehive/scripts/hive_init +++ b/salt/thehive/scripts/hive_init @@ -20,7 +20,7 @@ thehive_init(){ SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf" echo -n "Waiting for TheHive..." - if wait_for_web_response $THEHIVE_URL "TheHive"; then + if wait_for_web_response $THEHIVE_URL "TheHive" 120; then # Migrate DB curl -sk -XPOST -L "$THEHIVE_API_URL/maintenance/migrate" @@ -43,7 +43,7 @@ if [ -f /opt/so/state/thehive.txt ]; then thehive_clean exit 0 else - if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"'; then + if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"' 120; then thehive_init thehive_clean else diff --git a/salt/zeek/init.sls b/salt/zeek/init.sls index f83293742..fa4cf4f0b 100644 --- a/salt/zeek/init.sls +++ b/salt/zeek/init.sls @@ -119,7 +119,7 @@ zeekctlcfg: ZEEKCTL: {{ ZEEK.zeekctl | tojson }} # Sync node.cfg -nodecfgsync: +nodecfg: file.managed: - name: /opt/so/conf/zeek/node.cfg - source: salt://zeek/files/node.cfg @@ -149,7 +149,7 @@ plcronscript: - mode: 755 zeekpacketlosscron: - cron.present: + cron.{{ZEEKOPTIONS.pl_cron_state}}: - name: /usr/local/bin/packetloss.sh - user: root - minute: '*/10' @@ -185,7 +185,7 @@ zeekbpf: {% endif %} -localzeeksync: +localzeek: file.managed: - name: /opt/so/conf/zeek/local.zeek - source: salt://zeek/files/local.zeek.jinja @@ -222,6 +222,11 @@ so-zeek: - file: /opt/so/conf/zeek/zeekctl.cfg - file: /opt/so/conf/zeek/policy - file: /opt/so/conf/zeek/bpf + - require: + - file: localzeek + - file: nodecfg + - file: zeekctlcfg + - file: zeekbpf {% else %} {# if Zeek isn't enabled, then stop and remove the container #} - force: True {% endif %} diff --git a/salt/zeek/map.jinja b/salt/zeek/map.jinja index b5713c6d5..a0f92463e 100644 --- a/salt/zeek/map.jinja +++ b/salt/zeek/map.jinja @@ -1,15 +1,17 @@ {% set ZEEKOPTIONS = {} %} -{% set ENABLED = salt['pillar.get']('zeek:enabled', 'True') %} +{% set ENABLED = salt['pillar.get']('zeek:enabled', True) %} # don't start the docker container if it is an import node or disabled via pillar -{% if grains.id.split('_')|last == 'import' or ENABLED is sameas false %} +{% if grains.id.split('_')|last == 'import' or not ENABLED %} {% do ZEEKOPTIONS.update({'start': False}) %} + {% do ZEEKOPTIONS.update({'pl_cron_state': 'absent'}) %} {% else %} {% do ZEEKOPTIONS.update({'start': True}) %} + {% do ZEEKOPTIONS.update({'pl_cron_state': 'present'}) %} {% endif %} -{% if ENABLED is sameas false %} +{% if not ENABLED %} {% do 
ZEEKOPTIONS.update({'status': 'absent'}) %} {% else %} {% do ZEEKOPTIONS.update({'status': 'running'}) %} -{% endif %} \ No newline at end of file +{% endif %}
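
Notes on the recurring patterns in this patch follow; the sketches are illustrative and sit outside the patch itself. First, most of the ssl/init.sls churn is one mechanical rename: states keyed by the managed path (e.g. /etc/pki/influxdb.key) become symbolically named states (influxdb_key) with the path moved to an explicit `name` argument. The payoff is that requisites elsewhere can reference a stable state ID instead of a filesystem path. A minimal before/after sketch, reusing the influxdb pair from the patch:

    # Before: the state ID doubles as the managed path.
    /etc/pki/influxdb.key:
      x509.private_key_managed:
        - bits: 4096

    # After: symbolic ID plus explicit `name`; requisites elsewhere can
    # now point at `x509: influxdb_key` instead of the raw path.
    influxdb_key:
      x509.private_key_managed:
        - name: /etc/pki/influxdb.key
        - bits: 4096

This is also why so many small hunks exist: every `prereq`, `onchanges`, and `require` that pointed at an old path ID has to move in the same commit (e.g. `x509: /etc/pki/filebeat.crt` becomes `x509: etc_filebeat_crt`), or the requisite silently stops matching.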
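
Second, the container states gain `require` lists alongside their existing `watch` lists. In Salt, `watch` already implies `require` ordering and additionally fires mod_watch (a container restart for docker_container.running) when the watched state reports changes; `require` enforces ordering only. Splitting the two, as this patch does, lets a container depend on certs, directories, and images without being restarted every time one of them changes. An illustrative sketch with placeholder IDs, not states from this repo:

    so-example:
      docker_container.running:
        - image: {{ MANAGER }}:5000/example:latest
        - watch:
          # ordering + restart when the config changes
          - file: exampleconf
        - require:
          # ordering only: these must exist and have succeeded
          # before the container first starts
          - file: examplelogdir
          - x509: example_crt
          - x509: example_key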
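
Third, the private-key states depend on the `prereq` requisite: with `- new: True`, x509.private_key_managed would regenerate the key on every run, so the `prereq` on the matching certificate state restricts regeneration to runs where the certificate is actually about to be reissued. That is why the certificate rename had to be mirrored inside the key state; condensed from the filebeat pair in this patch:

    etc_filebeat_key:
      x509.private_key_managed:
        - name: /etc/pki/filebeat.key
        - new: True
        {% if salt['file.file_exists']('/etc/pki/filebeat.key') -%}
        - prereq:
          # only regenerate the key when the cert is due for reissue
          - x509: etc_filebeat_crt
        {%- endif %}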
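
Finally, zeek/map.jinja now derives the cron state function from pillar data: `cron.{{ZEEKOPTIONS.pl_cron_state}}` renders to either cron.present or cron.absent, so disabling Zeek also removes the packet-loss cron job instead of leaving it orphaned. A reduced sketch of the mechanism (the inline-if form is a simplification of the `{% do ZEEKOPTIONS.update(...) %}` calls the file actually uses):

    {% set ENABLED = salt['pillar.get']('zeek:enabled', True) %}
    {% set PL_CRON = 'present' if ENABLED else 'absent' %}

    zeekpacketlosscron:
      cron.{{ PL_CRON }}:
        - name: /usr/local/bin/packetloss.sh
        - user: root
        - minute: '*/10'

cron.absent ignores the scheduling arguments, so the same argument list can be rendered under either function, which is what lets the state body in zeek/init.sls stay unchanged apart from the function name.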