Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
(synced 2025-12-07 17:52:46 +01:00)

Merge pull request #9840 from Security-Onion-Solutions/reposync

Rocky 9 support
@@ -10,7 +10,7 @@ def check():
     if path.exists('/var/run/reboot-required'):
         retval = 'True'

-    elif os == 'CentOS':
+    elif os == 'Rocky':
         cmd = 'needs-restarting -r > /dev/null 2>&1'

         try:
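Note: the check above shells out to needs-restarting. A minimal standalone
version of the same test, assuming dnf-utils/yum-utils is installed:

    #!/bin/bash
    # needs-restarting -r exits 0 when no reboot is needed and 1 when it is.
    if needs-restarting -r > /dev/null 2>&1; then
        echo "no reboot required"
    else
        echo "reboot required"
    fi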
@@ -8,7 +8,6 @@
 {% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
 {% set ELASTALERT = salt['pillar.get']('elastalert:enabled', True) %}
 {% set ELASTICSEARCH = salt['pillar.get']('elasticsearch:enabled', True) %}
-{% set FILEBEAT = salt['pillar.get']('filebeat:enabled', True) %}
 {% set KIBANA = salt['pillar.get']('kibana:enabled', True) %}
 {% set LOGSTASH = salt['pillar.get']('logstash:enabled', True) %}
 {% set CURATOR = salt['pillar.get']('curator:enabled', True) %}

@@ -188,7 +187,6 @@
     'pcap',
     'suricata',
     'healthcheck',
-    'filebeat',
     'schedule',
     'tcpreplay',
     'docker_clean'

@@ -204,10 +202,6 @@
   ],
 }, grain='role') %}

-{% if FILEBEAT and grains.role in ['so-helixsensor', 'so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import', 'so-receiver'] %}
-{% do allowed_states.append('filebeat') %}
-{% endif %}
-
 {% if (PLAYBOOK != 0) and grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %}
 {% do allowed_states.append('mysql') %}
 {% endif %}

@@ -257,7 +251,7 @@
 {% do allowed_states.append('redis') %}
 {% endif %}

-{% if grains.os == 'CentOS' %}
+{% if grains.os == 'Rocky' %}
 {% if not ISAIRGAP %}
 {% do allowed_states.append('yum') %}
 {% endif %}
@@ -235,7 +235,7 @@ soversionfile:
 {% endif %}

 {% if GLOBALS.so_model %}
-{% if GLOBALS.os == 'CentOS' %}
+{% if GLOBALS.os == 'Rocky' %}
 # Install Raid tools
 raidpkgs:
   pkg.installed:
@@ -42,48 +42,14 @@ commonpkgs:
      - mariadb-devel
      - python3-dnf-plugin-versionlock
      - nmap-ncat
      - createrepo
      - python3-lxml
      - python3-packaging
      - python3-watchdog
      - yum-utils
      - device-mapper-persistent-data
      - lvm2
      - openssl
      - git
      - vim-enhanced
      - python3-docker
{% else %}
commonpkgs:
  pkg.installed:
    - skip_suggestions: True
    - pkgs:
      - wget
      - ntpdate
      - bind-utils
      - jq
      - tcpdump
      - httpd-tools
      - net-tools
      - curl
      - sqlite
      - mariadb-devel
      - nmap-ncat
      - python3
      - python36-packaging
      - python36-lxml
      - python36-docker
      - python36-dateutil
      - python36-m2crypto
      - python36-mysql
      - python36-packaging
      - python36-lxml
      - securityonion-python36-watchdog
      - yum-utils
      - device-mapper-persistent-data
      - lvm2
      - openssl
      - git
      - vim-enhanced
      - yum-plugin-versionlock
      - python3-m2crypto
      - rsync
      - python3-rich
      - python3-watchdog
{% endif %}
@@ -6,8 +6,8 @@
 # Elastic License 2.0.


-{# we only want the script to install the workstation if it is CentOS -#}
-{% if grains.os == 'CentOS' -%}
+{# we only want the script to install the workstation if it is Rocky -#}
+{% if grains.os == 'Rocky' -%}
 {# if this is a manager -#}
 {% if grains.master == grains.id.split('_')|first -%}

@@ -80,12 +80,12 @@ echo "Since this is not a manager, the pillar values to enable analyst workstati
 {#- endif if this is a manager #}
 {% endif -%}

-{#- if not CentOS #}
+{#- if not Rocky #}
 {%- else %}

-echo "The Analyst Workstation can only be installed on CentOS. Please view the documentation at $doc_workstation_url."
+echo "The Analyst Workstation can only be installed on Rocky. Please view the documentation at $doc_workstation_url."

-{#- endif grains.os == CentOS #}
+{#- endif grains.os == Rocky #}
 {% endif -%}

 exit 0
@@ -187,14 +187,14 @@ get_random_value() {
 }

 gpg_rpm_import() {
-  if [[ "$OS" == "centos" ]]; then
+  if [[ "$OS" == "rocky" ]]; then
     if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
-      local RPMKEYSLOC="../salt/repo/client/files/centos/keys"
+      local RPMKEYSLOC="../salt/repo/client/files/rocky/keys"
     else
-      local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/centos/keys"
+      local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/rocky/keys"
     fi

-    RPMKEYS=('RPM-GPG-KEY-EPEL-7' 'docker.pub' 'SALTSTACK-GPG-KEY.pub' 'securityonion.pub')
+    RPMKEYS=('RPM-GPG-KEY-EPEL-9' 'SALTSTACK-GPG-KEY2.pub' 'docker.pub' 'securityonion.pub')

     for RPMKEY in "${RPMKEYS[@]}"; do
       rpm --import $RPMKEYSLOC/$RPMKEY
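Note: to confirm which keys rpm --import registered, a generic query (not part
of this changeset) lists them as gpg-pubkey pseudo-packages:

    #!/bin/bash
    rpm -q gpg-pubkey --qf '%{NAME}-%{VERSION}-%{RELEASE}\t%{SUMMARY}\n'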
@@ -366,17 +366,23 @@ run_check_net_err() {
   fi
 }

+salt_minion_count() {
+  local MINIONDIR="/opt/so/saltstack/local/pillar/minions"
+  MINIONCOUNT=$(ls -la $MINIONDIR/*.sls | grep sls | wc -l)
+
+}
+
 set_cron_service_name() {
-  if [[ "$OS" == "centos" ]]; then
-    cron_service_name="crond"
-  else
-    cron_service_name="cron"
-  fi
+  if [[ "$OS" == "rocky" ]]; then
+    cron_service_name="crond"
+  else
+    cron_service_name="cron"
+  fi
 }

 set_os() {
   if [ -f /etc/redhat-release ]; then
-    OS=centos
+    OS=rocky
   else
     OS=ubuntu
   fi
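Note: /etc/redhat-release also exists on RHEL, CentOS, and AlmaLinux, so
set_os() treats any Red Hat derivative as "rocky". A stricter hypothetical
sketch, not part of the commit, would key off /etc/os-release instead:

    #!/bin/bash
    . /etc/os-release   # defines ID, e.g. "rocky", "centos", "ubuntu"
    case "$ID" in
        rocky)  OS=rocky ;;
        ubuntu) OS=ubuntu ;;
        *)      OS=unknown ;;
    esac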
@@ -95,8 +95,6 @@ function soUserSync() {
   $(dirname $0)/so-user sync
   printf "\nApplying logstash state to the appropriate nodes.\n\n"
   salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-searchnode or G@role:so-heavynode' state.apply logstash queue=True
-  printf "\nApplying filebeat state to the appropriate nodes.\n\n"
-  salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-searchnode or G@role:so-heavynode or G@role:so-sensor or G@role:so-fleet' state.apply filebeat queue=True
   printf "\nApplying kibana state to the appropriate nodes.\n\n"
   salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch' state.apply kibana queue=True
   printf "\nApplying curator state to the appropriate nodes.\n\n"
@@ -54,17 +54,10 @@ if [ $SKIP -ne 1 ]; then
   if [ "$INPUT" != "AGREE" ] ; then exit 0; fi
 fi

-# Check to see if Logstash/Filebeat are running
+# Check to see if Logstash is running
 LS_ENABLED=$(so-status | grep logstash)
-FB_ENABLED=$(so-status | grep filebeat)
 EA_ENABLED=$(so-status | grep elastalert)

-if [ ! -z "$FB_ENABLED" ]; then
-
-  /usr/sbin/so-filebeat-stop
-
-fi
-
 if [ ! -z "$LS_ENABLED" ]; then

   /usr/sbin/so-logstash-stop

@@ -86,13 +79,7 @@ do
   curl -K /opt/so/conf/elasticsearch/curl.config -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
 done

-#Start Logstash/Filebeat
-if [ ! -z "$FB_ENABLED" ]; then
-
-  /usr/sbin/so-filebeat-start
-
-fi
-
+#Start Logstash
 if [ ! -z "$LS_ENABLED" ]; then

   /usr/sbin/so-logstash-start
@@ -22,10 +22,6 @@
   /usr/sbin/so-restart logstash $1
 {%- endif %}

-{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-sensor']%}
-  /usr/sbin/so-restart filebeat $1
-{%- endif %}
-
 {%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
   /usr/sbin/so-restart curator $1
 {%- endif %}
@@ -22,10 +22,6 @@
   /usr/sbin/so-start logstash $1
 {%- endif %}

-{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-sensor']%}
-  /usr/sbin/so-start filebeat $1
-{%- endif %}
-
 {%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
   /usr/sbin/so-start curator $1
 {%- endif %}
@@ -22,10 +22,6 @@
   /usr/sbin/so-stop logstash $1
 {%- endif %}

-{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-sensor']%}
-  /usr/sbin/so-stop filebeat $1
-{%- endif %}
-
 {%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
   /usr/sbin/so-stop curator $1
 {%- endif %}
@@ -1,58 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


{%- set mainint = salt['pillar.get']('host:mainint') %}
{%- set MYIP = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %}

default_conf_dir=/opt/so/conf
ELASTICSEARCH_HOST="{{ MYIP }}"
ELASTICSEARCH_PORT=9200
#ELASTICSEARCH_AUTH=""

# Define a default directory to load pipelines from
FB_MODULE_YML="/usr/share/filebeat/module-setup.yml"


# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
  curl -K /opt/so/conf/elasticsearch/curl.config -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
  if [ $? -eq 0 ]; then
    ELASTICSEARCH_CONNECTED="yes"
    echo "connected!"
    break
  else
    ((COUNT+=1))
    sleep 1
    echo -n "."
  fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
  echo
  echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
  echo
fi
echo "Testing to see if the pipelines are already applied"
ESVER=$(curl -K /opt/so/conf/elasticsearch/curl.config -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" |jq .version.number |tr -d \")
PIPELINES=$(curl -K /opt/so/conf/elasticsearch/curl.config -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"/_ingest/pipeline/filebeat-$ESVER-elasticsearch-server-pipeline | jq . | wc -c)

if [[ "$PIPELINES" -lt 5 ]] || [ "$2" != "--force" ]; then
  echo "Setting up ingest pipeline(s)"
  {% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
  {%- for module in MODULESMERGED.modules.keys() %}
  {%- for fileset in MODULESMERGED.modules[module] %}
  echo "{{ module }}.{{ fileset}}"
  docker exec -i so-filebeat filebeat setup --pipelines --modules {{ module }} -M "{{ module }}.{{ fileset }}.enabled=true" -c $FB_MODULE_YML
  sleep 0.5
  {% endfor %}
  {%- endfor %}
else
  exit 0
fi
@@ -1,12 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

/usr/sbin/so-start filebeat $1
@@ -1,12 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

/usr/sbin/so-stop filebeat $1
salt/common/tools/sbin/so-filebeat-restart → salt/common/tools/sbin/so-repo-sync
Executable file → Normal file
@@ -1,12 +1,16 @@
 #!/bin/bash

-#
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.


 . /usr/sbin/so-common

-/usr/sbin/so-restart filebeat $1
+set_version
+set_os
+salt_minion_count
+
+curl --retry 5 --retry-delay 60 -A "checkin/$VERSION/$OS/$(uname -r)/$MINIONCOUNT" https://sigs.securityonion.net/checkup --output /tmp/checkup
+dnf reposync --norepopath -n -g --delete -m -c /root/repodownload.conf --repoid=securityonionsync --download-metadata -p /nsm/repo/
+createrepo /nsm/repo
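Note: dnf reposync -c points at a standalone config that defines the
securityonionsync repoid. A minimal sketch of generating such a file; the
baseurl is a placeholder, not the project's actual mirror URL:

    #!/bin/bash
    cat > /root/repodownload.conf <<'EOF'
    [securityonionsync]
    name=Security Onion package mirror
    baseurl=https://repo.example.com/securityonion/el9/
    enabled=1
    gpgcheck=1
    EOF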
@@ -7,7 +7,7 @@



-# Usage: so-restart filebeat | kibana | playbook
+# Usage: so-restart kibana | playbook

 . /usr/sbin/so-common

@@ -27,5 +27,5 @@ if [ $# -ge 1 ]; then
   *) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;;
 esac
 else
-  echo -e "\nPlease provide an argument by running like so-restart $component, or by using the component-specific script.\nEx. so-restart filebeat, or so-filebeat-restart\n"
+  echo -e "\nPlease provide an argument by running like so-restart $component, or by using the component-specific script.\nEx. so-restart logstash, or so-logstash-restart\n"
 fi
@@ -7,7 +7,7 @@



-# Usage: so-start all | filebeat | kibana | playbook
+# Usage: so-start all | kibana | playbook

 . /usr/sbin/so-common

@@ -27,5 +27,5 @@ if [ $# -ge 1 ]; then
   *) if docker ps | grep -E -q '^so-$1$'; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
 esac
 else
-  echo -e "\nPlease provide an argument by running like so-start $component, or by using the component-specific script.\nEx. so-start filebeat, or so-filebeat-start\n"
+  echo -e "\nPlease provide an argument by running like so-start $component, or by using the component-specific script.\nEx. so-start logstash, or so-logstash-start\n"
 fi
@@ -7,7 +7,7 @@



-# Usage: so-stop filebeat | kibana | playbook | thehive
+# Usage: so-stop kibana | playbook | thehive

 . /usr/sbin/so-common

@@ -20,6 +20,6 @@ if [ $# -ge 1 ]; then
   *) docker stop so-$1 ; docker rm so-$1 ;;
 esac
 else
-  echo -e "\nPlease provide an argument by running like so-stop $component, or by using the component-specific script.\nEx. so-stop filebeat, or so-filebeat-stop\n"
+  echo -e "\nPlease provide an argument by running like so-stop $component, or by using the component-specific script.\nEx. so-stop logstash, or so-logstash-stop\n"
 fi
@@ -1026,8 +1026,6 @@ upgrade_salt() {
   else
     echo "Salt upgrade success."
     echo ""
-    echo "Removing /opt/so/state files for patched Salt InfluxDB module and state. This is due to Salt being upgraded and needing to patch the files again."
-    rm -vrf /opt/so/state/influxdb_continuous_query.py.patched /opt/so/state/influxdb_retention_policy.py.patched /opt/so/state/influxdbmod.py.patched
   fi

 }

@@ -1138,9 +1136,7 @@ fix_wazuh() {
     # Update the repo files so it can actually upgrade
     upgrade_salt
   fi
-  rm -f /opt/so/state/influxdb_continuous_query.py.patched /opt/so/state/influxdbmod.py.patched /opt/so/state/influxdb_retention_policy.py.patched
   systemctl_func "start" "salt-master"
-  salt-call state.apply salt.python3-influxdb -l info
   systemctl_func "start" "salt-minion"
   systemctl_func "start" "$cron_service_name"

@@ -1309,11 +1305,6 @@ main() {
   salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
   set -e

-  echo ""
-  echo "Ensuring python modules for Salt are installed and patched."
-  salt-call state.apply salt.python3-influxdb -l info queue=True
-  echo ""
-
   # update the salt-minion configs here and start the minion
   # since highstates are disabled above, minion start should not trigger a highstate
   echo ""
@@ -13,8 +13,6 @@ read lastPID < $lf
 [ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
 echo $$ > $lf

-{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
-
 /usr/sbin/so-curator-closed-delete > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-close.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-close.yml > /dev/null 2>&1;

@@ -27,6 +25,3 @@ docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/cur
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-close.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-close.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-close.yml > /dev/null 2>&1;
-{% for INDEX in MODULESMERGED.modules.keys() -%}
-docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-close.yml > /dev/null 2>&1{% if not loop.last %};{% endif %}
-{% endfor -%}
@@ -13,8 +13,6 @@ read lastPID < $lf
 [ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
 echo $$ > $lf

-{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
-
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-close.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-close.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-close.yml > /dev/null 2>&1;

@@ -25,6 +23,3 @@ docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/cur
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-close.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-close.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-close.yml > /dev/null 2>&1;
-{% for INDEX in MODULESMERGED.modules.keys() -%}
-docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-close.yml > /dev/null 2>&1{% if not loop.last %};{% endif %}
-{% endfor -%}
@@ -13,8 +13,6 @@ read lastPID < $lf
 [ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
 echo $$ > $lf

-{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
-
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-delete.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-delete.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-delete.yml > /dev/null 2>&1;

@@ -25,6 +23,3 @@ docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/cur
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-delete.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-delete.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-delete.yml > /dev/null 2>&1;
-{% for INDEX in MODULESMERGED.modules.keys() -%}
-docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-delete.yml > /dev/null 2>&1{% if not loop.last %};{% endif %}
-{% endfor -%}
@@ -14,8 +14,6 @@ read lastPID < $lf
 [ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
 echo $$ > $lf

-{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
-
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-warm.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-warm.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-warm.yml > /dev/null 2>&1;

@@ -26,6 +24,3 @@ docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/cur
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-warm.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-warm.yml > /dev/null 2>&1;
 docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-warm.yml > /dev/null 2>&1;
-{% for INDEX in MODULESMERGED.modules.keys() -%}
-docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-warm.yml > /dev/null 2>&1{% if not loop.last %};{% endif %}
-{% endfor -%}
@@ -17,12 +17,6 @@ docker:
       port_bindings:
         - 0.0.0.0:9200:9200/tcp
        - 0.0.0.0:9300:9300/tcp
-    'so-filebeat':
-      final_octet: 23
-      port_bindings:
-        - 0.0.0.0:514:514/udp
-        - 0.0.0.0:514:514/tcp
-        - 0.0.0.0:5066:5066/tcp
     'so-idstools':
       final_octet: 25
     'so-influxdb':
@@ -26,10 +26,10 @@ dockerheldpackages:
 dockerheldpackages:
   pkg.installed:
     - pkgs:
-      - containerd.io: 1.4.4-3.1.el7
-      - docker-ce: 3:20.10.5-3.el7
-      - docker-ce-cli: 1:20.10.5-3.el7
-      - docker-ce-rootless-extras: 20.10.5-3.el7
+      - containerd.io: 1.6.18-3.1.el9
+      - docker-ce: 23.0.1-1.el9
+      - docker-ce-cli: 23.0.1-1.el9
+      - docker-ce-rootless-extras: 23.0.1-1.el9
     - hold: True
     - update_holds: True
 {% endif %}
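Note: hold: True / update_holds: True depend on the dnf versionlock plugin
(python3-dnf-plugin-versionlock, added to commonpkgs above). Roughly the
manual equivalent of what this state pins:

    #!/bin/bash
    dnf versionlock add containerd.io-1.6.18-3.1.el9 docker-ce-23.0.1-1.el9
    dnf versionlock list   # show current locks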
@@ -3,7 +3,7 @@
 {% set elastalert_pillar = salt['pillar.get']('elastalert:config', {}) %}


-{% do ELASTALERT.elastalert.config.update({'es_host': GLOBALS.manager_ip}) %}
+{% do ELASTALERT.elastalert.config.update({'es_host': GLOBALS.manager}) %}
 {% do ELASTALERT.elastalert.config.update({'es_username': pillar.elasticsearch.auth.users.so_elastic_user.user}) %}
 {% do ELASTALERT.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %}

@@ -97,7 +97,7 @@ so-elastalert:
       - /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
       - /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
     - extra_hosts:
-      - {{GLOBALS.url_base}}:{{GLOBALS.manager_ip}}
+      - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
     - require:
       - cmd: wait_for_elasticsearch
       - file: elastarules
@@ -52,7 +52,11 @@ so-elastic-fleet:
       - sobridge:
         - ipv4_address: {{ DOCKER.containers['so-elastic-fleet'].ip }}
+    - extra_hosts:
+      {% if GLOBALS.is_manager %}
+      - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
+      {% else %}
+      - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
+      {% endif %}
     - port_bindings:
       {% for BINDING in DOCKER.containers['so-elastic-fleet'].port_bindings %}
       - {{ BINDING }}

@@ -63,7 +67,7 @@ so-elastic-fleet:
     - environment:
       - FLEET_SERVER_ENABLE=true
       - FLEET_URL=https://{{ FLEETURL }}:8220
-      - FLEET_SERVER_ELASTICSEARCH_HOST=https://{{ GLOBALS.manager_ip }}:9200
+      - FLEET_SERVER_ELASTICSEARCH_HOST=https://{{ GLOBALS.manager }}:9200
       - FLEET_SERVER_SERVICE_TOKEN={{ SERVICETOKEN }}
       - FLEET_SERVER_POLICY_ID={{ FLEETSERVERPOLICY }}
       - FLEET_SERVER_ELASTICSEARCH_CA=/etc/pki/intca.crt
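Note: each extra_hosts entry becomes an /etc/hosts line inside the container,
equivalent to docker's --add-host flag; with hypothetical values:

    #!/bin/bash
    docker run --rm --add-host "manager.example.internal:10.66.0.10" \
        alpine getent hosts manager.example.internal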
@@ -1,112 +0,0 @@
filebeat:
  config:
    zeek_logs_enabled:
      - conn
      - dce_rpc
      - dhcp
      - dnp3
      - dns
      - dpd
      - files
      - ftp
      - http
      - intel
      - irc
      - kerberos
      - modbus
      - mysql
      - notice
      - ntlm
      - pe
      - radius
      - rfb
      - rdp
      - sip
      - smb_files
      - smb_mapping
      - smtp
      - snmp
      - socks
      - software
      - ssh
      - ssl
      - tunnel
      - weird
      - x509
      - bacnet
      - bacnet_discovery
      - bacnet_property
      - bsap_ip_header
      - bsap_ip_rdb
      - bsap_ip_unknown
      - bsap_serial_header
      - bsap_serial_rdb
      - bsap_serial_rdb_ext
      - bsap_serial_unknown
      - cip
      - cip_identity
      - cip_io
      - cotp
      - dnp3_control
      - dnp3_objects
      - ecat_aoe_info
      - ecat_coe_info
      - ecat_dev_info
      - ecat_foe_info
      - ecat_log_address
      - ecat_registers
      - ecat_soe_info
      - enip
      - modbus_detailed
      - modbus_mask_write_register
      - modbus_read_write_multiple_registers
      - opcua_binary
      - opcua_binary_activate_session
      - opcua_binary_activate_session_client_software_cert
      - opcua_binary_activate_session_diagnostic_info
      - opcua_binary_activate_session_locale_id
      - opcua_binary_browse
      - opcua_binary_browse_description
      - opcua_binary_browse_diagnostic_info
      - opcua_binary_browse_request_continuation_point
      - opcua_binary_browse_response_references
      - opcua_binary_browse_result
      - opcua_binary_create_session
      - opcua_binary_create_session_discovery
      - opcua_binary_create_session_endpoints
      - opcua_binary_create_session_user_token
      - opcua_binary_create_subscription
      - opcua_binary_diag_info_detail
      - opcua_binary_get_endpoints
      - opcua_binary_get_endpoints_description
      - opcua_binary_get_endpoints_discovery
      - opcua_binary_get_endpoints_locale_id
      - opcua_binary_get_endpoints_profile_uri
      - opcua_binary_get_endpoints_user_token
      - opcua_binary_opensecure_channel
      - opcua_binary_read
      - opcua_binary_read_array_dims
      - opcua_binary_read_array_dims_link
      - opcua_binary_read_diagnostic_info
      - opcua_binary_read_extension_object
      - opcua_binary_read_extension_object_link
      - opcua_binary_read_nodes_to_read
      - opcua_binary_read_results
      - opcua_binary_read_results_link
      - opcua_binary_read_status_code
      - opcua_binary_read_variant_data
      - opcua_binary_read_variant_data_link
      - opcua_binary_status_code_detail
      - profinet
      - profinet_dce_rpc
      - profinet_debug
      - s7comm
      - s7comm_plus
      - s7comm_read_szl
      - s7comm_upload_download
      - stun
      - stun_nat
      - tds
      - tds_rpc
      - tds_sql_batch
      - wireguard
@@ -1,674 +0,0 @@
{%- if grains.role == 'so-heavynode' %}
{%- set MANAGER = salt['grains.get']('host', '') %}
{%- else %}
{%- set MANAGER = salt['grains.get']('master') %}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

{%- set HOSTNAME = salt['grains.get']('host', '') %}
{%- set ZEEKVER = salt['pillar.get']('global:mdengine', '') %}
{%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %}
{%- set RITAENABLED = salt['pillar.get']('rita:enabled', False) -%}
{%- set FBMEMEVENTS = salt['pillar.get']('filebeat:mem_events', 2048) -%}
{%- set FBMEMFLUSHMINEVENTS = salt['pillar.get']('filebeat:mem_flush_min_events', 2048) -%}
{%- set FBLSWORKERS = salt['pillar.get']('filebeat:ls_workers', 1) -%}
{%- set FBLSBULKMAXSIZE = salt['pillar.get']('filebeat:ls_bulk_max_size', 2048) -%}
{%- set FBLOGGINGLEVEL = salt['pillar.get']('filebeat:logging:level', 'warning') -%}

name: {{ HOSTNAME }}

#================================ Logging ======================================
# There are four options for the log output: file, stderr, syslog, eventlog
# The file output is the default.

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
logging.level: {{ FBLOGGINGLEVEL }}

# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are "beat", "publish", "service"
# Multiple selectors can be chained.
#logging.selectors: [ ]

# Send all logging output to syslog. The default is false.
#logging.to_syslog: false

# Send all logging output to Windows Event Logs. The default is false.
#logging.to_eventlog: false

# If enabled, filebeat periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true

# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s

# Logging to rotating files. Set logging.to_files to false to disable logging to
# files.
logging.to_files: true
logging.files:
  # Configure the path where the logs are written. The default is the logs directory
  # under the home path (the binary location).
  path: /usr/share/filebeat/logs

  # The name of the files where the logs are written to.
  name: filebeat.log

  # Configure log file size limit. If limit is reached, log file will be
  # automatically rotated
  rotateeverybytes: 10485760 # = 10MB

  # Rotate on startup
  rotateonstartup: false

  # Number of rotated log files to keep. Oldest files will be deleted first.
  keepfiles: 7

  # The permissions mask to apply when rotating log files. The default value is 0600.
  # Must be a valid Unix-style file permissions mask expressed in octal notation.
  #permissions: 0600

# Set to true to log messages in json format.
#logging.json: false



#========================== Modules configuration ============================
filebeat.config.modules:
  enabled: true
  path: ${path.config}/modules.d/*.yml

filebeat.modules:
#=========================== Filebeat prospectors =============================

# List of prospectors to fetch data.
filebeat.inputs:
#------------------------------ Log prospector --------------------------------
- type: udp
  enabled: true
  host: "0.0.0.0:514"
  fields:
    module: syslog
    dataset: syslog
  pipeline: "syslog"
  index: "so-syslog"
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true

- type: tcp
  enabled: true
  host: "0.0.0.0:514"
  fields:
    module: syslog
    dataset: syslog
  pipeline: "syslog"
  index: "so-syslog"
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true

{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-manager', 'so-managersearch', 'so-import'] %}
- type: filestream
  id: logscan
  paths:
    - /logs/logscan/alerts.log
  fields:
    module: logscan
    dataset: alert
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
  clean_removed: true
  close_removed: false
{%- endif %}

{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-sensor', 'so-helix', 'so-heavynode', 'so-import'] %}
{%- if ZEEKVER != 'SURICATA' %}
{% import_yaml 'filebeat/defaults.yaml' as FBD with context %}

{% set FBCONFIG = salt['pillar.get']('filebeat:zeek_logs_enabled', default=FBD.filebeat, merge=True) %}

{%- for LOGNAME in FBCONFIG.zeek_logs_enabled %}
- type: filestream
  id: zeek-{{ LOGNAME }}
  paths:
    - /nsm/zeek/logs/current/{{ LOGNAME }}.log
  fields:
    module: zeek
    dataset: {{ LOGNAME }}
    category: network
  processors:
    {%- if LOGNAME is match('^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*') %}
    - add_tags:
        tags: ["ics"]
    {%- endif %}
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]

  fields_under_root: true
  clean_removed: true
  close_removed: false

- type: filestream
  id: import-zeek={{ LOGNAME }}
  paths:
    - /nsm/import/*/zeek/logs/{{ LOGNAME }}.log
  fields:
    module: zeek
    dataset: {{ LOGNAME }}
    category: network
    imported: true
  processors:
    {%- if LOGNAME is match('^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*') %}
    - add_tags:
        tags: ["ics"]
    {%- endif %}
    - add_tags:
        tags: ["import"]
    - dissect:
        tokenizer: "/nsm/import/%{import.id}/zeek/logs/%{import.file}"
        field: "log.file.path"
        target_prefix: ""
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]

  fields_under_root: true
  clean_removed: false
  close_removed: false
{%- endfor %}
{%- endif %}

- type: filestream
  id: suricata-eve
  paths:
    - /nsm/suricata/eve*.json
  fields:
    module: suricata
    dataset: common
    category: network

  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]

  fields_under_root: true
  clean_removed: false
  close_removed: false

- type: filestream
  id: import-suricata
  paths:
    - /nsm/import/*/suricata/eve*.json
  fields:
    module: suricata
    dataset: common
    category: network
    imported: true
  processors:
    - add_tags:
        tags: ["import"]
    - dissect:
        tokenizer: "/nsm/import/%{import.id}/suricata/%{import.file}"
        field: "log.file.path"
        target_prefix: ""
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]

  fields_under_root: true
  clean_removed: false
  close_removed: false
{%- if STRELKAENABLED == 1 %}
- type: filestream
  id: strelka
  paths:
    - /nsm/strelka/log/strelka.log
  fields:
    module: strelka
    category: file
    dataset: file

  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]

  fields_under_root: true
  clean_removed: false
  close_removed: false

{%- endif %}
{%- endif %}

{%- if RITAENABLED %}
- type: filestream
  id: rita-beacon
  paths:
    - /nsm/rita/beacons.csv
  exclude_lines: ['^Score', '^Source', '^Domain', '^No results']
  fields:
    module: rita
    dataset: beacon
    category: network
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
  pipeline: "rita.beacon"
  index: "so-rita"

- type: filestream
  id: rita-connection
  paths:
    - /nsm/rita/long-connections.csv
    - /nsm/rita/open-connections.csv
  exclude_lines: ['^Source', '^No results']
  fields:
    module: rita
    dataset: connection
    category: network
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
  pipeline: "rita.connection"
  index: "so-rita"

- type: filestream
  id: rita-dns
  paths:
    - /nsm/rita/exploded-dns.csv
  exclude_lines: ['^Domain', '^No results']
  fields:
    module: rita
    dataset: dns
    category: network
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
  pipeline: "rita.dns"
  index: "so-rita"
{%- endif %}

{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-manager', 'so-managersearch', 'so-import'] %}
- type: filestream
  id: kratos
  paths:
    - /logs/kratos/kratos.log
  fields:
    module: kratos
    category: host
  processors:
    - decode_json_fields:
        fields: ["message"]
        target: ""
        add_error_key: true
    - rename:
        fields:
          - from: "audience"
            to: "event.dataset"
        ignore_missing: true
    - add_fields:
        when:
          not:
            has_fields: ['event.dataset']
        target: ''
        fields:
          event.dataset: access
  pipeline: "kratos"
  fields_under_root: true
  clean_removed: false
  close_removed: false
{%- endif %}

{%- if grains.role == 'so-idh' %}
- type: filestream
  id: idh
  paths:
    - /nsm/idh/opencanary.log
  fields:
    module: opencanary
    dataset: idh
    category: host
    tags: beat-ext
  processors:
    - decode_json_fields:
        fields: ["message"]
        target: ""
        add_error_key: true
    - drop_fields:
        when:
          equals:
            logtype: 1001
        fields: ["src_host", "src_port", "dst_host", "dst_port" ]
        ignore_missing: true
    - rename:
        fields:
          - from: "src_host"
            to: "source.ip"
          - from: "src_port"
            to: "source.port"
          - from: "dst_host"
            to: "destination.host"
          - from: "dst_port"
            to: "destination.port"
        ignore_missing: true
    - convert:
        fields:
          - {from: "logtype", to: "event.code", type: "string"}
        ignore_missing: true
    - drop_fields:
        fields: '["prospector", "input", "offset", "beat"]'
  fields_under_root: true
  clean_removed: false
  close_removed: false
{%- endif %}

{%- if INPUTS %}
# USER PILLAR DEFINED INPUTS
{{ INPUTS | yaml(False) }}
{%- endif %}

{% if OUTPUT -%}
# USER PILLAR DEFINED OUTPUT
{%- set types = OUTPUT.keys() | list %}
{%- set type = types[0] %}
output.{{ type }}:
{%- for i in OUTPUT[type].items() %}
  {{ i[0] }}: {{ i[1] }}
{%- endfor %}
{%- else %}
#----------------------------- Elasticsearch/Logstash output ---------------------------------
{%- if grains['role'] in ["so-eval", "so-import"] %}
output.elasticsearch:
  enabled: true
  hosts: ["https://{{ MANAGER }}:9200"]
  username: "{{ ES_USER }}"
  password: "{{ ES_PASS }}"
  ssl.certificate_authorities: ["/usr/share/filebeat/intraca.crt"]
  pipelines:
    - pipeline: "%{[module]}.%{[dataset]}"
  indices:
    - index: "so-import"
      when.contains:
        tags: "import"
    - index: "so-zeek"
      when.contains:
        module: "zeek"
    - index: "so-ids"
      when.contains:
        module: "suricata"
    - index: "so-ossec"
      when.contains:
        module: "ossec"
    - index: "so-osquery"
      when.contains:
        module: "osquery"
    - index: "so-strelka"
      when.contains:
        module: "strelka"
    - index: "so-logscan"
      when.contains:
        module: "logscan"
    - index: "so-elasticsearch-%{+YYYY.MM.dd}"
      when.contains:
        event.module: "elasticsearch"
    - index: "so-kibana-%{+YYYY.MM.dd}"
      when.contains:
        event.module: "kibana"

setup.template.enabled: false
{%- else %}

output.logstash:
  # Boolean flag to enable or disable the output module.
  enabled: true

  # The Logstash hosts
  hosts:
  {# dont let filebeat send to a node designated as dmz #}
  {% import_yaml 'logstash/dmz_nodes.yaml' as dmz_nodes -%}
  {% if dmz_nodes.logstash.dmz_nodes -%}
  {% set dmz_nodes = dmz_nodes.logstash.dmz_nodes -%}
  {% else -%}
  {% set dmz_nodes = [] -%}
  {% endif -%}
  {%- if grains.role in ['so-sensor', 'so-fleet', 'so-searchnode', 'so-idh'] %}
  {%- set LOGSTASH = namespace() %}
  {%- set LOGSTASH.count = 0 %}
  {%- set LOGSTASH.loadbalance = false %}
  {%- set node_data = salt['pillar.get']('logstash:nodes') %}
  {%- for node_type, node_details in node_data.items() | sort -%}
  {%- if node_type in ['manager', 'managersearch', 'standalone', 'receiver' ] %}
  {%- for hostname in node_data[node_type].keys() %}
  {%- if hostname not in dmz_nodes %}
  {%- set LOGSTASH.count = LOGSTASH.count + 1 %}
    - "{{ hostname }}:5644" #{{ node_details[hostname].ip }}
  {%- endif %}
  {%- endfor %}
  {%- endif %}
  {%- if LOGSTASH.count > 1 %}
  {%- set LOGSTASH.loadbalance = true %}
  {%- endif %}
  {%- endfor %}

  loadbalance: {{ LOGSTASH.loadbalance | lower }}
  {%- else %}
    - "{{ grains.host }}:5644"
  {%- endif %}

  # Number of workers per Logstash host.
  worker: {{ FBLSWORKERS }}

  # Number of records to send to Logstash input at a time
  bulk_max_size: {{ FBLSBULKMAXSIZE }}

  # Set gzip compression level.
  #compression_level: 3

  # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  ssl.certificate_authorities: ["/usr/share/filebeat/intraca.crt"]

  # Certificate for SSL client authentication
  ssl.certificate: "/usr/share/filebeat/filebeat.crt"

  # Client Certificate Key
  ssl.key: "/usr/share/filebeat/filebeat.key"

setup.template.enabled: false
# A dictionary of settings to place into the settings.index dictionary
# of the Elasticsearch template. For more details, please check
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
#index:
  #number_of_shards: 1
  #codec: best_compression
  #number_of_routing_shards: 30

# A dictionary of settings for the _source field. For more details, please check
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
#_source:
  #enabled: false
{%- endif %}
{% endif %}
#============================== Kibana =====================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
#setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "localhost:5601"

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

  # Optional HTTP Path
  #path: ""

  # Use SSL settings for HTTPS. Default is true.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # SSL configuration. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []



#============================== Xpack Monitoring =====================================
# filebeat can export internal metrics to a central Elasticsearch monitoring cluster.
# This requires xpack monitoring to be enabled in Elasticsearch.
# The reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#xpack.monitoring.enabled: false

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well. Any setting that is not set is
# automatically inherited from the Elasticsearch output configuration, so if you
# have the Elasticsearch output configured, you can simply uncomment the
# following line, and leave the rest commented out.
#xpack.monitoring.elasticsearch:

  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200)
  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  #hosts: ["localhost:9200"]

  # Set gzip compression level.
  #compression_level: 0

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "beats_system"
  #password: "changeme"

  # Dictionary of HTTP parameters to pass within the url with index operations.
  #parameters:
    #param1: value1
    #param2: value2

  # Custom HTTP headers to add to each request
  #headers:
  #  X-My-Header: Contents of the header

  # Proxy server url
  #proxy_url: http://proxy:3128

  # The number of times a particular Elasticsearch index operation is attempted. If
  # the indexing operation doesn't succeed after this many retries, the events are
  # dropped. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  # The default is 50.
  #bulk_max_size: 50

  # Configure http request timeout before failing a request to Elasticsearch.
  #timeout: 90

  # Use SSL settings for HTTPS.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # SSL configuration. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through a HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats . For pretty JSON output
# append ?pretty to the URL.

# Defines if the HTTP endpoint is enabled.
http.enabled: true

# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
http.host: 0.0.0.0

# Port on which the HTTP endpoint will bind. Default is 5066.
http.port: 5066

queue.mem.events: {{ FBMEMEVENTS }}
queue.mem.flush.min_events: {{ FBMEMFLUSHMINEVENTS }}
@@ -1,10 +0,0 @@
{%- set ES = salt['grains.get']('master') -%}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

output.elasticsearch:
  enabled: true
  hosts: ["https://{{ ES }}:9200"]
  username: "{{ ES_USER }}"
  password: "{{ ES_PASS }}"
  ssl.certificate_authorities: ["/usr/share/filebeat/intraca.crt"]
@@ -1,2 +0,0 @@
# DO NOT EDIT THIS FILE
{{ MODULES|yaml(False) }}
@@ -1,162 +0,0 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
{% from 'filebeat/modules.map.jinja' import MODULESENABLED with context %}
{% from 'filebeat/map.jinja' import FILEBEAT_EXTRA_HOSTS with context %}
{% set ES_INCLUDED_NODES = ['so-eval', 'so-standalone', 'so-managersearch', 'so-searchnode', 'so-heavynode', 'so-import'] %}

include:
  - ssl
# only include the elastic state for certain nodes
{% if grains.role in ES_INCLUDED_NODES %}
  - elasticsearch
{% endif %}

filebeatetcdir:
  file.directory:
    - name: /opt/so/conf/filebeat/etc
    - user: 939
    - group: 939
    - makedirs: True

filebeatmoduledir:
  file.directory:
    - name: /opt/so/conf/filebeat/modules
    - user: root
    - group: root
    - makedirs: True

filebeatlogdir:
  file.directory:
    - name: /opt/so/log/filebeat
    - user: 939
    - group: 939
    - makedirs: True

filebeatpkidir:
  file.directory:
    - name: /opt/so/conf/filebeat/etc/pki
    - user: 939
    - group: 939
    - makedirs: True

fileregistrydir:
  file.directory:
    - name: /opt/so/conf/filebeat/registry
    - user: 939
    - group: 939
    - makedirs: True

# This needs to be owned by root
filebeatconf:
  file.managed:
    - name: /opt/so/conf/filebeat/etc/filebeat.yml
    - source: salt://filebeat/etc/filebeat.yml
    - user: root
    - group: root
    - template: jinja
    - defaults:
        INPUTS: {{ salt['pillar.get']('filebeat:config:inputs', {}) }}
        OUTPUT: {{ salt['pillar.get']('filebeat:config:output', {}) }}
    - show_changes: False

# Filebeat module config file
filebeatmoduleconf:
  file.managed:
    - name: /opt/so/conf/filebeat/etc/module-setup.yml
    - source: salt://filebeat/etc/module-setup.yml
    - user: root
    - group: root
    - mode: 640
    - template: jinja
    - show_changes: False

merged_module_conf:
  file.managed:
    - name: /opt/so/conf/filebeat/modules/modules.yml
    - source: salt://filebeat/etc/module_config.yml.jinja
    - template: jinja
    - defaults:
        MODULES: {{ MODULESENABLED }}

so_module_conf_remove:
  file.absent:
    - name: /opt/so/conf/filebeat/modules/securityonion.yml

thirdyparty_module_conf_remove:
  file.absent:
    - name: /opt/so/conf/filebeat/modules/thirdparty.yml

so-filebeat:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-filebeat:{{ GLOBALS.so_version }}
    - hostname: so-filebeat
    - user: root
    - networks:
      - sobridge:
        - ipv4_address: {{ DOCKER.containers['so-filebeat'].ip }}
    - extra_hosts: {{ FILEBEAT_EXTRA_HOSTS }}
    - binds:
      - /nsm:/nsm:ro
      - /opt/so/log/filebeat:/usr/share/filebeat/logs:rw
      - /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
      - /opt/so/conf/filebeat/etc/module-setup.yml:/usr/share/filebeat/module-setup.yml:ro
      - /nsm/wazuh/logs/alerts:/wazuh/alerts:ro
      - /nsm/wazuh/logs/archives:/wazuh/archives:ro
      - /opt/so/conf/filebeat/modules:/usr/share/filebeat/modules.d
      - /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
      - /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
      - /opt/so/conf/filebeat/registry:/usr/share/filebeat/data/registry:rw
      - /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
      - /opt/so/log:/logs:ro
    - port_bindings:
      {% for BINDING in DOCKER.containers['so-filebeat'].port_bindings %}
      - {{ BINDING }}
      {% endfor %}
      {% for module in MODULESMERGED.modules.keys() %}
        {% for submodule in MODULESMERGED.modules[module] %}
          {% if MODULESMERGED.modules[module][submodule].enabled and MODULESMERGED.modules[module][submodule]["var.syslog_port"] is defined %}
      - {{ MODULESMERGED.modules[module][submodule].get("var.syslog_host", "0.0.0.0") }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}/tcp
      - {{ MODULESMERGED.modules[module][submodule].get("var.syslog_host", "0.0.0.0") }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}:{{ MODULESMERGED.modules[module][submodule]["var.syslog_port"] }}/udp
          {% endif %}
        {% endfor %}
      {% endfor %}
    - watch:
      - file: filebeatconf
    - require:
      - file: filebeatconf
      - file: filebeatmoduleconf
      - file: filebeatmoduledir
      - x509: conf_filebeat_crt
      - x509: conf_filebeat_key
      - x509: trusttheca

{% if grains.role in ES_INCLUDED_NODES %}
run_module_setup:
  cmd.run:
    - name: /usr/sbin/so-filebeat-module-setup
    - require:
      - file: filebeatmoduleconf
      - docker_container: so-filebeat
    - onchanges:
      - docker_container: so-elasticsearch
{% endif %}

append_so-filebeat_so-status.conf:
  file.append:
    - name: /opt/so/conf/so-status/so-status.conf
    - text: so-filebeat

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
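For context: the port_bindings loop above means any third-party fileset that is enabled and defines a var.syslog_port gets that port published on both TCP and UDP. A minimal pillar sketch, assuming a hypothetical override (the sonicwall values mirror the defaults removed later in this diff):

  filebeat:
    third_party_filebeat:
      modules:
        sonicwall:
          firewall:
            enabled: true
            var.input: udp
            var.syslog_host: 0.0.0.0
            var.syslog_port: 9519

With that pillar in place, the loop would render "0.0.0.0:9519:9519/tcp" and "0.0.0.0:9519:9519/udp" into the container's port bindings.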
@@ -1,15 +0,0 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set role = GLOBALS.role %}
{% set FILEBEAT_EXTRA_HOSTS = [] %}
{% if role in ['so-sensor', 'so-fleet', 'so-searchnode', 'so-idh'] %}
{% set node_data = salt['pillar.get']('logstash:nodes') %}
{% for node_type, node_details in node_data.items() | sort %}
{% if node_type in ['manager', 'managersearch', 'standalone', 'receiver' ] %}
{% for hostname in node_data[node_type].keys() %}
{% do FILEBEAT_EXTRA_HOSTS.append({hostname:node_details[hostname].ip}) %}
{% endfor %}
{% endif %}
{% endfor %}
{% endif %}

{% do FILEBEAT_EXTRA_HOSTS.append({GLOBALS.hostname:GLOBALS.node_ip}) %}
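The deleted map.jinja above built FILEBEAT_EXTRA_HOSTS as a list of single-entry hostname-to-IP dicts, with the node's own hostname/IP pair always appended last. A sketch of the rendered value, assuming hypothetical hosts:

  [{'so-manager': '10.0.0.10'}, {'so-sensor01': '10.0.0.21'}]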
@@ -1,18 +0,0 @@
{% import_yaml 'filebeat/thirdpartydefaults.yaml' as TPDEFAULTS %}
{% import_yaml 'filebeat/securityoniondefaults.yaml' as SODEFAULTS %}
{% set THIRDPARTY = salt['pillar.get']('filebeat:third_party_filebeat', default=TPDEFAULTS.third_party_filebeat, merge=True) %}
{% set SO = salt['pillar.get']('filebeat:securityonion_filebeat', default=SODEFAULTS.securityonion_filebeat, merge=True) %}
{% set MODULESMERGED = salt['defaults.merge'](SO, THIRDPARTY, in_place=False) %}

{% set MODULESENABLED = [] %}
{% for module in MODULESMERGED.modules.keys() %}
{% set ENABLEDFILESETS = {} %}
{% for fileset in MODULESMERGED.modules[module] %}
{% if MODULESMERGED.modules[module][fileset].get('enabled', False) %}
{% do ENABLEDFILESETS.update({'module': module, fileset: MODULESMERGED.modules[module][fileset]}) %}
{% endif %}
{% endfor %}
{% if ENABLEDFILESETS|length > 0 %}
{% do MODULESENABLED.append(ENABLEDFILESETS) %}
{% endif %}
{% endfor %}
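Likewise, the deleted modules.map.jinja reduced the merged module tree to a MODULESENABLED list holding only enabled filesets. A sketch of one entry, assuming only the redis log fileset were enabled:

  [{'module': 'redis', 'log': {'enabled': True, 'var.paths': ['/logs/redis.log']}}]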
@@ -1,22 +0,0 @@
filebeat:
  config:
    inputs:
      - type: filestream
        paths:
          - /nsm/mylogdir/mylog.log
        fields:
          module: mymodule
          dataset: mydataset
          category: mycategory

        processors:
          - drop_fields:
              fields: '["source", "prospector", "input", "offset", "beat"]'

        fields_under_root: true
        clean_removed: false
        close_removed: false
    output:
      file:
        path: "/tmp/filebeat"
        filename: filebeat
@@ -1,30 +0,0 @@
{% set ZEEKLOGLOOKUP = {
  'conn': 'connection',
} %}
securityonion_filebeat:
  modules:
{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone','so-searchnode', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
    elasticsearch:
      server:
        enabled: true
        var.paths: ["/logs/elasticsearch/*.log"]
    logstash:
      log:
        enabled: true
        var.paths: ["/logs/logstash.log"]
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
    kibana:
      log:
        enabled: true
        var.paths: ["/logs/kibana/kibana.log"]
{%- endif %}
{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-heavynode'] %}
    redis:
      log:
        enabled: true
        var.paths: ["/logs/redis.log"]
      slowlog:
        enabled: false
{%- endif %}

@@ -1,261 +0,0 @@
third_party_filebeat:
  modules:
    aws:
      cloudtrail:
        enabled: false
      cloudwatch:
        enabled: false
      ec2:
        enabled: false
      elb:
        enabled: false
      s3access:
        enabled: false
      vpcflow:
        enabled: false
    azure:
      activitylogs:
        enabled: false
      platformlogs:
        enabled: false
      auditlogs:
        enabled: false
      signinlogs:
        enabled: false
    barracuda:
      waf:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9503
      spamfirewall:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9524
    bluecoat:
      director:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9505
    cef:
      log:
        enabled: false
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9003
    checkpoint:
      firewall:
        enabled: false
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9505
    cisco:
      asa:
        enabled: false
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9001
      ftd:
        enabled: false
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9003
      ios:
        enabled: false
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9002
      nexus:
        enabled: false
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9506
      meraki:
        enabled: false
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9525
      umbrella:
        enabled: false
      amp:
        enabled: false
    cylance:
      protect:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9508
    f5:
      bigipapm:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9504
      bigipafm:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9528
    fortinet:
      firewall:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9004
      clientendpoint:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9510
      fortimail:
        enabled: false
        var.input: udp
        var.syslog_port: 9350
    gcp:
      vpcflow:
        enabled: false
      firewall:
        enabled: false
      audit:
        enabled: false
    google_workspace:
      saml:
        enabled: false
      user_accounts:
        enabled: false
      login:
        enabled: false
      admin:
        enabled: false
      drive:
        enabled: false
      groups:
        enabled: false
    imperva:
      securesphere:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9511
    infoblox:
      nios:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9512
    juniper:
      junos:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9513
      netscreen:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9523
      srx:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9006
    microsoft:
      defender_atp:
        enabled: false
      m365_defender:
        enabled: false
      dhcp:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9515
    misp:
      threat:
        enabled: false
    netflow:
      log:
        enabled: false
        var.netflow_host: 0.0.0.0
        var.netflow_port: 2055
        var.internal_networks:
          - private
    netscout:
      sightline:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9502
    o365:
      audit:
        enabled: false
    okta:
      system:
        enabled: false
    proofpoint:
      emailsecurity:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9531
    radware:
      defensepro:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9518
    snort:
      log:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9532
    snyk:
      audit:
        enabled: false
      vulnerabilities:
        enabled: false
    sonicwall:
      firewall:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9519
    sophos:
      xg:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9005
      utm:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9533
    squid:
      log:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9520
    tomcat:
      log:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9501
    threatintel:
      abuseurl:
        enabled: false
      abusemalware:
        enabled: false
      misp:
        enabled: false
      malwarebazaar:
        enabled: false
      otx:
        enabled: false
      anomali:
        enabled: false
      anomalithreatstream:
        enabled: false
    zscaler:
      zia:
        enabled: false
        var.input: udp
        var.syslog_host: 0.0.0.0
        var.syslog_port: 9521
@@ -9,7 +9,7 @@ role:
      chain:
        DOCKER-USER:
          hostgroups:
            manager:
  eval:
    portgroups:
      - {{ portgroups.playbook }}
      - {{ portgroups.mysql }}
@@ -255,6 +255,9 @@ role:
      chain:
        DOCKER-USER:
          hostgroups:
            localhost:
              portgroups:
                - {{ portgroups.all }}
            standalone:
              portgroups:
                - {{ portgroups.playbook }}
@@ -7,7 +7,6 @@
    'so-elasticsearch',
    'so-elastic-fleet',
    'so-elastic-fleet-package-registry',
    'so-grafana',
    'so-influxdb',
    'so-kibana',
    'so-kratos',
@@ -32,7 +31,6 @@
    'so-elasticsearch',
    'so-elastic-fleet',
    'so-elastic-fleet-package-registry',
    'so-filebeat',
    'so-influxdb',
    'so-kibana',
    'so-kratos',
@@ -54,7 +52,6 @@
{% if GLOBALS.role == 'so-searchnode' %}
  {% set NODE_CONTAINERS = [
    'so-elasticsearch',
    'so-filebeat',
    'so-logstash',
    'so-nginx'
  ] %}
@@ -64,7 +61,6 @@
  {% set NODE_CONTAINERS = [
    'so-curator',
    'so-elasticsearch',
    'so-filebeat',
    'so-logstash',
    'so-nginx',
    'so-redis',
@@ -83,7 +79,6 @@
    'so-elasticsearch',
    'so-elastic-fleet',
    'so-elastic-fleet-package-registry',
    'so-filebeat',
    'so-influxdb',
    'so-kibana',
    'so-kratos',
@@ -94,7 +89,6 @@

{% if GLOBALS.role == 'so-receiver' %}
  {% set NODE_CONTAINERS = [
    'so-filebeat',
    'so-logstash',
    'so-redis',
  ] %}

@@ -1,6 +1,11 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}

disable_firewalld:
  service.dead:
    - name: firewalld
    - enable: False

create_sysconfig_iptables:
  file.touch:
    - name: /etc/sysconfig/iptables
@@ -3,7 +3,7 @@
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}

{% do KIBANACONFIG.kibana.config.server.update({'publicBaseUrl': 'https://' ~ GLOBALS.url_base ~ '/kibana'}) %}
{% do KIBANACONFIG.kibana.config.elasticsearch.update({'hosts': ['https://' ~ GLOBALS.manager_ip ~ ':9200']}) %}
{% do KIBANACONFIG.kibana.config.elasticsearch.update({'hosts': ['https://' ~ GLOBALS.manager ~ ':9200']}) %}

{% do KIBANACONFIG.kibana.config.elasticsearch.update({'username': salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:user'), 'password': salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:pass')}) %}

@@ -5,7 +5,6 @@ kibana:
      name: kibana
      host: "0.0.0.0"
      basePath: /kibana
      publicBaseUrl: https://{{salt['pillar.get']('global:url_base')}}/kibana
      rewriteBasePath: false
    elasticsearch:
      ssl:
@@ -89,6 +89,8 @@ so-kibana:
      - ELASTICSEARCH_HOST={{ GLOBALS.manager }}
      - ELASTICSEARCH_PORT=9200
      - MANAGER={{ GLOBALS.manager }}
    - extra_hosts:
      - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
    - binds:
      - /opt/so/conf/kibana/etc:/usr/share/kibana/config:rw
      - /opt/so/log/kibana:/var/log/kibana:rw
@@ -14,14 +14,14 @@ mysqlpkgs:
  pkg.installed:
    - skip_suggestions: False
    - pkgs:
{% if grains['os'] != 'CentOS' %}
{% if grains['os'] != 'Rocky' %}
  {% if grains['oscodename'] == 'bionic' %}
      - python3-mysqldb
  {% elif grains['oscodename'] == 'focal' %}
      - python3-mysqldb
  {% endif %}
{% else %}
      - MySQL-python
      - python3-mysqlclient
{% endif %}

mysqletcdir:
@@ -92,7 +92,7 @@ so-mysql:
      - {{ BINDING }}
      {% endfor %}
    - environment:
      - MYSQL_ROOT_HOST={{ GLOBALS.manager_ip }}
      - MYSQL_ROOT_HOST={{ GLOBALS.so_docker_bip }}
      - MYSQL_ROOT_PASSWORD=/etc/mypass
    - binds:
      - /opt/so/conf/mysql/etc/my.cnf:/etc/my.cnf:ro
@@ -105,7 +105,7 @@ so-mysql:
      - file: mysqlcnf
      - file: mysqlpass
  cmd.run:
    - name: until nc -z {{ GLOBALS.manager_ip }} 3306; do sleep 1; done
    - name: until nc -z {{ GLOBALS.so_docker_bip }} 3306; do sleep 1; done
    - timeout: 600
    - onchanges:
      - docker_container: so-mysql

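Note the effect of the MYSQL_ROOT_HOST change above: the root account is now scoped to the Docker bridge IP rather than the manager IP. A minimal sketch of the rendered environment, assuming the sobridge defaults shown later in this diff (gateway 172.17.1.1):

      - MYSQL_ROOT_HOST=172.17.1.1
      - MYSQL_ROOT_PASSWORD=/etc/mypass

The readiness check then waits on the same bridge address: until nc -z 172.17.1.1 3306; do sleep 1; done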
@@ -1,6 +1,6 @@
{%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'docker/docker.map.jinja' import DOCKER %}
{%- set role = grains.id.split('_') | last %}

{%- set influxpass = salt['pillar.get']('secrets:influx_pass') %}
{%- set influxauth = ('so:' + influxpass) | base64_encode %}

@@ -98,7 +98,7 @@ http {
        ssl_protocols TLSv1.2;

        location ~* (^/login/.*|^/js/.*|^/css/.*|^/images/.*) {
          proxy_pass http://{{ GLOBALS.manager_ip }}:9822;
          proxy_pass http://{{ GLOBALS.manager }}:9822;
          proxy_read_timeout 90;
          proxy_connect_timeout 90;
          proxy_set_header x-user-id "";
@@ -115,7 +115,7 @@ http {
          auth_request /auth/sessions/whoami;
          auth_request_set $userid $upstream_http_x_kratos_authenticated_identity_id;
          proxy_set_header x-user-id $userid;
          proxy_pass http://{{ GLOBALS.manager_ip }}:9822/;
          proxy_pass http://{{ GLOBALS.manager }}:9822/;
          proxy_read_timeout 300;
          proxy_connect_timeout 300;
          proxy_set_header Host $host;
@@ -129,7 +129,7 @@ http {

        location ~ ^/auth/.*?(whoami|login|logout|settings) {
          rewrite /auth/(.*) /$1 break;
          proxy_pass http://{{ GLOBALS.manager_ip }}:4433;
          proxy_pass http://{{ GLOBALS.manager }}:4433;
          proxy_read_timeout 90;
          proxy_connect_timeout 90;
          proxy_set_header Host $host;
@@ -186,7 +186,7 @@ http {
        location /influxdb/ {
          auth_request /auth/sessions/whoami;
          rewrite /influxdb/api/(.*) /api/$1 break;
          proxy_pass https://{{ GLOBALS.manager_ip }}:8086/;
          proxy_pass https://{{ GLOBALS.manager }}:8086/;
          proxy_read_timeout 300;
          proxy_connect_timeout 90;
          proxy_set_header Host $host;
@@ -200,7 +200,7 @@ http {
        location /kibana/ {
          auth_request /auth/sessions/whoami;
          rewrite /kibana/(.*) /$1 break;
          proxy_pass http://{{ GLOBALS.manager_ip }}:5601/;
          proxy_pass http://{{ GLOBALS.manager }}:5601/;
          proxy_read_timeout 300;
          proxy_connect_timeout 300;
          proxy_set_header Host $host;
@@ -209,24 +209,10 @@ http {
          proxy_set_header Proxy "";
          proxy_set_header X-Forwarded-Proto $scheme;
        }

        location /nodered/ {
          auth_request /auth/sessions/whoami;
          proxy_pass http://{{ GLOBALS.manager_ip }}:1880/;
          proxy_read_timeout 90;
          proxy_connect_timeout 90;
          proxy_set_header Host $host;
          proxy_set_header X-Real-IP $remote_addr;
          proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
          proxy_set_header Upgrade $http_upgrade;
          proxy_set_header Connection "Upgrade";
          proxy_set_header Proxy "";
          proxy_set_header X-Forwarded-Proto $scheme;
        }


        location /playbook/ {
          auth_request /auth/sessions/whoami;
          proxy_pass http://{{ GLOBALS.manager_ip }}:3200/playbook/;
          proxy_pass http://{{ GLOBALS.manager }}:3000/playbook/;
          proxy_read_timeout 90;
          proxy_connect_timeout 90;
          proxy_set_header Host $host;
@@ -239,7 +225,7 @@ http {

        location /soctopus/ {
          auth_request /auth/sessions/whoami;
          proxy_pass http://{{ GLOBALS.manager_ip }}:7000/;
          proxy_pass http://{{ GLOBALS.manager }}:7000/;
          proxy_read_timeout 300;
          proxy_connect_timeout 300;
          proxy_set_header Host $host;
@@ -261,7 +247,7 @@ http {
          if ($http_authorization = "") {
            return 403;
          }
          proxy_pass http://{{ GLOBALS.manager_ip }}:9822/;
          proxy_pass http://{{ GLOBALS.manager }}:9822/;
          proxy_read_timeout 90;
          proxy_connect_timeout 90;
          proxy_set_header x-user-id "";

@@ -88,6 +88,8 @@ so-nginx:
    - networks:
      - sobridge:
        - ipv4_address: {{ DOCKER.containers['so-nginx'].ip }}
    - extra_hosts:
      - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
    - binds:
      - /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - /opt/so/log/nginx/:/var/log/nginx:rw
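Because the proxy_pass targets above now use the manager's hostname instead of its IP, the nginx container needs a resolvable entry for that name; the extra_hosts bind supplies it. A sketch of the rendered value, assuming a hypothetical manager at 10.0.0.10:

    - extra_hosts:
      - so-manager:10.0.0.10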
@@ -7,7 +7,7 @@ include:

wait_for_playbook:
  cmd.run:
    - name: until nc -z {{ GLOBALS.manager_ip }} 3200; do sleep 1; done
    - name: until nc -z {{ GLOBALS.manager }} 3200; do sleep 1; done
    - timeout: 300

create_user:

@@ -19,7 +19,7 @@ create_playbookdbuser:
    - name: playbookdbuser
    - password: {{ PLAYBOOKPASS }}
    - host: "{{ DOCKER.sorange.split('/')[0] }}/255.255.255.0"
    - connection_host: {{ GLOBALS.manager_ip }}
    - connection_host: {{ GLOBALS.manager }}
    - connection_port: 3306
    - connection_user: root
    - connection_pass: {{ MYSQLPASS }}
@@ -28,7 +28,7 @@ query_playbookdbuser_grants:
  mysql_query.run:
    - database: playbook
    - query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'{{ DOCKER.sorange.split('/')[0] }}/255.255.255.0';"
    - connection_host: {{ GLOBALS.manager_ip }}
    - connection_host: {{ GLOBALS.manager }}
    - connection_port: 3306
    - connection_user: root
    - connection_pass: {{ MYSQLPASS }}
@@ -36,8 +36,8 @@ query_playbookdbuser_grants:
query_updatwebhooks:
  mysql_query.run:
    - database: playbook
    - query: "update webhooks set url = 'http://{{ GLOBALS.manager_ip }}:7000/playbook/webhook' where project_id = 1"
    - connection_host: {{ GLOBALS.manager_ip }}
    - query: "update webhooks set url = 'http://{{ GLOBALS.manager_ip}}:7000/playbook/webhook' where project_id = 1"
    - connection_host: {{ GLOBALS.manager }}
    - connection_port: 3306
    - connection_user: root
    - connection_pass: {{ MYSQLPASS }}
@@ -49,10 +49,10 @@ query_updatepluginurls:
        update settings set value =
        "--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess
        project: '1'
        convert_url: http://{{ GLOBALS.manager_ip }}:7000/playbook/sigmac
        create_url: http://{{ GLOBALS.manager_ip }}:7000/playbook/play"
        convert_url: http://{{ GLOBALS.manager }}:7000/playbook/sigmac
        create_url: http://{{ GLOBALS.manager }}:7000/playbook/play"
        where id = 43
    - connection_host: {{ GLOBALS.manager_ip }}
    - connection_host: {{ GLOBALS.manager }}
    - connection_port: 3306
    - connection_user: root
    - connection_pass: {{ MYSQLPASS }}
@@ -86,7 +86,7 @@ so-playbook:
    - binds:
      - /opt/so/log/playbook:/playbook/log:rw
    - environment:
      - REDMINE_DB_MYSQL={{ GLOBALS.manager_ip }}
      - REDMINE_DB_MYSQL={{ DOCKER.containers['so-mysql'].ip }}
      - REDMINE_DB_DATABASE=playbook
      - REDMINE_DB_USERNAME=playbookdbuser
      - REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }}

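REDMINE_DB_MYSQL above now resolves through the DOCKER map to the so-mysql container's address on the bridge, rather than the manager IP. A minimal sketch of the map shape this state assumes (hypothetical address on the sobridge subnet):

  containers:
    so-mysql:
      ip: 172.17.1.5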
salt/podman/files/podman.service (new file)
@@ -0,0 +1,17 @@
[Unit]
Description=Podman API Service
Requires=podman.socket
After=podman.socket
Documentation=man:podman-api(1)
StartLimitIntervalSec=0

[Service]
Type=oneshot
Environment=REGISTRIES_CONFIG_PATH=/etc/containers/registries.conf
ExecStart=/usr/bin/podman system service
TimeoutStopSec=30
KillMode=process

[Install]
WantedBy=multi-user.target
Also=podman.socket
salt/podman/files/podman.socket (new file)
@@ -0,0 +1,10 @@
[Unit]
Description=Podman API Socket
Documentation=man:podman-api(1)

[Socket]
ListenStream=%t/podman/podman.sock
SocketMode=0660

[Install]
WantedBy=sockets.target
salt/podman/files/sobridge.conflist (new file)
@@ -0,0 +1,48 @@
{
  "args": {
    "podman_options": {
      "isolate": "true",
      "mtu": "1500"
    }
  },
  "cniVersion": "0.4.0",
  "name": "sobridge",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "sobridge",
      "isGateway": true,
      "ipMasq": false,
      "mtu": 1500,
      "hairpinMode": false,
      "ipam": {
        "type": "host-local",
        "routes": [
          {
            "dst": "0.0.0.0/0"
          }
        ],
        "ranges": [
          [
            {
              "subnet": "172.17.1.0/24",
              "gateway": "172.17.1.1"
            }
          ]
        ]
      },
      "capabilities": {
        "ips": true
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": false
      }
    },
    {
      "type": "tuning"
    }
  ]
}
salt/podman/init.sls (new file)
@@ -0,0 +1,56 @@
{% from 'docker/docker.map.jinja' import DOCKER %}

Podman pkg:
  pkg.installed:
    - name: podman

cnipkg:
  pkg.installed:
    - name: containernetworking-plugins

{#
Podman service:
  file.managed:
    - name: /usr/lib/systemd/system/podman.service
    - source: salt://podman/podman.service
#}

sobridgeconf:
  file.managed:
    - name: /etc/cni/net.d/sobridge.conflist
    - source: salt://podman/files/sobridge.conflist

Podman_socket_service:
  service.running:
    - name: podman.socket
    - enable: true

Podman_service:
  service.running:
    - name: podman.service
    - enable: true

Docker socket:
  file.symlink:
    - name: /var/run/docker.sock
    - target: /var/run/podman/podman.sock

podman_docker_symlink:
  file.symlink:
    - name: /usr/bin/docker
    - target: /usr/bin/podman

{#
sos_docker_net:
  docker_network.present:
    - name: sobridge
    - subnet: {{ DOCKER.sorange }}
    - gateway: {{ DOCKER.sobip }}
    - options:
        com.docker.network.bridge.name: 'sobridge'
        com.docker.network.driver.mtu: '1500'
        com.docker.network.bridge.enable_ip_masquerade: 'true'
        com.docker.network.bridge.enable_icc: 'true'
        com.docker.network.bridge.host_binding_ipv4: '0.0.0.0'
    - unless: 'docker network ls | grep sobridge'
#}
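A quick way to confirm the Docker-compatibility shims above would be a state like the following; this is an illustrative sketch, not part of the upstream diff:

verify_podman_docker_shim:
  cmd.run:
    # If the symlinks and the API socket are healthy, podman answers the docker CLI call
    - name: 'docker info > /dev/null 2>&1'
    - require:
      - file: Docker socket
      - file: podman_docker_symlink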
@@ -53,6 +53,8 @@ so-dockerregistry:
      - /etc/pki/registry.crt:/etc/pki/registry.crt:ro
      - /etc/pki/registry.key:/etc/pki/registry.key:ro
    - client_timeout: 180
    - environment:
      - HOME=/root
    - retry:
        attempts: 5
        interval: 30

salt/repo/client/files/rocky/keys/RPM-GPG-KEY-EPEL-9 (new file)
@@ -0,0 +1,29 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQINBGE3mOsBEACsU+XwJWDJVkItBaugXhXIIkb9oe+7aadELuVo0kBmc3HXt/Yp
CJW9hHEiGZ6z2jwgPqyJjZhCvcAWvgzKcvqE+9i0NItV1rzfxrBe2BtUtZmVcuE6
2b+SPfxQ2Hr8llaawRjt8BCFX/ZzM4/1Qk+EzlfTcEcpkMf6wdO7kD6ulBk/tbsW
DHX2lNcxszTf+XP9HXHWJlA2xBfP+Dk4gl4DnO2Y1xR0OSywE/QtvEbN5cY94ieu
n7CBy29AleMhmbnx9pw3NyxcFIAsEZHJoU4ZW9ulAJ/ogttSyAWeacW7eJGW31/Z
39cS+I4KXJgeGRI20RmpqfH0tuT+X5Da59YpjYxkbhSK3HYBVnNPhoJFUc2j5iKy
XLgkapu1xRnEJhw05kr4LCbud0NTvfecqSqa+59kuVc+zWmfTnGTYc0PXZ6Oa3rK
44UOmE6eAT5zd/ToleDO0VesN+EO7CXfRsm7HWGpABF5wNK3vIEF2uRr2VJMvgqS
9eNwhJyOzoca4xFSwCkc6dACGGkV+CqhufdFBhmcAsUotSxe3zmrBjqA0B/nxIvH
DVgOAMnVCe+Lmv8T0mFgqZSJdIUdKjnOLu/GRFhjDKIak4jeMBMTYpVnU+HhMHLq
uDiZkNEvEEGhBQmZuI8J55F/a6UURnxUwT3piyi3Pmr2IFD7ahBxPzOBCQARAQAB
tCdGZWRvcmEgKGVwZWw5KSA8ZXBlbEBmZWRvcmFwcm9qZWN0Lm9yZz6JAk4EEwEI
ADgWIQT/itE0RZcQbs6BO5GKOHK/MihGfAUCYTeY6wIbDwULCQgHAgYVCgkICwIE
FgIDAQIeAQIXgAAKCRCKOHK/MihGfFX/EACBPWv20+ttYu1A5WvtHJPzwbj0U4yF
3zTQpBglQ2UfkRpYdipTlT3Ih6j5h2VmgRPtINCc/ZE28adrWpBoeFIS2YAKOCLC
nZYtHl2nCoLq1U7FSttUGsZ/t8uGCBgnugTfnIYcmlP1jKKA6RJAclK89evDQX5n
R9ZD+Cq3CBMlttvSTCht0qQVlwycedH8iWyYgP/mF0W35BIn7NuuZwWhgR00n/VG
4nbKPOzTWbsP45awcmivdrS74P6mL84WfkghipdmcoyVb1B8ZP4Y/Ke0RXOnLhNe
CfrXXvuW+Pvg2RTfwRDtehGQPAgXbmLmz2ZkV69RGIr54HJv84NDbqZovRTMr7gL
9k3ciCzXCiYQgM8yAyGHV0KEhFSQ1HV7gMnt9UmxbxBE2pGU7vu3CwjYga5DpwU7
w5wu1TmM5KgZtZvuWOTDnqDLf0cKoIbW8FeeCOn24elcj32bnQDuF9DPey1mqcvT
/yEo/Ushyz6CVYxN8DGgcy2M9JOsnmjDx02h6qgWGWDuKgb9jZrvRedpAQCeemEd
fhEs6ihqVxRFl16HxC4EVijybhAL76SsM2nbtIqW1apBQJQpXWtQwwdvgTVpdEtE
r4ArVJYX5LrswnWEQMOelugUG6S3ZjMfcyOa/O0364iY73vyVgaYK+2XtT2usMux
VL469Kj5m13T6w==
=Mjs/
-----END PGP PUBLIC KEY BLOCK-----
salt/repo/client/files/rocky/keys/RPM-GPG-KEY-rockyofficial (new file)
@@ -0,0 +1,29 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQINBGAofzYBEAC6yS1azw6f3wmaVd//3aSy6O2c9+jeetulRQvg2LvhRRS1eNqp
/x9tbBhfohu/tlDkGpYHV7diePgMml9SZDy1sKlI3tDhx6GZ3xwF0fd1vWBZpmNk
D9gRkUmYBeLotmcXQZ8ZpWLicosFtDpJEYpLUhuIgTKwt4gxJrHvkWsGQiBkJxKD
u3/RlL4IYA3Ot9iuCBflc91EyAw1Yj0gKcDzbOqjvlGtS3ASXgxPqSfU0uLC9USF
uKDnP2tcnlKKGfj0u6VkqISliSuRAzjlKho9Meond+mMIFOTT6qp4xyu+9Dj3IjZ
IC6rBXRU3xi8z0qYptoFZ6hx70NV5u+0XUzDMXdjQ5S859RYJKijiwmfMC7gZQAf
OkdOcicNzen/TwD/slhiCDssHBNEe86Wwu5kmDoCri7GJlYOlWU42Xi0o1JkVltN
D8ZId+EBDIms7ugSwGOVSxyZs43q2IAfFYCRtyKHFlgHBRe9/KTWPUrnsfKxGJgC
Do3Yb63/IYTvfTJptVfhQtL1AhEAeF1I+buVoJRmBEyYKD9BdU4xQN39VrZKziO3
hDIGng/eK6PaPhUdq6XqvmnsZ2h+KVbyoj4cTo2gKCB2XA7O2HLQsuGduHzYKNjf
QR9j0djjwTrsvGvzfEzchP19723vYf7GdcLvqtPqzpxSX2FNARpCGXBw9wARAQAB
tDNSZWxlYXNlIEVuZ2luZWVyaW5nIDxpbmZyYXN0cnVjdHVyZUByb2NreWxpbnV4
Lm9yZz6JAk4EEwEIADgWIQRwUcRwqSn0VM6+N7cVr12sbXRaYAUCYCh/NgIbDwUL
CQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRAVr12sbXRaYLFmEACSMvoO1FDdyAbu
1m6xEzDhs7FgnZeQNzLZECv2j+ggFSJXezlNVOZ5I1I8umBan2ywfKQD8M+IjmrW
k9/7h9i54t8RS/RN7KNo7ECGnKXqXDPzBBTs1Gwo1WzltAoaDKUfXqQ4oJ4aCP/q
/XPVWEzgpJO1XEezvCq8VXisutyDiXEjjMIeBczxb1hbamQX+jLTIQ1MDJ4Zo1YP
zlUqrHW434XC2b1/WbSaylq8Wk9cksca5J+g3FqTlgiWozyy0uxygIRjb6iTzKXk
V7SYxeXp3hNTuoUgiFkjh5/0yKWCwx7aQqlHar9GjpxmBDAO0kzOlgtTw//EqTwR
KnYZLig9FW0PhwvZJUigr0cvs/XXTTb77z/i/dfHkrjVTTYenNyXogPtTtSyxqca
61fbPf0B/S3N43PW8URXBRS0sykpX4SxKu+PwKCqf+OJ7hMEVAapqzTt1q9T7zyB
QwvCVx8s7WWvXbs2d6ZUrArklgjHoHQcdxJKdhuRmD34AuXWCLW+gH8rJWZpuNl3
+WsPZX4PvjKDgMw6YMcV7zhWX6c0SevKtzt7WP3XoKDuPhK1PMGJQqQ7spegGB+5
DZvsJS48Ip0S45Qfmj82ibXaCBJHTNZE8Zs+rdTjQ9DS5qvzRA1sRA1dBb/7OLYE
JmeWf4VZyebm+gc50szsg6Ut2yT8hw==
=AiP8
-----END PGP PUBLIC KEY BLOCK-----
salt/repo/client/files/rocky/keys/SALTSTACK-GPG-KEY2.pub (new file)
@@ -0,0 +1,31 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQENBGLXV/8BCADCuomk2pibSOuLQeKMIwV3Afy60080hykdc4tU4qQS+zBJZZC0
VBl2TAOmMWyeY5DRF2ibRTx6Ap8qYefuEjWlo2WHWWZH4WhNkJWL3aWiu8Ga+fFo
ebjoUFLGgpKDGKveO9PF8A41IP1CLvDicpWXTxfqzQKDOvg3g5EmCx+5ksviXHJ1
lY5CBbhVPmU3ruzGBqN/6B90VyTicbIyIZKZdnElAqaW6OiEaOmj2Oadi3ARJLWA
8rpVPweZE0/S4B5UIuMh+JVJU3Os1BUXHKN3LAPENZa1NNYX3j53GxGMf+SAKe0g
QHe+fHiiB7a6iBl09W8cUJh8HINXW+vvU6mZABEBAAG0MlNhbHRTdGFjayBQYWNr
YWdpbmcgVGVhbSA8cGFja2FnaW5nQHNhbHRzdGFjay5jb20+iQFSBBMBCAA8FiEE
9+rekz4krjI0B2hWN6cQR50w17YFAmLXV/8CGwMFCwkIBwIDIgIBBhUKCQgLAgQW
AgMBAh4HAheAAAoJEDenEEedMNe2d0MH/36khQzCWMc5ezznO7bcOHOS3OWjQveF
Vv60y54QRnINCEa7w7ckjiap3dUSJxTo5eoAKNbgX5SgrshEY1HDXDoqgumHJLFW
J+L4f3CXFBhvObUOwB7ApUNHURcoNQYK7kS/vUJrQ3dFyT7uvgysGtv+/WpboY1s
ScJnVtWyQmLe7qj5pJ0aI5pPjFnP9869zPScNb6o6lbqGp/xhnL5NkZCF0DNgItw
HXyNsRPyc8JG+P+GP80XWZ37ajEdwkiPbtu3CD5pvBO1w5FPLBwuH5CSgQFEcA4V
QH8ThU0P1IhKe3xPRNgawcBTAHXqOD0OxilAIsQdfrKkRiTEcZtFZW25AQ0EYtdX
/wEIANFBzJfSks4ti/JQkECtEAwH7OtqUxu1QhSSRusGsQu/PpjBRZzlaVlKjS4c
fGTiZ8+25RX063vBQ+XpuTN9T9boEE4EywM11FCx1zRZIc+HlLOIJ10uKWUapmPM
+7flnQWXMgJzP47rHe0ofEHlP4/av5C1imgWEtEpYyn1B4qgSxvLFDq46rD5m+DP
2xNZbwWd0uSAG/wZNonVkISYymB0UTnUm8FABH1Ci7lXO9JnuW+IvVt32C5VibGy
FXdAJGmIiqsvBhJSUl+GJhO6NTXntuevqPLUXD9PuHWo4Vo1Afek8kqZByyiyrTZ
StDhrbo/8dSAVQMibLEfNS7R0QkAEQEAAYkBNgQYAQgAIBYhBPfq3pM+JK4yNAdo
VjenEEedMNe2BQJi11f/AhsMAAoJEDenEEedMNe2zhgH/0wxbQpaCho0BRbUbe6L
jm9r3yTWn6M+yYv+cBeH9sbobIVOqTvZcawzTEPWa+eVbKgkqhZjUTyfFDpjq9s6
67zLZnCh85hLoyieSQBER59dc1pmqZJP3VrAIT1lGKMIdjZoN8JAF8IbmJHE1j65
iZZdhbxfFHnDx22gQ+3nfniTNTWsfVAQeoAjeOuakPKdfUEMsXPBhtBBuFY4NcrT
TIsBevT4J/STCLkEqlMtYC8ldxUCZqQXdtxqltC4k+y0kp4PmNc3/Vmp65oAeuxI
d8TNwgZdamdinv5mPrTfBqSNiELQAcPQnOwpsqEDYF2pq9L4sdNGavP5ZvPGRLkH
+uU=
=383D
-----END PGP PUBLIC KEY BLOCK-----
salt/repo/client/files/rocky/keys/docker.pub (new file)
@@ -0,0 +1,28 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQINBFit5IEBEADDt86QpYKz5flnCsOyZ/fk3WwBKxfDjwHf/GIflo+4GWAXS7wJ
1PSzPsvSDATV10J44i5WQzh99q+lZvFCVRFiNhRmlmcXG+rk1QmDh3fsCCj9Q/yP
w8jn3Hx0zDtz8PIB/18ReftYJzUo34COLiHn8WiY20uGCF2pjdPgfxE+K454c4G7
gKFqVUFYgPug2CS0quaBB5b0rpFUdzTeI5RCStd27nHCpuSDCvRYAfdv+4Y1yiVh
KKdoe3Smj+RnXeVMgDxtH9FJibZ3DK7WnMN2yeob6VqXox+FvKYJCCLkbQgQmE50
uVK0uN71A1mQDcTRKQ2q3fFGlMTqJbbzr3LwnCBE6hV0a36t+DABtZTmz5O69xdJ
WGdBeePCnWVqtDb/BdEYz7hPKskcZBarygCCe2Xi7sZieoFZuq6ltPoCsdfEdfbO
+VBVKJnExqNZCcFUTEnbH4CldWROOzMS8BGUlkGpa59Sl1t0QcmWlw1EbkeMQNrN
spdR8lobcdNS9bpAJQqSHRZh3cAM9mA3Yq/bssUS/P2quRXLjJ9mIv3dky9C3udM
+q2unvnbNpPtIUly76FJ3s8g8sHeOnmYcKqNGqHq2Q3kMdA2eIbI0MqfOIo2+Xk0
rNt3ctq3g+cQiorcN3rdHPsTRSAcp+NCz1QF9TwXYtH1XV24A6QMO0+CZwARAQAB
tCtEb2NrZXIgUmVsZWFzZSAoQ0UgcnBtKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
BBMBCgAhBQJYrep4AhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEMUv62ti
Hp816C0P/iP+1uhSa6Qq3TIc5sIFE5JHxOO6y0R97cUdAmCbEqBiJHUPNQDQaaRG
VYBm0K013Q1gcJeUJvS32gthmIvhkstw7KTodwOM8Kl11CCqZ07NPFef1b2SaJ7l
TYpyUsT9+e343ph+O4C1oUQw6flaAJe+8ATCmI/4KxfhIjD2a/Q1voR5tUIxfexC
/LZTx05gyf2mAgEWlRm/cGTStNfqDN1uoKMlV+WFuB1j2oTUuO1/dr8mL+FgZAM3
ntWFo9gQCllNV9ahYOON2gkoZoNuPUnHsf4Bj6BQJnIXbAhMk9H2sZzwUi9bgObZ
XO8+OrP4D4B9kCAKqqaQqA+O46LzO2vhN74lm/Fy6PumHuviqDBdN+HgtRPMUuao
xnuVJSvBu9sPdgT/pR1N9u/KnfAnnLtR6g+fx4mWz+ts/riB/KRHzXd+44jGKZra
IhTMfniguMJNsyEOO0AN8Tqcl0eRBxcOArcri7xu8HFvvl+e+ILymu4buusbYEVL
GBkYP5YMmScfKn+jnDVN4mWoN1Bq2yMhMGx6PA3hOvzPNsUoYy2BwDxNZyflzuAi
g59mgJm2NXtzNbSRJbMamKpQ69mzLWGdFNsRd4aH7PT7uPAURaf7B5BVp3UyjERW
5alSGnBqsZmvlRnVH5BDUhYsWZMPRQS9rRr4iGW0l+TH+O2VJ8aQ
=0Zqq
-----END PGP PUBLIC KEY BLOCK-----
salt/repo/client/files/rocky/keys/securityonion.pub (new file)
@@ -0,0 +1,52 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQINBF7rzwEBEADBg87uJhnC3Ls7s60hbHGaywGrPtbz2WuYA/ev3YS3X7WS75p8
PGlzTWUCujx0pEHbK2vYfExl3zksZ8ZmLyZ9VB3oSLiWBzJgKAeB7YCFEo8te+eE
P2Z+8c+kX4eOV+2waxZyewA2TipSkhWgStSI4Ow8SyVUcUWA3hCw7mo2duNVi7KO
C3vvI3wzirH+8/XIGo+lWTg6yYlSxdf+0xWzYvV2QCMpwzJfARw6GGXtfCZw/zoO
o4+YPsiyztQdyI1y+g3Fbesl65E36DelbyP+lYd2VecX8ELEv0wlKCgHYlk6lc+n
qnOotVjWbsyXuFfo06PHUd6O9n3nmo0drC6kmXGw1e8hu0t8VcGfMTKS/hszwVUY
bHS6kbfsOoAb6LXPWKfqxk/BdreLXmcHHz88DimS3OS0JufkcmkjxEzSFRL0kb2h
QVb1SATrbx+v2RWQXvi9sLCjT2fdOiwi1Tgc84orc7A1C3Jwu353YaX9cV+n5uyG
OZ2AULZ5z2h13sVuiZAwfyyFs/O0CJ783hFA2TNPnyNGAgw/kaIo7nNRnggtndBo
oQzVS+BHiFx98IF4zDqmF2r2+jOCjxSrw8KnZBe4bgXFtl89DmjoejGvWDnu2MVM
pZDEs1DcOxHBQmTCWMIYLyNKG0xW6diyWBxEIaa7YgrP6kA+RaDfZ/xXPwARAQAB
tD9TZWN1cml0eSBPbmlvbiBTb2x1dGlvbnMsIExMQyA8aW5mb0BzZWN1cml0eW9u
aW9uc29sdXRpb25zLmNvbT6JAlQEEwEKAD4WIQTIBKk9Nr4Mcz6hlkR8EGC3/lBw
EwUCXuvPAQIbAwUJEswDAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRB8EGC3
/lBwExB1D/42xIDGU2XFNFyTU+ZqzDA8qNC9hEKjLeizbeM8RIm3xO+3p7SdqbuJ
7pA8gk0RiHuILb+Ba1xiSh/w/W2bOxQhsXuWHih2z3W1tI+hu6RQhIm4e6CIHHf7
Vzj4RSvHOVS0AzITUwkHjv0x0Z8zVBPJfEHKkK2x03BqP1o12rd7n2ZMrSfN6sED
fUwOJLDjthShtyLSPBVG8j7T5cfSCPSLhfVOKPQVcI1sSir7RLeyxt1v1kzjQdaA
+znxO8EgfZJN93wzfBrAGcVT8KmpmgwR6p46m20wJXyZC9DZxJ0o1y3toVWTC+kP
Qj1ROPivySVn10rBoOJk8HteyhW07gTcydq+noKHV7SqJ1899xRAYP7rDCfI9iMW
Nn22ZDLnAkIcbNR7JLJCHwsZH/Umo9KO/dIccIqVQel3UCCYZcWTZW0VkcjqVKRa
eK+JQGaJPrBAoxIG5/sMlbk2sINSubNWlcbH6kM0V8NVwdPiOO9xLmp2hI4ICxE3
M+O2HCNX4QYzVizzTFxEvW3ieLa4nePQ8J6lvMI2oLkFP7xHoFluvZnuwfNvoEy0
RnlHExN1UQTUvcbCxIbzjaJ4HJXilWHjgmGaVQO1S7AYskWnNWQ7uJvxnuZBNNwm
pIvwYEZp23fYaWl/xKqnmPMy2ADjROBKlCm7L+Ntq1r7ELGW5ZCTobkCDQRe688B
ARAA22GzdkSAo+mwJ2S1RbJ1G20tFnLsG/NC8iMN3lEh/PSmyPdB7mBtjZ+HPDzF
VSznXZdr3LItBBQOli2hVIj1lZBY7+s2ZufV3TFFwselUwT3b1g1KMkopD95Ckf8
WhLbSz2yqgrvcEvbB0HFX/ZEsHGqIz2kLacixjwXXLWOMQ2LNbeW1f5zQkBnaNNQ
/4njzTj68OxnvfplNYNJqi2pZGb2UqarYX04FqKNuocN8E7AC9FQdBXylmVctw9T
pQVwfCI76bTe6vPWb+keb6UNN1jyXVnhIQ3Fv5sFBsmgXf/hO8tqCotrKjEiK2/i
RkvFeqsGMXreCgYg9zW4k+DcJtVa+Q8juGOjElrubY3Ua9mCusx3vY4QYSWxQ5Ih
k1lXiUcM5Rt38lfpKHRJ5Pd4Y5xlWSQfZ7nmzbf/GzJQz+rWrA0X6Oc6cDOPLNXK
w1dAygre4f2bsp5kHQt6NMefxeNTDmi+4R62K0tb40f5q0Vxz8qdyD48bBsbULNx
kb6mjOAD+FNkfNXcGeuTq9oRnjx8i93mhYsIP5LFNDXS/zSP1nv0ZUFeIlGQGjV9
1wOvT454qkI9sKiVFtd4FrNKZJbKszxxDm+DPfB5j+hRC4oeEJ7w+sVyh3EawtfM
V7Mwj8i+7c3YUCravXBhSwG7SCTggFUgA8lMr8oWVgCATYsAEQEAAYkCPAQYAQoA
JhYhBMgEqT02vgxzPqGWRHwQYLf+UHATBQJe688BAhsMBQkSzAMAAAoJEHwQYLf+
UHATTtwQAJiztPW68ykifpFdwYFp1VC7c+uGLhWBqjDY9NSUKNC9caR7bV0cnNu8
07UG6j18gCB2GSkukXjOR/oTj6rNcW/WouPYfQOrw7+M2Ya8M8iq+E/HOXaXB3b4
FeCcB0UuwfcHHd2KbXrRHA+9GNpmuOcfTCdsPpIr41Xg4QltATDEt/FrzuKspXg4
vUKDXgfnbj7y0JcJM2FfcwWGlnAG5MMRyjJQAleGdiidX/9WxgJ4Mweq4qJM0jr3
Qsrc9VuzxsLr85no3Hn5UYVgT7bBZ59HUbQoi775m78MxN3mWUSdcyLQKovI+YXr
tshTxWIf/2Ovdzt6Wq1WWXOGGuK1qgdPJTFWrlh3amFdb70zR1p6A/Lthd7Zty+n
QjRZRQo5jBSnYtjhMrZP6rxM3QqnQ0frEKK9HfDYONk1Bw18CUtdwFGb9OMregLR
IjvNLp9coSh5yYAepZyUGEPRET0GsmVw2trQF0uyMSkQfiq2zjPto6WWbsmrrbLr
cfZ/wnBw1FoNEd51U54euo9yvOgOVtJGvqLgHNwB8574FhQhoWAMhyizqdgeEt26
m3FXecUNKL/AK71/l04vor+/WsXe8uhDg3O84qeYa9wgd8LZZVmGZJDosSwqYjtb
LdNNm+v60Zo6rFWSREegqi/nRTTDdxdW99ybjlh+mpbq3xavyFXF
=bhkm
-----END PGP PUBLIC KEY BLOCK-----
salt/repo/client/files/rocky/yum.conf.jinja (new file)
@@ -0,0 +1,17 @@
{% set proxy = salt['pillar.get']('manager:proxy') -%}
[main]
cachedir=/var/cache/yum/$basearch/$releasever
keepcache=0
debuglevel=2
logfile=/var/log/yum.log
exactarch=1
obsoletes=1
gpgcheck=1
plugins=1
installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }}
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
distroverpkg=centos-release
clean_requirements_on_remove=1
{% if proxy -%}
proxy={{ proxy }}
{% endif %}
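The template above only emits a proxy line when the manager pillar provides one. A minimal pillar sketch (hypothetical proxy URL):

  manager:
    proxy: http://proxy.example.com:3128

which renders "proxy=http://proxy.example.com:3128" at the end of /etc/yum.conf.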
@@ -1,16 +1,16 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.os == 'CentOS' %}
{% if GLOBALS.os == 'Rocky' %}

{% set REPOPATH = '/etc/yum.repos.d/' %}
{% set ABSENTFILES = [
  'CentOS-Base.repo',
  'CentOS-CR.repo',
  'CentOS-Debuginfo.repo',
  'CentOS-fasttrack.repo',
  'CentOS-Media.repo',
  'CentOS-Sources.repo',
  'CentOS-Vault.repo',
  'CentOS-x86_64-kernel.repo',
  'Rocky-Base.repo',
  'Rocky-CR.repo',
  'Rocky-Debuginfo.repo',
  'Rocky-fasttrack.repo',
  'Rocky-Media.repo',
  'Rocky-Sources.repo',
  'Rocky-Vault.repo',
  'Rocky-x86_64-kernel.repo',
  'docker-ce.repo',
  'epel.repo',
  'epel-testing.repo',

salt/repo/client/rocky.sls (new file)
@@ -0,0 +1,62 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

{% from 'repo/client/map.jinja' import ABSENTFILES with context %}
{% from 'repo/client/map.jinja' import REPOPATH with context %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

{% set role = grains.id.split('_') | last %}
{% set MANAGER = salt['grains.get']('master') %}
{% if grains['os'] == 'Rocky' %}

{% if ABSENTFILES|length > 0 %}
{% for file in ABSENTFILES %}
{{ file }}:
  file.absent:
    - name: {{ REPOPATH }}{{ file }}
    - onchanges_in:
      - cmd: cleanyum
{% endfor %}
{% endif %}

cleanyum:
  cmd.run:
    - name: 'yum clean all'
    - onchanges:
      - so_repo

yumconf:
  file.managed:
    - name: /etc/yum.conf
    - source: salt://repo/client/files/rocky/yum.conf.jinja
    - mode: 644
    - template: jinja
    - show_changes: False

repair_yumdb:
  cmd.run:
    - name: 'mv -f /var/lib/rpm/__db* /tmp && yum clean all'
    - onlyif:
      - 'yum check-update 2>&1 | grep "Error: rpmdb open failed"'

crsynckeys:
  file.recurse:
    - name: /etc/pki/rpm-gpg
    - source: salt://repo/client/files/rocky/keys/

so_repo:
  pkgrepo.managed:
    - name: securityonion
    - humanname: Security Onion Repo
{% if GLOBALS.role in ['so-eval', 'so-standalone', 'so-import', 'so-manager', 'so-managersearch'] %}
    - baseurl: file:///nsm/repo/
{% else %}
    - baseurl: https://{{ GLOBALS.repo_host }}/repo
{% endif %}
    - enabled: 1
    - gpgcheck: 1

{% endif %}

# TODO: Add a pillar entry for custom repos
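Pending that TODO, a custom-repo pillar could plausibly mirror so_repo above; this is an illustrative sketch only, nothing here exists upstream yet:

  yum:
    repos:
      myextras:
        humanname: Local Extras
        baseurl: https://repo.example.com/extras
        gpgcheck: 0

with a matching pkgrepo.managed loop over salt['pillar.get']('yum:repos', {}).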
@@ -1,3 +0,0 @@
patch_package:
  pkg.installed:
    - name: patch
@@ -1,6 +1,6 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}

{% if GLOBALS.os != 'CentOS' %}
{% if GLOBALS.os != 'Rocky' %}
saltpymodules:
  pkg.installed:
    - pkgs:
@@ -18,9 +18,3 @@ salt_bootstrap:
    - name: /usr/sbin/bootstrap-salt.sh
    - source: salt://salt/scripts/bootstrap-salt.sh
    - mode: 755

{% if GLOBALS.os == 'CentOS' %}
remove_salt-2019-2-5.repo:
  file.absent:
    - name: /etc/yum.repos.d/salt-2019-2-5.repo
{% endif %}

@@ -20,7 +20,7 @@
{% set INSTALLEDSALTVERSION = salt['pkg.version']('salt-minion').split(SPLITCHAR)[0] %}

{% if grains.saltversion|string != SALTVERSION|string %}
{% if grains.os|lower in ['centos', 'redhat'] %}
{% if grains.os|lower in ['rocky', 'redhat'] %}
{% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
{% elif grains.os|lower == 'ubuntu' %}
{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
  master:
    version: 3004.2
    version: 3006.0+0na.61a7bd9

@@ -2,6 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
  minion:
    version: 3004.2
    version: 3006.0+0na.61a7bd9
    check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
    service_start_delay: 30 # in seconds.

@@ -10,7 +10,6 @@

include:
  - salt
  - salt.helper-packages
  - systemd.reload
  - repo.client

@@ -100,8 +99,3 @@ salt_minion_service:
      - file: salt_minion_service_unit_file
{% endif %}
    - order: last


patch_pkg:
  pkg.installed:
    - name: patch

@@ -3,6 +3,7 @@

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'soc/merged.map.jinja' import DOCKER_EXTRA_HOSTS %}

include:
  - manager.sync_es_users
@@ -110,13 +111,7 @@ so-soc:
      - /opt/so/conf/soc/soc_users_roles:/opt/sensoroni/rbac/users_roles:rw
      - /opt/so/conf/soc/salt:/opt/sensoroni/salt:rw
      - /opt/so/saltstack:/opt/so/saltstack:rw
    - extra_hosts:
      - {{GLOBALS.influxdb_host}}:{{pillar.node_data[GLOBALS.influxdb_host].ip}}
{%- if salt['pillar.get']('nodestab', {}) %}
  {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
      - {{ SN.split('_')|first }}:{{ SNDATA.ip }}
  {%- endfor %}
{%- endif %}
    - extra_hosts: {{ DOCKER_EXTRA_HOSTS }}
    - port_bindings:
      {% for BINDING in DOCKER.containers['so-soc'].port_bindings %}
      - {{ BINDING }}

@@ -1,5 +1,9 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'soc/defaults.map.jinja' import SOCDEFAULTS with context %}
{% from 'logstash/map.jinja' import LOGSTASH_NODES %}
{% set DOCKER_EXTRA_HOSTS = LOGSTASH_NODES %}
{% do DOCKER_EXTRA_HOSTS.append({GLOBALS.influxdb_host:pillar.node_data[GLOBALS.influxdb_host].ip}) %}

{% set SOCMERGED = salt['pillar.get']('soc', SOCDEFAULTS, merge=true) %}

{# if SOCMERGED.server.modules.cases == httpcase details come from the soc pillar #}

@@ -41,18 +41,12 @@ trusttheca:
    - name: /etc/ssl/certs/intca.crt
    - text: {{ trusttheca_text }}

{% if grains['os'] != 'CentOS' %}
# Install packages needed for the sensor
m2cryptopkgs:
  pkg.installed:
    - skip_suggestions: False
    - pkgs:
{% if grains['oscodename'] == 'bionic' %}
      - python-m2crypto
{% elif grains['oscodename'] == 'focal' %}
      - python3-m2crypto
{% endif %}
{% endif %}

influxdb_key:
  x509.private_key_managed:

@@ -15,7 +15,7 @@ from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

with open("/opt/so/conf/strelka/filecheck.yaml", "r") as ymlfile:
    cfg = yaml.load(ymlfile)
    cfg = yaml.load(ymlfile, Loader=yaml.Loader)

extract_path = cfg["filecheck"]["extract_path"]
historypath = cfg["filecheck"]["historypath"]

salt/top.sls
@@ -8,7 +8,6 @@
{% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
{% set ELASTALERT = salt['pillar.get']('elastalert:enabled', True) %}
{% set ELASTICSEARCH = salt['pillar.get']('elasticsearch:enabled', True) %}
{% set FILEBEAT = salt['pillar.get']('filebeat:enabled', False) %}
{% set KIBANA = salt['pillar.get']('kibana:enabled', True) %}
{% set LOGSTASH = salt['pillar.get']('logstash:enabled', True) %}
{% set REDIS = salt['pillar.get']('redis:enabled', True) %}
@@ -29,7 +28,7 @@ base:
    - salt.minion-state-apply-test
    - salt.minion

  'G@os:CentOS and G@saltversion:{{saltversion}}':
  'G@os:Rocky and G@saltversion:{{saltversion}}':
    - match: compound
    - yum.packages

@@ -62,9 +61,6 @@ base:
{%- if STRELKA %}
    - strelka
{%- endif %}
{%- if FILEBEAT %}
    - filebeat
{%- endif %}
    - schedule
    - docker_clean

@@ -101,9 +97,6 @@ base:
{%- if STRELKA %}
    - strelka
{%- endif %}
{%- if FILEBEAT %}
    - filebeat
{%- endif %}
    - curator
{%- if ELASTALERT %}
    - elastalert
@@ -151,9 +144,6 @@ base:
{%- if ELASTALERT %}
    - elastalert
{%- endif %}
{%- if FILEBEAT %}
    - filebeat
{%- endif %}
    - utility
    - schedule
    - soctopus
@@ -199,9 +189,6 @@ base:
{%- if STRELKA %}
    - strelka
{%- endif %}
{%- if FILEBEAT %}
    - filebeat
{%- endif %}
    - curator
{%- if ELASTALERT %}
    - elastalert
@@ -226,9 +213,6 @@ base:
{%- if LOGSTASH %}
    - logstash
{%- endif %}
{%- if FILEBEAT %}
    - filebeat
{%- endif %}
    - schedule
    - docker_clean

@@ -266,9 +250,6 @@ base:
{%- if ELASTALERT %}
    - elastalert
{%- endif %}
{%- if FILEBEAT %}
    - filebeat
{%- endif %}
    - utility
    - schedule
    - soctopus
@@ -292,9 +273,6 @@ base:
    - redis
{%- endif %}
    - curator
{%- if FILEBEAT %}
    - filebeat
{%- endif %}
{%- if STRELKA %}
    - strelka
{%- endif %}
@@ -303,9 +281,6 @@ base:
{%- if ZEEKVER != 'SURICATA' %}
    - zeek
{%- endif %}
{%- if FILEBEAT %}
    - filebeat
{%- endif %}
    - schedule
    - docker_clean

@@ -332,9 +307,6 @@ base:
{%- if KIBANA %}
    - kibana.so_savedobjects_defaults
{%- endif %}
{%- if FILEBEAT %}
    - filebeat
{%- endif %}
    - utility
    - suricata
    - zeek
@@ -354,9 +326,6 @@ base:
{%- if REDIS %}
    - redis
{%- endif %}
{%- if FILEBEAT %}
    - filebeat
{%- endif %}
    - schedule
    - docker_clean

@@ -370,10 +339,10 @@ base:
    - docker_clean
    - idh

  'J@workstation:gui:enabled:^[Tt][Rr][Uu][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:CentOS )':
  'J@workstation:gui:enabled:^[Tt][Rr][Uu][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:Rocky )':
    - match: compound
    - workstation

  'J@workstation:gui:enabled:^[Ff][Aa][Ll][Ss][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:CentOS )':
  'J@workstation:gui:enabled:^[Ff][Aa][Ll][Ss][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:Rocky )':
    - match: compound
    - workstation.remove_gui

@@ -1,4 +1,5 @@
{% import 'vars/init.map.jinja' as INIT %}
{% from 'docker/docker.map.jinja' import DOCKER %}

{% from 'vars/' ~ INIT.GRAINS.role.split('-')[1] ~ '.map.jinja' import ROLE_GLOBALS %} {# role is so-role so we have to split off the 'so' #}

@@ -21,6 +22,8 @@
    'md_engine': INIT.PILLAR.global.mdengine,
    'pipeline': INIT.PILLAR.global.pipeline,
    'so_version': INIT.PILLAR.global.soversion,
    'so_docker_bip': DOCKER.sobip,
    'so_docker_range': DOCKER.sorange,
    'url_base': INIT.PILLAR.global.url_base,
    'so_model': INIT.GRAINS.get('sosmodel',''),
    'description': INIT.PILLAR.sensoroni.get('node_description',''),
@@ -47,9 +50,9 @@
{%
  do GLOBALS.update({
    'application_urls': {
      'kratos': 'http://' ~ GLOBALS.manager_ip ~ ':4434/',
      'elastic': 'https://' ~ GLOBALS.manager_ip ~ ':9200/',
      'influxdb': 'https://' ~ GLOBALS.manager_ip ~ ':8086/'
      'kratos': 'http://' ~ GLOBALS.manager ~ ':4434/',
      'elastic': 'https://' ~ GLOBALS.manager ~ ':9200/',
      'influxdb': 'https://' ~ GLOBALS.manager ~ ':8086/'
    }
  })
%}

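With the DOCKER import added above, GLOBALS now carries the bridge addressing consumed by the mysql and playbook states earlier in this diff. A sketch of the two new keys, assuming the sobridge defaults from podman/files/sobridge.conflist:

    'so_docker_bip': '172.17.1.1',
    'so_docker_range': '172.17.1.0/24',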
@@ -1,7 +1,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}

{# we only want this state to run if it is CentOS #}
{% if GLOBALS.os == 'CentOS' %}
{% if GLOBALS.os == 'Rocky' %}

xwindows_group:
  pkg.group_installed:

@@ -1,7 +1,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}

{# we only want this state to run if it is CentOS #}
{% if GLOBALS.os == 'CentOS' %}
{% if GLOBALS.os == 'Rocky' %}

remove_graphical_target:
  file.symlink:

@@ -1,7 +1,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}

{# we only want this state to run if it is CentOS #}
{% if GLOBALS.os == 'CentOS' %}
{% if GLOBALS.os == 'Rocky' %}

{% set global_ca_text = [] %}
{% set global_ca_server = [] %}

@@ -1,7 +1,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}

{# we only want this state to run if it is CentOS #}
{% if GLOBALS.os == 'CentOS' %}
{% if GLOBALS.os == 'Rocky' %}

include:
  - workstation.packages

@@ -644,7 +644,8 @@ configure_minion() {
|
||||
sed -i "s/{{ GLOBALS.main_interface }}/$MNIC/" /etc/salt/minion.d/mine_functions.conf
|
||||
|
||||
{
|
||||
systemctl restart salt-minion;
|
||||
logCmd "systemctl enable salt-minion";
|
||||
logCmd "systemctl restart salt-minion";
|
||||
} >> "$setup_log" 2>&1
|
||||
}
|
||||
|
||||
@@ -653,7 +654,7 @@ configure_ntp() {
|
||||
|
||||
# Install chrony if it isn't already installed
|
||||
if ! command -v chronyc &> /dev/null; then
|
||||
logCmd "yum -y install chrony"
|
||||
logCmd "dnf -y install chrony"
|
||||
fi
|
||||
|
||||
[[ -f $chrony_conf ]] && mv $chrony_conf "$chrony_conf.bak"
|
||||
@@ -844,10 +845,10 @@ copy_salt_master_config() {
|
||||
title "Copy the Salt master config template to the proper directory"
|
||||
if [ "$setup_type" = 'iso' ]; then
|
||||
logCmd "cp /root/SecurityOnion/files/salt/master/master /etc/salt/master"
|
||||
logCmd "cp /root/SecurityOnion/files/salt/master/salt-master.service /usr/lib/systemd/system/salt-master.service"
|
||||
#logCmd "cp /root/SecurityOnion/files/salt/master/salt-master.service /usr/lib/systemd/system/salt-master.service"
|
||||
else
|
||||
logCmd "cp ../files/salt/master/master /etc/salt/master"
|
||||
logCmd "cp ../files/salt/master/salt-master.service /usr/lib/systemd/system/salt-master.service"
|
||||
#logCmd "cp ../files/salt/master/salt-master.service /usr/lib/systemd/system/salt-master.service"
|
||||
fi
|
||||
info "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
|
||||
logCmd "cp -Rv $temp_install_dir/pillar/ $local_salt_dir/"
|
||||
@@ -857,6 +858,7 @@ copy_salt_master_config() {
|
||||
|
||||
# Restart the service so it picks up the changes
|
||||
logCmd "systemctl daemon-reload"
|
||||
logCmd "systemctl enable salt-master"
|
||||
logCmd "systemctl restart salt-master"
|
||||
}
|
||||
|
||||
@@ -902,6 +904,7 @@ create_manager_pillars() {
|
||||
|
||||
create_repo() {
|
||||
title "Create the repo directory"
|
||||
logCmd "dnf -y install yum-utils createrepo"
|
||||
logCmd "createrepo /nsm/repo"
|
||||
}
|
||||
|
||||
@@ -913,19 +916,13 @@ detect_cloud() {
|
||||
detect_os() {
|
||||
title "Detecting Base OS"
|
||||
if [ -f /etc/redhat-release ]; then
|
||||
if grep -q "CentOS Linux release 7" /etc/redhat-release; then
|
||||
OS=centos
|
||||
OSVER=7
|
||||
is_centos=true
|
||||
pkgman="yum"
|
||||
elif grep -q "Rocky Linux release 8" /etc/redhat-release; then
|
||||
if grep -q "Rocky Linux release 9" /etc/redhat-release; then
|
||||
OS=rocky
|
||||
OSVER=8
|
||||
OSVER=9
|
||||
is_rocky=true
|
||||
pkgman="dnf"
|
||||
info "We currently do not support Rocky Linux $OSVER but we are working on it!"
|
||||
else
|
||||
info "We do not support the version of CentOS you are trying to use."
|
||||
info "We do not support the operating system you are trying to use."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -959,13 +956,10 @@ installer_progress_loop() {
|
||||
}
|
||||
|
||||
installer_prereq_packages() {
|
||||
if [ "$OS" == centos ]; then
|
||||
if [ "$OS" == rocky ]; then
|
||||
if [[ ! $is_iso ]]; then
|
||||
if ! yum versionlock > /dev/null 2>&1; then
|
||||
logCmd "yum -y install yum-plugin-versionlock"
|
||||
fi
|
||||
if ! command -v nmcli > /dev/null 2>&1; then
|
||||
logCmd "yum -y install NetworkManager"
|
||||
logCmd "dnf -y install NetworkManager"
|
||||
fi
|
||||
fi
|
||||
logCmd "systemctl enable NetworkManager"
|
||||
@@ -1550,7 +1544,7 @@ make_some_dirs() {
mkdir -p $local_salt_dir/salt/firewall/portgroups
mkdir -p $local_salt_dir/salt/firewall/ports

for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni curator soc soctopus docker zeek suricata nginx telegraf filebeat logstash soc manager kratos idstools idh elastalert;do
for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni curator soc soctopus docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert;do
mkdir -p $local_salt_dir/pillar/$THEDIR
touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls
touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls
@@ -1740,7 +1734,7 @@ proxy_validate() {
}

reserve_group_ids() {
# This is a hack to fix CentOS from taking group IDs that we need
# This is a hack to keep the OS from taking group IDs that we need
logCmd "groupadd -g 928 kratos"
logCmd "groupadd -g 930 elasticsearch"
logCmd "groupadd -g 931 logstash"
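The groupadd calls assume those GIDs are still free at install time. A hedged pre-check sketch (not in the installer) that skips IDs already in use:

# Only create a group when its GID is not already taken.
for spec in 928:kratos 930:elasticsearch 931:logstash; do
  gid="${spec%%:*}" name="${spec##*:}"
  getent group "$gid" > /dev/null || groupadd -g "$gid" "$name"
done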
@@ -1827,10 +1821,6 @@ reinstall_init() {

# Backup /opt/so since we'll be rebuilding this directory during setup
backup_dir /opt/so "$date_string"
# We need to restore these files during a reinstall so python3-influxdb state doesn't try to patch again
restore_file "/opt/so_old_$date_string/state/influxdb_continuous_query.py.patched" "/opt/so/state/"
restore_file "/opt/so_old_$date_string/state/influxdb_retention_policy.py.patched" "/opt/so/state/"
restore_file "/opt/so_old_$date_string/state/influxdbmod.py.patched" "/opt/so/state/"
# If the elastic license has been accepted restore the state file
restore_file "/opt/so_old_$date_string/state/yeselastic.txt" "/opt/so/state/"
@@ -1862,8 +1852,8 @@ reset_proxy() {

[[ -f /etc/gitconfig ]] && rm -f /etc/gitconfig

if [[ $is_centos ]]; then
sed -i "/proxy=/d" /etc/yum.conf
if [[ $is_rocky ]]; then
sed -i "/proxy=/d" /etc/dnf/dnf.conf
else
[[ -f /etc/apt/apt.conf.d/00-proxy.conf ]] && rm -f /etc/apt/apt.conf.d/00-proxy.conf
fi
@@ -1908,9 +1898,9 @@ drop_install_options() {

remove_package() {
local package_name=$1
if [[ $is_centos ]]; then
if [[ $is_rocky ]]; then
if rpm -qa | grep -q "$package_name"; then
logCmd "yum remove -y $package_name"
logCmd "dnf remove -y $package_name"
fi
else
if dpkg -l | grep -q "$package_name"; then
@@ -1929,13 +1919,8 @@ remove_package() {

securityonion_repo() {
# Remove all the current repos
if [[ $is_centos ]]; then
if [[ $waitforstate ]]; then
# Build the repo locally so we can use it
echo "Syncing Repo"
repo_sync_local
fi
logCmd "yum -v clean all"
if [[ $is_rocky ]]; then
logCmd "dnf -v clean all"
logCmd "mkdir -vp /root/oldrepos"
logCmd "mv -v /etc/yum.repos.d/* /root/oldrepos/"
logCmd "ls -la /etc/yum.repos.d/"
@@ -1955,13 +1940,19 @@ securityonion_repo() {
fi

# need to run clean all before the repo conf files are removed, or the clean cleans nothing
logCmd "yum repolist all"
logCmd "dnf repolist all"
# update this package because the repo config files get added back
# if the package is updated when the update_packages function is called
logCmd "yum -v -y update centos-release"
info "Backing up the .repo files that were added by the centos-release package."
logCmd "mv -bvf /etc/yum.repos.d/CentOS* /root/oldrepos/"
logCmd "yum repolist all"
if [ -f "/etc/yum.repos.d/rocky.repo" ]; then
info "Backing up the .repo files that were added by the rocky-release package."
logCmd "mv -bvf /etc/yum.repos.d/rocky* /root/oldrepos/"
logCmd "dnf repolist all"
fi
if [[ $waitforstate ]]; then
# Build the repo locally so we can use it
echo "Syncing Repo"
repo_sync_local
fi
fi
}
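The stock .repo files are parked in /root/oldrepos rather than deleted, so an aborted setup can be unwound by hand (a recovery sketch, not part of the installer):

# Restore the distribution repos if setup is abandoned.
mv /root/oldrepos/*.repo /etc/yum.repos.d/
dnf -v clean all
dnf repolist all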
@@ -1972,36 +1963,26 @@ repo_sync_local() {
mkdir -p /nsm/repo
mkdir -p /root/reposync_cache
echo "[main]" > /root/repodownload.conf
echo "gpgcheck=1" >> /root/repodownload.conf
echo "installonly_limit=3" >> /root/repodownload.conf
echo "clean_requirements_on_remove=True" >> /root/repodownload.conf
echo "best=True" >> /root/repodownload.conf
echo "skip_if_unavailable=False" >> /root/repodownload.conf
echo "cachedir=/root/reposync_cache" >> /root/repodownload.conf
echo "keepcache=0" >> /root/repodownload.conf
echo "debuglevel=2" >> /root/repodownload.conf
echo "logfile=/var/log/yum.log" >> /root/repodownload.conf
echo "exactarch=1" >> /root/repodownload.conf
echo "obsoletes=1" >> /root/repodownload.conf
echo "gpgcheck=1" >> /root/repodownload.conf
echo "plugins=1" >> /root/repodownload.conf
echo "installonly_limit=2" >> /root/repodownload.conf
echo "bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum" >> /root/repodownload.conf
echo "distroverpkg=centos-release" >> /root/repodownload.conf
echo "clean_requirements_on_remove=1" >> /root/repodownload.conf
echo "[securityonionsync]" >> /root/repodownload.conf
echo "name=Security Onion Repo repo" >> /root/repodownload.conf
echo "baseurl=https://repo.securityonion.net/file/securityonion-repo/c7so/" >> /root/repodownload.conf
echo "baseurl=https://repo.securityonion.net/file/securityonion-repo/2.4/" >> /root/repodownload.conf
echo "enabled=1" >> /root/repodownload.conf
echo "gpgcheck=1" >> /root/repodownload.conf
echo "gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/securityonion.pub" >> /root/repodownload.conf
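Reading past the removed yum-era lines, the Rocky branch of the echoes above assembles /root/repodownload.conf roughly as follows (a reconstruction; the -/+ split is not marked in this view):

[main]
gpgcheck=1
installonly_limit=3
clean_requirements_on_remove=True
best=True
skip_if_unavailable=False
cachedir=/root/reposync_cache
keepcache=0
debuglevel=2

[securityonionsync]
name=Security Onion Repo repo
baseurl=https://repo.securityonion.net/file/securityonion-repo/2.4/
enabled=1
gpgcheck=1
gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/securityonion.pub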
REPOSYNC=$(rpm -qa | grep createrepo | wc -l)
if [[ ! "$REPOSYNC" -gt 0 ]]; then
# Install reposync
info "Installing createrepo"
logCmd "yum -y install -c /root/repodownload.conf yum-utils createrepo"
else
info "We have what we need to sync"
fi

logCmd "reposync --norepopath -n -g -l -d -m -c /root/repodownload.conf -r securityonionsync --download-metadata -p /nsm/repo/"

dnf repolist
# Make sure we can get to the sig repo
logCmd "curl --retry 5 --retry-delay 60 -A 'gridinstall/$SOVERSION/$OS/$(uname -r)/1' https://sigs.securityonion.net/checkup --output /tmp/checkup"
logCmd "dnf reposync --norepopath -n -g --delete -m -c /root/repodownload.conf --repoid=securityonionsync --download-metadata -p /nsm/repo/"
# Run it again and make sure we got all the things
logCmd "dnf reposync --norepopath -n -g --delete -m -c /root/repodownload.conf --repoid=securityonionsync --download-metadata -p /nsm/repo/"

# After the download is complete run createrepo
create_repo
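Running reposync twice is a belt-and-braces retry for anything that failed on the first pass. The synced tree can then be queried without touching the system repo config (a sketch; the repo id localsync is arbitrary):

# Confirm the local mirror's metadata is usable before relying on it.
dnf repoquery --repofrompath=localsync,/nsm/repo \
    --disablerepo='*' --enablerepo=localsync \
    --qf '%{name}-%{version}' salt-minion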
@@ -2046,20 +2027,16 @@ saltify() {

fi

if [[ $is_centos ]]; then
RUSALTY=$(rpm -qa | grep salt-minion | wc -l)
if [[ "$RUSALTY" -gt 0 ]]; then
# Salt is already installed.
info "salt is installed"
if [[ $is_rocky ]]; then
# THIS IS A TEMP HACK
logCmd "dnf -y install securityonion-salt python3-audit python3-libsemanage python3-policycoreutils python3-setools python3-setuptools python3-chardet python3-idna python3-pysocks python3-requests python3-urllib3 python3-websocket-client python3-docker"
logCmd "mkdir -p /etc/salt/minion.d"
if [[ $waitforstate ]]; then
# Since this is a salt master, let's install it
logCmd ""
else
# Install salt
if [[ $waitforstate ]]; then
# Since this is a salt master, let's install it
logCmd "yum -y install salt-minion salt-master"
else
# We just need the minion
logCmd "yum -y install salt-minion"
fi
# We just need the minion
logCmd "dnf -y install salt-minion"
fi
fi
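The mkdir of /etc/salt/minion.d prepares the drop-in directory for minion configuration. A minimal sketch of pointing a minion at its manager (the filename and master value are illustrative assumptions, not taken from this diff):

# /etc/salt/minion.d/onion.conf (hypothetical drop-in)
cat <<'EOF' > /etc/salt/minion.d/onion.conf
master: manager.example.local   # hypothetical manager hostname
EOF
systemctl restart salt-minion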
@@ -2187,7 +2164,7 @@ set_proxy() {
"}" > /root/.docker/config.json

# Set proxy for package manager
if [[ $is_centos ]]; then
if [[ $is_rocky ]]; then
echo "proxy=$so_proxy" >> /etc/yum.conf
else
# Set it up so the updates roll through the manager
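The echoes ending at config.json at the top of this hunk presumably assemble Docker's client-side proxy settings; for reference, the documented shape of that file (all values illustrative):

# Hypothetical shape of /root/.docker/config.json for proxied pulls.
cat <<'EOF' > /root/.docker/config.json
{ "proxies": { "default": {
    "httpProxy":  "http://proxy.example.com:3128",
    "httpsProxy": "http://proxy.example.com:3128",
    "noProxy":    "localhost,127.0.0.1" } } }
EOF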
@@ -2397,10 +2374,9 @@ update_sudoers() {
}

update_packages() {
if [[ $is_centos ]]; then
logCmd "yum repolist"
logCmd "yum -y update --exclude=salt*,wazuh*,docker*,containerd*"
logCmd "yum -y install yum-utils"
if [[ $is_rocky ]]; then
logCmd "dnf repolist"
logCmd "dnf -y update --exclude=salt*,wazuh*,docker*,containerd*"
else
retry 150 10 "apt-get -y update" >> "$setup_log" 2>&1 || exit 1
retry 150 10 "apt-get -y upgrade" >> "$setup_log" 2>&1 || exit 1
@@ -577,7 +577,6 @@ if ! [[ -f $install_opt_file ]]; then
logCmd "salt-call state.show_top"
logCmd "salt-key -ya $MINION_ID"

logCmd "salt-call state.apply salt.helper-packages"
logCmd "salt-call state.apply common.packages"
logCmd "salt-call state.apply common"
logCmd "salt-call state.apply docker"
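Each state.apply above commits changes immediately; when debugging this bootstrap sequence it can help to dry-run a state first (standard salt usage, not part of the installer):

# Preview what common.packages would change without applying it.
salt-call state.apply common.packages test=True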
@@ -136,12 +136,6 @@ export suricata_pillar_file
adv_suricata_pillar_file="$local_salt_dir/pillar/suricata/adv_suricata.sls"
export adv_suricata_pillar_file

filebeat_pillar_file="$local_salt_dir/pillar/filebeat/soc_filebeat.sls"
export filebeat_pillar_file

adv_filebeat_pillar_file="$local_salt_dir/pillar/filebeat/adv_filebeat.sls"
export adv_filebeat_pillar_file

logstash_pillar_file="$local_salt_dir/pillar/logstash/soc_logstash.sls"
export logstash_pillar_file
@@ -200,4 +194,4 @@ influxdb_pillar_file="$local_salt_dir/pillar/influxdb/soc_influxdb.sls"
export influxdb_pillar_file

adv_influxdb_pillar_file="$local_salt_dir/pillar/influxdb/adv_influxdb.sls"
export adv_influxdb_pillar_file
export adv_influxdb_pillar_file