Merge remote-tracking branch 'origin/2.4/dev' into issue/10050

m0duspwnens
2023-04-18 11:31:00 -04:00
32 changed files with 331 additions and 301 deletions

View File

@@ -25,6 +25,7 @@ config_backup_script:
 so_config_backup:
   cron.present:
     - name: /usr/sbin/so-config-backup > /dev/null 2>&1
+    - identifier: so_config_backup
     - user: root
     - minute: '1'
     - hour: '0'
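
Editor's note: the recurring change in this commit is adding an explicit identifier to each cron.present state. Salt tracks the crontab entries it manages by an identifier comment written into the user's crontab, so pinning it keeps the entry stable when the state ID or command changes later. A minimal check on a minion, assuming the identifier from the hunk above (output shape is illustrative):

    # Show the Salt-managed backup job and its identifier tag in root's crontab
    sudo crontab -u root -l | grep -B1 so-config-backup
    # Illustrative output:
    #   # SALT_CRON_IDENTIFIER:so_config_backup
    #   1 0 * * * /usr/sbin/so-config-backup > /dev/null 2>&1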

View File

@@ -133,8 +133,10 @@ so-status_script:
 {% if GLOBALS.role in GLOBALS.sensor_roles %}
 # Add sensor cleanup
-/usr/sbin/so-sensor-clean:
+so-sensor-clean:
   cron.present:
+    - name: /usr/sbin/so-sensor-clean
+    - identifier: so-sensor-clean
     - user: root
     - minute: '*'
     - hour: '*'
@@ -154,8 +156,10 @@ sensorrotateconf:
     - source: salt://common/files/sensor-rotate.conf
     - mode: 644
-/usr/local/bin/sensor-rotate:
+sensor-rotate:
   cron.present:
+    - name: /usr/local/bin/sensor-rotate
+    - identifier: sensor-rotate
     - user: root
     - minute: '1'
     - hour: '0'
@@ -178,8 +182,10 @@ commonlogrotateconf:
     - template: jinja
     - mode: 644
-/usr/local/bin/common-rotate:
+common-rotate:
   cron.present:
+    - name: /usr/local/bin/common-rotate
+    - identifier: common-rotate
     - user: root
     - minute: '1'
     - hour: '0'
@@ -200,17 +206,11 @@ sostatus_log:
     - name: /opt/so/log/sostatus/status.log
     - mode: 644
-common_pip_dependencies:
-  pip.installed:
-    - user: root
-    - pkgs:
-      - rich
-    - target: /usr/lib64/python3.6/site-packages
-# Install sostatus check cron
-sostatus_check_cron:
+# Install sostatus check cron. This is used to populate Grid.
+so-status_check_cron:
   cron.present:
     - name: '/usr/sbin/so-status -j > /opt/so/log/sostatus/status.log 2>&1'
+    - identifier: so-status_check_cron
     - user: root
     - minute: '*/1'
     - hour: '*'
@@ -220,7 +220,7 @@ sostatus_check_cron:
 remove_post_setup_cron:
   cron.absent:
-    - name: 'salt-call state.highstate'
+    - name: 'PATH=$PATH:/usr/sbin salt-call state.highstate'
     - identifier: post_setup_cron
 {% if GLOBALS.role not in ['eval', 'manager', 'managersearch', 'standalone'] %}
@@ -234,7 +234,7 @@ soversionfile:
 {% endif %}
-{% if GLOBALS.so_model %}
+{% if GLOBALS.so_model and GLOBALS.so_model not in ['SO2AMI01', 'SO2AZI01', 'SO2GCI01'] %}
 {% if GLOBALS.os == 'Rocky' %}
 # Install Raid tools
 raidpkgs:
@@ -246,9 +246,10 @@ raidpkgs:
 {% endif %}
 # Install raid check cron
-so_raid_status:
+so-raid-status:
   cron.present:
     - name: '/usr/sbin/so-raid-status > /dev/null 2>&1'
+    - identifier: so-raid-status
     - user: root
     - minute: '*/15'
     - hour: '*'

View File

@@ -5,28 +5,37 @@ commonpkgs:
pkg.installed: pkg.installed:
- skip_suggestions: True - skip_suggestions: True
- pkgs: - pkgs:
- chrony
- apache2-utils - apache2-utils
- wget - wget
- ntpdate - ntpdate
- jq - jq
- python3-docker
- curl - curl
- ca-certificates - ca-certificates
- software-properties-common - software-properties-common
- apt-transport-https - apt-transport-https
- openssl - openssl
- netcat - netcat
- python3-mysqldb
- sqlite3 - sqlite3
- libssl-dev - libssl-dev
- python3-dateutil - python3-dateutil
- python3-m2crypto
- python3-mysqldb
- python3-packaging - python3-packaging
- python3-watchdog
- python3-lxml - python3-lxml
- git - git
- vim - vim
# since Ubuntu requires and internet connection we can use pip to install modules
python3-pip:
pkg.installed
python-rich:
pip.installed:
- name: rich
- target: /usr/local/lib/python3.8/dist-packages/
- require:
- pkg: python3-pip
{% elif GLOBALS.os == 'Rocky' %} {% elif GLOBALS.os == 'Rocky' %}
commonpkgs: commonpkgs:
pkg.installed: pkg.installed:
@@ -51,6 +60,7 @@ commonpkgs:
- python3-m2crypto - python3-m2crypto
- rsync - rsync
- python3-rich - python3-rich
- python3-pyyaml
- python3-watchdog - python3-watchdog
- python3-packaging - python3-packaging
- unzip - unzip

View File

@@ -61,7 +61,7 @@ if [ -f "$pillar_file" ]; then
     reboot;
   else
-    echo "There was an issue applying the workstation state. Please review the log above or at /opt/so/logs/salt/minion."
+    echo "There was an issue applying the workstation state. Please review the log above or at /opt/so/log/salt/minion."
   fi
 else # workstation is already added
   echo "The workstation pillar already exists in $pillar_file."

View File

@@ -21,16 +21,34 @@ Security Onion Elastic Clear
   -y              Skip interactive mode
 EOF
 }
-while getopts "h:y" OPTION
+while getopts "h:cdely" OPTION
 do
   case $OPTION in
     h)
       usage
       exit 0
       ;;
+    c)
+      DELETE_CASES_DATA=1
+      SKIP=1
+      ;;
+    d)
+      DONT_STOP_SERVICES=1
-    y)
       SKIP=1
+      ;;
+    e)
+      DELETE_ELASTALERT_DATA=1
+      SKIP=1
+      ;;
+    l)
+      DELETE_LOG_DATA=1
+      SKIP=1
+      ;;
+    y)
+      DELETE_CASES_DATA=1
+      DELETE_ELASTALERT_DATA=1
+      DELETE_LOG_DATA=1
+      SKIP=1
       ;;
     *)
       usage
@@ -54,41 +72,83 @@ if [ $SKIP -ne 1 ]; then
   if [ "$INPUT" != "AGREE" ] ; then exit 0; fi
 fi
-# Check to see if Logstash are running
-LS_ENABLED=$(so-status | grep logstash)
-EA_ENABLED=$(so-status | grep elastalert)
-if [ ! -z "$LS_ENABLED" ]; then
-  /usr/sbin/so-logstash-stop
+if [ -z "$DONT_STOP_SERVICES" ]; then
+  # Stop Elastic Agent
+  for i in $(pgrep elastic-agent | grep -v grep); do
+    kill -9 $i;
+  done
+  # Check to see if Elastic Fleet, Logstash, Elastalert are running
+  #EF_ENABLED=$(so-status | grep elastic-fleet)
+  LS_ENABLED=$(so-status | grep logstash)
+  EA_ENABLED=$(so-status | grep elastalert)
+  #if [ ! -z "$EF_ENABLED" ]; then
+  #  /usr/sbin/so-elastic-fleet-stop
+  #fi
+  if [ ! -z "$LS_ENABLED" ]; then
+    /usr/sbin/so-logstash-stop
+  fi
+  if [ ! -z "$EA_ENABLED" ]; then
+    /usr/sbin/so-elastalert-stop
+  fi
 fi
-if [ ! -z "$EA_ENABLED" ]; then
-  /usr/sbin/so-elastalert-stop
+if [ ! -z "$DELETE_CASES_DATA" ]; then
+  # Delete Cases data
+  echo "Deleting Cases data..."
+  INDXS=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index | grep "so-case")
+  for INDX in ${INDXS}
+  do
+    echo "Deleting $INDX"
+    /usr/sbin/so-elasticsearch-query ${INDX} -XDELETE > /dev/null 2>&1
+  done
 fi
-# Delete data
-echo "Deleting data..."
-INDXS=$(curl -K /opt/so/conf/elasticsearch/curl.config -s -XGET -k -L https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
-for INDX in ${INDXS}
-do
-  curl -K /opt/so/conf/elasticsearch/curl.config -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
-done
-#Start Logstash
-if [ ! -z "$LS_ENABLED" ]; then
-  /usr/sbin/so-logstash-start
+# Delete Elastalert data
+if [ ! -z "$DELETE_ELASTALERT_DATA" ]; then
+  # Delete Elastalert data
+  echo "Deleting Elastalert data..."
+  INDXS=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index | grep "elastalert")
+  for INDX in ${INDXS}
+  do
+    echo "Deleting $INDX"
+    /usr/sbin/so-elasticsearch-query ${INDX} -XDELETE > /dev/null 2>&1
+  done
 fi
-if [ ! -z "$EA_ENABLED" ]; then
-  /usr/sbin/so-elastalert-start
+# Delete log data
+if [ ! -z "$DELETE_LOG_DATA" ]; then
+  echo "Deleting log data ..."
+  DATASTREAMS=$(/usr/sbin/so-elasticsearch-query _data_stream | jq -r '.[] |.[].name')
+  for DATASTREAM in ${DATASTREAMS}
+  do
+    # Delete the data stream
+    echo "Deleting $DATASTREAM..."
+    /usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} -XDELETE > /dev/null 2>&1
+  done
 fi
+if [ -z "$DONT_STOP_SERVICES" ]; then
+  #Start Logstash
+  if [ ! -z "$LS_ENABLED" ]; then
+    /usr/sbin/so-logstash-start
+  fi
+  #Start Elastic Fleet
+  #if [ ! -z "$EF_ENABLED" ]; then
+  #  /usr/sbin/so-elastic-fleet-start
+  #fi
+  #Start Elastalert
+  if [ ! -z "$EA_ENABLED" ]; then
+    /usr/sbin/so-elastalert-start
+  fi
+  # Start Elastic Agent
+  /usr/bin/elastic-agent restart
+fi
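
For context, the expanded getopts string above splits the old all-or-nothing clear into targeted operations. A hedged usage sketch, inferred only from the variables each flag sets in this hunk (the script's real usage text is not shown here):

    # Inferred: -c clear Cases indices, -e clear Elastalert indices, -l clear log data streams,
    #           -d skip stopping/starting services, -y non-interactive full clear
    so-elastic-clear -l        # assumed invocation: delete only log data streams
    so-elastic-clear -d -c     # assumed invocation: delete Cases data without touching services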

View File

@@ -9,4 +9,4 @@
 . /usr/sbin/so-common
-/usr/sbin/so-restart elastic-fleet $1
+/usr/sbin/so-restart elasticfleet $1

View File

@@ -9,4 +9,4 @@
 . /usr/sbin/so-common
-/usr/sbin/so-start elastic-fleet $1
+/usr/sbin/so-start elasticfleet $1

View File

@@ -9,4 +9,4 @@
 . /usr/sbin/so-common
-/usr/sbin/so-stop elastic-fleet $1
+/usr/sbin/so-stop elasticfleet $1

View File

@@ -170,7 +170,8 @@ def main():
if "-h" in options or "--help" in options or "-?" in options: if "-h" in options or "--help" in options or "-?" in options:
showUsage(options, None) showUsage(options, None)
if os.environ["USER"] != "root": proc = subprocess.run(['id', '-u'], stdout=subprocess.PIPE, encoding="utf-8")
if proc.stdout.strip() != "0":
fail("This program must be run as root") fail("This program must be run as root")
console = Console() console = Console()
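
Switching from $USER to id -u makes the root check hold up under sudo or when USER is unset. The equivalent shell guard, for comparison (a sketch, not code from this repo):

    # Fail early unless the effective UID is 0
    if [ "$(id -u)" -ne 0 ]; then
      echo "This program must be run as root" >&2
      exit 1
    fi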

View File

@@ -1,6 +1,6 @@
 #!/bin/bash
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
@@ -16,70 +16,38 @@ overlimit() {
   [[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt "${LOG_SIZE_LIMIT}" ]]
 }
-closedindices() {
-  # If we can't query Elasticsearch, then immediately return false.
-  /usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close > /dev/null 2>&1
-  [ $? -eq 1 ] && return false
-  # First, get the list of closed indices using _cat/indices?h=index,status | grep close | awk '{print $1}'.
-  # Next, filter out any so-case indices.
-  # Finally, use grep's -q option to return true if there are any remaining logstash-, so-, or .ds-logs- indices.
-  /usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -q -E "(logstash-|so-|.ds-logs-)"
-}
-# Check for 2 conditions:
-# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
-# 2. Are there any closed indices that we can delete?
-# If both conditions are true, keep on looping until one of the conditions is false.
-while overlimit && closedindices; do
-  CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
-  # We iterate through the closed indices
-  for CLOSED_INDEX in ${CLOSED_INDICES}; do
-    # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
-    # To do so, we need to identify to which data stream this index is associated
-    # We extract the data stream name using the pattern below
-    DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
-    DATASTREAM=$(echo "${CLOSED_INDEX}" | grep -oE "$DATASTREAM_PATTERN")
-    # We look up the data stream, and determine the write index
-    CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
-    # We make sure we are not trying to delete a write index
-    if [ "${CLOSED_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
-      # This should not be a write index, so we should be allowed to delete it
-      /usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE
-      # Finally, write a log entry that says we deleted it.
-      echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG}
-    fi
-    if ! overlimit; then
-      exit
-    fi
-  done
-done
+# Check to see if Elasticsearch indices using more disk space than LOG_SIZE_LIMIT
+# Closed indices will be deleted first. If we are able to bring disk space under LOG_SIZE_LIMIT, we will break out of the loop.
 while overlimit; do
-  # We need to determine the oldest open index.
-  # First, get the list of open indices using _cat/indices?h=index,status | grep open | awk '{print $1}'.
-  # Next, filter out any so-case indices and only select the remaining logstash-, so-, or .ds-logs- indices.
-  # Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field.
-  OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
-  # We iterate through the open indices
-  for OPEN_INDEX in ${OPEN_INDICES}; do
+  # If we can't query Elasticsearch, then immediately return false.
+  /usr/sbin/so-elasticsearch-query _cat/indices?h=index,status > /dev/null 2>&1
+  [ $? -eq 1 ] && echo "$(date) - Could not query Elasticsearch." >> ${LOG} && exit
+  # We iterate through the closed and open indices
+  CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
+  OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
+  for INDEX in ${CLOSED_INDICES} ${OPEN_INDICES}; do
     # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
     # To do so, we need to identify to which data stream this index is associated
     # We extract the data stream name using the pattern below
     DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
-    DATASTREAM=$(echo "${OPEN_INDEX}" | grep -oE "$DATASTREAM_PATTERN")
-    # We look up the data stream, and determine the write index
-    CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
-    # We make sure we are not trying to delete a write index
-    if [ "${OPEN_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
-      # This should not be a write index, so we should be allowed to delete it
-      /usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE
-      # Finally, write a log entry that says we deleted it.
-      echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${OPEN_INDEX} deleted ..." >> ${LOG}
-    fi
-    if ! overlimit; then
-      exit
-    fi
-  done
+    DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN")
+    # We look up the data stream, and determine the write index. If there is only one backing index, we delete the entire data stream
+    BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
+    if [ "$BACKING_INDICES" -gt 1 ]; then
+      CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
+      # We make sure we are not trying to delete a write index
+      if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
+        # This should not be a write index, so we should be allowed to delete it
+        printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
+        /usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
+      fi
+    else
+      # We delete the entire data stream, since there is only one backing index
+      printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Deleting ${DATASTREAM} data stream...\n" >> ${LOG}
+      /usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} -XDELETE >> ${LOG} 2>&1
+    fi
+    if ! overlimit; then
+      exit
+    fi
+  done
 done
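
A note on the backing-index logic above: a data stream always keeps a current write index, so a backing index can only be deleted directly when the stream has more than one; otherwise the whole data stream is removed. The same values can be inspected by hand with the commands already used in this script (the data stream name below is only an example):

    DS=logs-suricata.alerts-so   # example data stream name, assumption
    /usr/sbin/so-elasticsearch-query _data_stream/$DS | jq -r '.data_streams[0].indices | length'
    /usr/sbin/so-elasticsearch-query _data_stream/$DS | jq -r '.data_streams[0].indices[-1].index_name'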

View File

@@ -126,9 +126,10 @@ delete_so-curator_so-status.disabled:
     - regex: ^so-curator$
 {% endif %}
-so-curatorclusterclose:
+so-curator-cluster-close:
   cron.present:
     - name: /usr/sbin/so-curator-cluster-close > /opt/so/log/curator/cron-close.log 2>&1
+    - identifier: so-curator-cluster-close
     - user: root
     - minute: '2'
     - hour: '*/1'
@@ -136,9 +137,10 @@ so-curatorclusterclose:
     - month: '*'
     - dayweek: '*'
-so-curatorclusterdeletecron:
+so-curator-cluster-delete:
   cron.present:
     - name: /usr/sbin/so-curator-cluster-delete > /opt/so/log/curator/cron-cluster-delete.log 2>&1
+    - identifier: so-curator-cluster-delete
     - user: root
     - minute: '*/5'
     - hour: '*'

View File

@@ -1,21 +1,21 @@
 {
   "description" : "suricata.dns",
   "processors" : [
     { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
     { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
     { "rename": { "field": "message2.dns.type", "target_field": "dns.query.type", "ignore_missing": true } },
     { "rename": { "field": "message2.dns.tx_id", "target_field": "dns.id", "ignore_missing": true } },
     { "rename": { "field": "message2.dns.version", "target_field": "dns.version", "ignore_missing": true } },
     { "rename": { "field": "message2.dns.rrname", "target_field": "dns.query.name", "ignore_missing": true } },
     { "rename": { "field": "message2.dns.rrtype", "target_field": "dns.query.type_name", "ignore_missing": true } },
     { "rename": { "field": "message2.dns.flags", "target_field": "dns.flags", "ignore_missing": true } },
     { "rename": { "field": "message2.dns.qr", "target_field": "dns.qr", "ignore_missing": true } },
     { "rename": { "field": "message2.dns.rd", "target_field": "dns.recursion.desired", "ignore_missing": true } },
     { "rename": { "field": "message2.dns.ra", "target_field": "dns.recursion.available", "ignore_missing": true } },
     { "rename": { "field": "message2.dns.rcode", "target_field": "dns.response.code_name", "ignore_missing": true } },
-    { "rename": { "field": "message2.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } },
-    { "rename": { "field": "message2.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } },
     { "pipeline": { "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
     { "pipeline": { "name": "common" } }
   ]
 }
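
The corrected field path matters because Suricata nests the grouped answer records under dns in the EVE record. One hedged way to verify the rename is Elasticsearch's ingest simulate API; the curl flags mirror the config-file style used elsewhere in this commit, and the host and sample values are assumptions:

    curl -K /opt/so/conf/elasticsearch/curl.config -k -L -XPOST \
      -H 'Content-Type: application/json' \
      https://localhost:9200/_ingest/pipeline/_simulate -d '
    {
      "pipeline": { "processors": [
        { "rename": { "field": "message2.dns.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } }
      ] },
      "docs": [ { "_source": { "message2": { "dns": { "grouped": { "A": [ "192.0.2.1" ] } } } } } ]
    }'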

View File

@@ -177,6 +177,7 @@ esyml:
         ESCONFIG: {{ ESCONFIG }}
     - template: jinja
+{% if GLOBALS.role != "so-searchnode" %}
 escomponenttemplates:
   file.recurse:
     - name: /opt/so/conf/elasticsearch/templates/component
@@ -219,6 +220,7 @@ es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}:
       - cmd: so-elasticsearch-templates
 {% endfor %}
 {% endif %}
+{% endif %}
 esroles:
   file.recurse:
@@ -363,6 +365,8 @@ append_so-elasticsearch_so-status.conf:
     - name: /opt/so/conf/so-status/so-status.conf
     - text: so-elasticsearch
+{% if GLOBALS.role != "so-searchnode" %}
 so-es-cluster-settings:
   cmd.run:
     - name: /usr/sbin/so-elasticsearch-cluster-settings
@@ -406,7 +410,7 @@ so-elasticsearch-roles-load:
     - require:
       - docker_container: so-elasticsearch
       - file: es_sync_scripts
+{% endif %}
 {% else %}
 {{sls}}_state_not_allowed:

View File

@@ -20,9 +20,10 @@ idstoolslogdir:
     - group: 939
     - makedirs: True
-so-ruleupdatecron:
+so-rule-update:
   cron.present:
     - name: /usr/sbin/so-rule-update > /opt/so/log/idstools/download.log 2>&1
+    - identifier: so-rule-update
     - user: root
     - minute: '1'
     - hour: '7'

View File

@@ -127,6 +127,7 @@ metrics_link_file:
 get_influxdb_size:
   cron.present:
     - name: 'du -s -k /nsm/influxdb | cut -f1 > /opt/so/log/telegraf/influxdb_size.log 2>&1'
+    - identifier: get_influxdb_size
     - user: root
     - minute: '*/1'
     - hour: '*'

File diff suppressed because one or more lines are too long

View File

@@ -51,7 +51,7 @@ repo_sync_script:
     - group: root
     - mode: 755
-reposync_cron:
+so-repo-sync:
 {% if MANAGERMERGED.reposync.enabled %}
   cron.present:
 {% else %}
@@ -59,6 +59,7 @@ reposync_cron:
 {% endif %}
     - user: socore
     - name: '/usr/sbin/so-repo-sync >> /opt/so/log/reposync/reposync.log 2>&1'
+    - identifier: so-repo-sync
     - hour: '{{ MANAGERMERGED.reposync.hour }}'
     - minute: '{{ MANAGERMERGED.reposync.minute }}'
@@ -83,10 +84,11 @@ yara_update_script:
         ISAIRGAP: {{ GLOBALS.airgap }}
         EXCLUDEDRULES: {{ STRELKAMERGED.rules.excluded }}
-strelka_yara_update:
+strelka-yara-update:
   cron.present:
     - user: root
     - name: '/usr/sbin/so-yara-update >> /nsm/strelka/log/yara-update.log 2>&1'
+    - identifier: strelka-yara-update
     - hour: '7'
     - minute: '1'

View File

@@ -24,8 +24,9 @@ sync_es_users:
 # we dont want this added too early in setup, so we add the onlyif to verify 'startup_states: highstate'
 # is in the minion config. That line is added before the final highstate during setup
-sosyncusers:
+so-user_sync:
   cron.present:
     - user: root
     - name: 'PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin /usr/sbin/so-user sync &>> /opt/so/log/soc/sync.log'
+    - identifier: so-user_sync
     - onlyif: "grep 'startup_states: highstate' /etc/salt/minion"

View File

@@ -2,8 +2,13 @@
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
+{% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from 'ntp/config.map.jinja' import NTPCONFIG %}
+chrony_pkg:
+  pkg.installed:
+    - name: chrony
 chronyconf:
   file.managed:
     - name: /etc/chrony.conf
@@ -12,8 +17,14 @@ chronyconf:
     - defaults:
         NTPCONFIG: {{ NTPCONFIG }}
+{% if GLOBALS.os == 'Rocky' %}
 chronyd:
+{% else %}
+chrony:
+{% endif %}
   service.running:
     - enable: True
     - watch:
       - file: chronyconf
+    - require:
+      - pkg: chrony_pkg
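
The OS branch above exists because the same daemon ships under different unit names: chronyd.service on Rocky and chrony.service on Ubuntu. A quick way to confirm which unit a given host provides (sketch):

    systemctl list-unit-files 'chrony*' --no-legend
    systemctl status chronyd 2>/dev/null || systemctl status chrony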

View File

@@ -113,15 +113,17 @@ append_so-playbook_so-status.conf:
 {% endif %}
-so-playbooksynccron:
+so-playbook-sync_cron:
   cron.present:
     - name: /usr/sbin/so-playbook-sync > /opt/so/log/playbook/sync.log 2>&1
+    - identifier: so-playbook-sync_cron
     - user: root
     - minute: '*/5'
-so-playbookruleupdatecron:
+so-playbook-ruleupdate_cron:
   cron.present:
     - name: /usr/sbin/so-playbook-ruleupdate > /opt/so/log/playbook/update.log 2>&1
+    - identifier: so-playbook-ruleupdate_cron
     - user: root
     - minute: '1'
     - hour: '6'

View File

@@ -1,20 +0,0 @@
-# this removes the repo file left by bootstrap-salt.sh without -r
-remove_salt.list:
-  file.absent:
-    - name: /etc/apt/sources.list.d/salt.list
-saltstack.list:
-  file.managed:
-    - name: /etc/apt/sources.list.d/saltstack.list
-    - contents:
-      - deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/{{grains.osrelease}}/amd64/salt3004.2/ {{grains.oscodename}} main
-apt_update:
-  cmd.run:
-    - name: apt-get update
-    - onchanges:
-      - file: saltstack.list
-    - timeout: 30
-    - retry:
-        attempts: 5
-        interval: 30

View File

@@ -1,16 +1,8 @@
-{% from 'vars/globals.map.jinja' import GLOBALS %}
-{% if GLOBALS.os != 'Rocky' %}
+{% if grains.oscodename == 'focal' %}
 saltpymodules:
   pkg.installed:
     - pkgs:
-{% if grains['oscodename'] == 'bionic' %}
-      - python-m2crypto
-      - python-docker
-{% elif grains['oscodename'] == 'focal' %}
-      - python3-m2crypto
       - python3-docker
-{% endif %}
 {% endif %}
 salt_bootstrap:

View File

@@ -12,8 +12,9 @@ state-apply-test:
     start: 0
     end: 180
-/usr/sbin/so-salt-minion-check -q:
+so-salt-minion-check_cron:
   cron.present:
-    - identifier: so-salt-minion-check
+    - name: /usr/sbin/so-salt-minion-check -q
+    - identifier: so-salt-minion-check_cron
     - user: root
     - minute: '*/5'

View File

@@ -1,6 +1,7 @@
 post_setup_cron:
   cron.present:
     - name: 'PATH=$PATH:/usr/sbin salt-call state.highstate'
+    - identifier: post_setup_cron
     - user: root
     - minute: '*/1'
     - identifier: post_setup_cron

View File

@@ -6,7 +6,8 @@
 PIPE_OWNER=${PIPE_OWNER:-socore}
 PIPE_GROUP=${PIPE_GROUP:-socore}
-SOC_PIPE=${SOC_PIPE_REQUEST:-/opt/so/conf/soc/salt/pipe}
+SOC_PIPE=${SOC_PIPE:-/opt/so/conf/soc/salt/pipe}
+CMD_PREFIX=${CMD_PREFIX:-""}
 PATH=${PATH}:/usr/sbin
 function log() {
@@ -26,7 +27,7 @@ function make_pipe() {
 make_pipe "${SOC_PIPE}"
 function list_minions() {
-  response=$(so-minion -o=list)
+  response=$($CMD_PREFIX so-minion -o=list)
   exit_code=$?
   if [[ $exit_code -eq 0 ]]; then
     log "Successful command execution"
@@ -42,7 +43,7 @@ function manage_minion() {
   op=$(echo "$request" | jq -r .operation)
   id=$(echo "$request" | jq -r .id)
-  response=$(so-minion "-o=$op" "-m=$id")
+  response=$($CMD_PREFIX so-minion "-o=$op" "-m=$id")
   exit_code=$?
   if [[ exit_code -eq 0 ]]; then
     log "Successful command execution"
@@ -75,14 +76,14 @@ function manage_user() {
     add|enable|disable|delete)
       email=$(echo "$request" | jq -r .email)
       log "Performing user '$op' for user '$email'"
-      response=$(so-user "$op" --email "$email" --skip-sync)
+      response=$($CMD_PREFIX so-user "$op" --email "$email" --skip-sync)
       exit_code=$?
       ;;
     addrole|delrole)
       email=$(echo "$request" | jq -r .email)
       role=$(echo "$request" | jq -r .role)
       log "Performing '$op' for user '$email' with role '$role'"
-      response=$(so-user "$op" --email "$email" --role "$role" --skip-sync)
+      response=$($CMD_PREFIX so-user "$op" --email "$email" --role "$role" --skip-sync)
       exit_code=$?
       ;;
     password)
@@ -98,12 +99,12 @@ function manage_user() {
       lastName=$(echo "$request" | jq -r .lastName)
       note=$(echo "$request" | jq -r .note)
       log "Performing '$op' update for user '$email' with firstname '$firstName', lastname '$lastName', and note '$note'"
-      response=$(so-user "$op" --email "$email" --firstName "$firstName" --lastName "$lastName" --note "$note")
+      response=$($CMD_PREFIX so-user "$op" --email "$email" --firstName "$firstName" --lastName "$lastName" --note "$note")
       exit_code=$?
       ;;
     sync)
       log "Performing '$op'"
-      response=$(so-user "$op")
+      response=$($CMD_PREFIX so-user "$op")
       exit_code=$?
       ;;
     *)
@@ -142,17 +143,17 @@ function manage_salt() {
     state)
       log "Performing '$op' for '$state' on minion '$minion'"
       state=$(echo "$request" | jq -r .state)
-      response=$(salt --async "$minion" state.apply "$state" queue=True)
+      response=$($CMD_PREFIX salt --async "$minion" state.apply "$state" queue=True)
       exit_code=$?
       ;;
     highstate)
       log "Performing '$op' on minion $minion"
-      response=$(salt --async "$minion" state.highstate queue=True)
+      response=$($CMD_PREFIX salt --async "$minion" state.highstate queue=True)
       exit_code=$?
       ;;
     activejobs)
+      response=$($CMD_PREFIX salt-run jobs.active -out json -l quiet)
       log "Querying active salt jobs"
-      response=$(salt-run jobs.active -out json -l quiet)
       $(echo "$response" > "${SOC_PIPE}")
       return
       ;;
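
CMD_PREFIX defaults to an empty string, so the relay behaves exactly as before unless the caller sets it; every so-minion, so-user, and salt invocation then gets the prefix prepended. An illustrative test-harness use, not taken from this repo:

    # Echo instead of executing, to see what the relay would run
    CMD_PREFIX="echo WOULD-RUN: " /opt/so/saltstack/default/salt/soc/files/bin/salt-relay.sh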

View File

@@ -91,6 +91,7 @@ socusersroles:
 salt-relay:
   cron.present:
     - name: 'ps -ef | grep salt-relay.sh | grep -v grep > /dev/null 2>&1 || /opt/so/saltstack/default/salt/soc/files/bin/salt-relay.sh >> /opt/so/log/soc/salt-relay.log 2>&1 &'
+    - identifier: salt-relay
 so-soc:
   docker_container.running:

View File

@@ -205,11 +205,13 @@ filecheck_restart:
 filecheck_run:
   cron.present:
     - name: 'ps -ef | grep filecheck | grep -v grep > /dev/null 2>&1 || python3 /opt/so/conf/strelka/filecheck >> /opt/so/log/strelka/filecheck_stdout.log 2>&1 &'
+    - identifier: filecheck_run
     - user: {{ filecheck_runas }}
 filcheck_history_clean:
   cron.present:
     - name: '/usr/bin/find /nsm/strelka/history/ -type f -mtime +2 -exec rm {} + > /dev/null 2>&1'
+    - identifier: filecheck_history_clean
     - minute: '33'
 # End Filecheck Section

View File

@@ -79,8 +79,10 @@ surilogscript:
     - source: salt://suricata/cron/surilogcompress
     - mode: 755
-/usr/local/bin/surilogcompress:
+surilogcompress:
   cron.present:
+    - name: /usr/local/bin/surilogcompress
+    - identifier: surilogcompress
     - user: suricata
     - minute: '17'
     - hour: '*'
@@ -181,16 +183,6 @@ delete_so-suricata_so-status.disabled:
     - regex: ^so-suricata$
 {% endif %}
-surirotate:
-  cron.absent:
-    - name: /usr/local/bin/surirotate
-    - user: root
-    - minute: '11'
-    - hour: '*'
-    - daymonth: '*'
-    - month: '*'
-    - dayweek: '*'
 so-suricata-eve-clean:
   file.managed:
     - name: /usr/sbin/so-suricata-eve-clean
@@ -204,6 +196,7 @@ so-suricata-eve-clean:
 clean_suricata_eve_files:
   cron.present:
     - name: /usr/sbin/so-suricata-eve-clean > /dev/null 2>&1
+    - identifier: clean_suricata_eve_files
     - user: root
     - minute: '*/5'
     - hour: '*'

View File

@@ -21,6 +21,7 @@ zeek:
     SpoolDir: /nsm/zeek/spool
     CfgDir: /opt/zeek/etc
     CompressLogs: 1
+    ZeekPort: 27760
   local:
     load:
       - misc/loaded-scripts

View File

@@ -674,8 +674,13 @@ configure_ntp() {
     'rtcsync' \
     'logdir /var/log/chrony' >> $chrony_conf
-  systemctl enable chronyd
-  systemctl restart chronyd
+  if [ "$OS" == 'rocky' ]; then
+    systemctl enable chronyd
+    systemctl restart chronyd
+  elif [ "$OS" == 'ubuntu' ]; then
+    systemctl enable chrony
+    systemctl restart chrony
+  fi
   # Tell the chrony daemon to sync time & update the system time
   # Since these commands only make a call to chronyd, wait after each command to make sure the changes are made
@@ -770,11 +775,12 @@ check_requirements() {
 }
 check_sos_appliance() {
+  title "Is this is an SOS Appliance?"
   if [ -f "/etc/SOSMODEL" ]; then
     local MODEL=$(cat /etc/SOSMODEL)
     info "Found SOS Model $MODEL"
     echo "sosmodel: $MODEL" >> /etc/salt/grains
+  else
+    info "Not an appliance"
   fi
 }
@@ -959,19 +965,23 @@ installer_progress_loop() {
 }
 installer_prereq_packages() {
-  if [ "$OS" == rocky ]; then
-    if [[ ! $is_iso ]]; then
-      if ! command -v nmcli > /dev/null 2>&1; then
-        logCmd "dnf -y install NetworkManager"
-      fi
-    fi
-    logCmd "systemctl enable NetworkManager"
-    logCmd "systemctl start NetworkManager"
-  elif [ "$OS" == ubuntu ]; then
+  # if [ "$OS" == rocky ]; then
+  #   if [[ ! $is_iso ]]; then
+  #     if ! command -v nmcli > /dev/null 2>&1; then
+  #       logCmd "dnf -y install NetworkManager"
+  #     fi
+  #   fi
+  #   logCmd "systemctl enable NetworkManager"
+  #   logCmd "systemctl start NetworkManager"
+  # el
+  if [ "$OS" == ubuntu ]; then
     # Print message to stdout so the user knows setup is doing something
+    info "Running apt-get update"
     retry 150 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
     # Install network manager so we can do interface stuff
     if ! command -v nmcli > /dev/null 2>&1; then
+      info "Installing network-manager"
       retry 150 10 "apt-get -y install network-manager" >> "$setup_log" 2>&1 || exit 1
       {
         systemctl enable NetworkManager
@@ -2008,14 +2018,19 @@ saltify() {
     )
     retry 150 20 "apt-get -y install ${pkg_arr[*]}" || exit 1
-    logCmd "mkdir -vp /opt/so/gpg"
-    logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.securityonion.net/file/securityonion-repo/ubuntu/20.04/amd64/salt/SALTSTACK-GPG-KEY.pub"
-    logCmd "wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg"
-    logCmd "apt-key add /opt/so/gpg/SALTSTACK-GPG-KEY.pub"
-    logCmd "apt-key add /opt/so/gpg/docker.pub"
+    logCmd "mkdir -vp /etc/apt/keyrings"
+    #logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.securityonion.net/file/securityonion-repo/ubuntu/20.04/amd64/salt/SALTSTACK-GPG-KEY.pub"
+    logCmd "wget -q --inet4-only -O /etc/apt/keyrings/docker.pub https://download.docker.com/linux/ubuntu/gpg"
+    logCmd "curl -fsSL -o /etc/apt/keyrings/salt-archive-keyring-2023.gpg https://repo.saltproject.io/salt_rc/salt/py3/ubuntu/20.04/amd64/minor/3006.0rc3/SALT-PROJECT-GPG-PUBKEY-2023.gpg"
+    echo "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023.gpg] https://repo.saltproject.io/salt_rc/salt/py3/ubuntu/20.04/amd64/minor/3006.0rc3/ focal main" | sudo tee /etc/apt/sources.list.d/salt.list
+    logCmd "apt-key add /etc/apt/keyrings/salt-archive-keyring-2023.gpg"
+    #logCmd "apt-key add /opt/so/gpg/SALTSTACK-GPG-KEY.pub"
+    logCmd "apt-key add /etc/apt/keyrings/docker.pub"
     # Add SO Saltstack Repo
-    echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/20.04/amd64/salt3004.2/ focal main" > /etc/apt/sources.list.d/saltstack.list
+    #echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/20.04/amd64/salt3004.2/ focal main" > /etc/apt/sources.list.d/saltstack.list
     # Add Docker Repo
     add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
@@ -2023,28 +2038,26 @@ saltify() {
     # Ain't nothing but a GPG
     retry 150 20 "apt-get update" "" "Err:" || exit 1
-    retry 150 20 "apt-get -y install salt-minion=3004.2+ds-1 salt-common=3004.2+ds-1" || exit 1
+    retry 150 20 "apt-get -y install salt-common salt-minion" || exit 1
     retry 150 20 "apt-mark hold salt-minion salt-common" || exit 1
-    retry 150 20 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb python3-lxml" || exit 1
+    #retry 150 20 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb python3-lxml" || exit 1
   fi
   if [[ $is_rocky ]]; then
-    # THIS IS A TEMP HACK
-    #logCmd "dnf -y install securityonion-salt python3-audit python3-libsemanage python3-policycoreutils python3-setools python3-setuptools python3-chardet python3-idna python3-pysocks python3-requests python3-urllib3 python3-websocket-client python3-docker"
+    if [[ $waitforstate ]]; then
+      # install all for a manager
       logCmd "dnf -y install salt salt-master salt-minion"
-    logCmd "mkdir -p /etc/salt/minion.d"
-    logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/"
-    logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/"
-    #if [[ $waitforstate ]]; then
-    #  # Since this is a salt master so let's install it
-    #  logCmd ""
-    #else
-    #  # We just need the minion
-    #  logCmd "dnf -y install salt-minion"
-    #fi
+    else
+      # We just need the minion
+      logCmd "dnf -y install salt salt-minion"
+    fi
   fi
+  logCmd "mkdir -p /etc/salt/minion.d"
+  logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/"
+  logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/"
 }
@@ -2383,7 +2396,9 @@ update_packages() {
     logCmd "dnf repolist"
     logCmd "dnf -y update --allowerasing --exclude=salt*,wazuh*,docker*,containerd*"
   else
-    retry 150 10 "apt-get -y update" >> "$setup_log" 2>&1 || exit 1
+    info "Running apt-get update"
+    retry 150 10 "apt-get -y update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
+    info "Running apt-get upgrade"
     retry 150 10 "apt-get -y upgrade" >> "$setup_log" 2>&1 || exit 1
   fi
 }
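
The keyring rework above follows the current apt convention: apt-key is deprecated, so repository keys live under /etc/apt/keyrings and each source references its key with signed-by. A generic sketch of the pattern, with placeholder URLs and file names:

    sudo mkdir -p /etc/apt/keyrings
    sudo curl -fsSL -o /etc/apt/keyrings/example-archive-keyring.gpg https://example.com/repo/key.gpg
    echo "deb [signed-by=/etc/apt/keyrings/example-archive-keyring.gpg] https://example.com/repo focal main" | \
      sudo tee /etc/apt/sources.list.d/example.list
    sudo apt-get update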

View File

@@ -337,7 +337,7 @@ if ! [[ -f $install_opt_file ]]; then
   # If you are a manager ask ALL the manager things here. I know there is code re-use but this makes it easier to add new roles.
   if [[ $is_eval ]]; then
     waitforstate=true
-    ubuntu_check
+    #ubuntu_check
     monints=true
     check_elastic_license
     check_requirements "manager"
@@ -361,7 +361,7 @@ if ! [[ -f $install_opt_file ]]; then
     whiptail_end_settings
   elif [[ $is_standalone ]]; then
     waitforstate=true
-    ubuntu_check
+    #ubuntu_check
     monints=true
     check_elastic_license
     check_requirements "manager"
@@ -386,7 +386,7 @@ if ! [[ -f $install_opt_file ]]; then
   elif [[ $is_manager ]]; then
     check_elastic_license
     waitforstate=true
-    ubuntu_check
+    #ubuntu_check
     check_requirements "manager"
     networking_needful
     collect_net_method
@@ -408,7 +408,7 @@ if ! [[ -f $install_opt_file ]]; then
   elif [[ $is_managersearch ]]; then
     check_elastic_license
     waitforstate=true
-    ubuntu_check
+    #ubuntu_check
     check_requirements "manager"
     networking_needful
     collect_net_method
@@ -428,7 +428,8 @@ if ! [[ -f $install_opt_file ]]; then
     collect_so_allow
     whiptail_end_settings
   elif [[ $is_sensor ]]; then
-    ubuntu_check
+    #ubuntu_check
+    installer_prereq_packages
     monints=true
     check_requirements "sensor"
     calculate_useable_cores
@@ -444,7 +445,8 @@ if ! [[ -f $install_opt_file ]]; then
     whiptail_end_settings
   elif [[ $is_searchnode ]]; then
-    ubuntu_check
+    #ubuntu_check
+    installer_prereq_packages
     check_requirements "elasticsearch"
     networking_needful
     check_network_manager_conf
@@ -457,7 +459,8 @@ if ! [[ -f $install_opt_file ]]; then
     whiptail_end_settings
   elif [[ $is_heavynode ]]; then
-    ubuntu_check
+    #ubuntu_check
+    installer_prereq_packages
     monints=true
     check_requirements "heavynode"
     calculate_useable_cores
@@ -469,7 +472,8 @@ if ! [[ -f $install_opt_file ]]; then
     whiptail_end_settings
   elif [[ $is_idh ]]; then
-    ubuntu_check
+    #ubuntu_check
+    installer_prereq_packages
     check_requirements "idh"
     networking_needful
     collect_mngr_hostname
@@ -481,7 +485,7 @@ if ! [[ -f $install_opt_file ]]; then
     whiptail_end_settings
   elif [[ $is_import ]]; then
-    ubuntu_check
+    #ubuntu_check
     waitforstate=true
     monints=true
     check_elastic_license
@@ -503,7 +507,8 @@ if ! [[ -f $install_opt_file ]]; then
     whiptail_end_settings
   elif [[ $is_receiver ]]; then
-    ubuntu_check
+    #ubuntu_check
+    installer_prereq_packages
     check_requirements "receiver"
     networking_needful
     collect_mngr_hostname
@@ -594,6 +599,7 @@ if ! [[ -f $install_opt_file ]]; then
   # Start the master service
   copy_salt_master_config
   configure_minion "$minion_type"
+  check_sos_appliance
   logCmd "salt-key -yd $MINION_ID"
   logCmd "salt-call state.show_top"
@@ -656,6 +662,7 @@ if ! [[ -f $install_opt_file ]]; then
   update_packages
   saltify
   configure_minion "$minion_type"
+  check_sos_appliance
   drop_install_options
   checkin_at_boot
   logCmd "salt-call state.apply setup.highstate_cron --local --file-root=../salt/"

View File

@@ -523,15 +523,24 @@ whiptail_install_type() {
   [ -n "$TESTING" ] && return
   # What kind of install are we doing?
-  install_type=$(whiptail --title "$whiptail_title" --menu \
-  "What kind of installation would you like to do?\n\nFor more information, please see:\n$DOC_BASE_URL/architecture.html" 18 65 5 \
-  "IMPORT" "Import PCAP or log files " \
-  "EVAL" "Evaluation mode (not for production) " \
-  "STANDALONE" "Standalone production install " \
-  "DISTRIBUTED" "Distributed install submenu " \
-  "OTHER" "Other install types" \
-  3>&1 1>&2 2>&3
-  )
+  if [[ $OS = 'rocky' ]]; then
+    install_type=$(whiptail --title "$whiptail_title" --menu \
+    "What kind of installation would you like to do?\n\nFor more information, please see:\n$DOC_BASE_URL/architecture.html" 18 65 5 \
+    "IMPORT" "Import PCAP or log files " \
+    "EVAL" "Evaluation mode (not for production) " \
+    "STANDALONE" "Standalone production install " \
+    "DISTRIBUTED" "Distributed install submenu " \
+    "OTHER" "Other install types" \
+    3>&1 1>&2 2>&3
+    )
+  elif [[ $OS = 'ubuntu' ]]; then
+    install_type=$(whiptail --title "$whiptail_title" --menu \
+    "What kind of installation would you like to do?\n\nFor more information, please see:\n$DOC_BASE_URL/architecture.html" 18 65 5 \
+    "DISTRIBUTED" "Distributed install submenu " \
+    "OTHER" "Other install types" \
+    3>&1 1>&2 2>&3
+    )
+  fi
   local exitstatus=$?
   whiptail_check_exitstatus $exitstatus
@@ -554,11 +563,19 @@ whiptail_install_type_dist() {
   [ -n "$TESTING" ] && return
+  if [[ $OS = 'rocky' ]]; then
   dist_option=$(whiptail --title "$whiptail_title" --menu "Do you want to start a new deployment or join this box to \nan existing deployment?" 11 75 2 \
   "New Deployment " "Create a new Security Onion deployment" \
   "Existing Deployment " "Join to an existing Security Onion deployment " \
   3>&1 1>&2 2>&3
   )
+  elif [[ $OS = 'ubuntu' ]]; then
+    dist_option=$(whiptail --title "$whiptail_title" --menu "Since this is Ubuntu, this box can only be connected to \nan existing deployment." 11 75 2 \
+    "Existing Deployment " "Join to an existing Security Onion deployment " \
+    3>&1 1>&2 2>&3
+    )
+  fi
   local exitstatus=$?
   whiptail_check_exitstatus $exitstatus
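
Both menus capture the selection through the 3>&1 1>&2 2>&3 redirection, the usual whiptail trick: the dialog is drawn on stderr while the chosen tag goes to stdout, so swapping the descriptors lets command substitution capture it. A minimal standalone sketch:

    #!/bin/bash
    # The selected tag (ONE or TWO) lands in $choice
    choice=$(whiptail --title "Demo" --menu "Pick one" 12 50 2 \
      "ONE" "First option " \
      "TWO" "Second option " \
      3>&1 1>&2 2>&3)
    echo "Selected: $choice"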