Mirror of https://github.com/Security-Onion-Solutions/securityonion.git, synced 2025-12-07 17:52:46 +01:00

Merge remote-tracking branch 'remotes/origin/dev' into newrepo
@@ -1 +1 @@
-net.ipv4.ip_local_reserved_ports=55000,57314
+net.ipv4.ip_local_reserved_ports=55000,57314,47760-47860
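Reserving these ports keeps the kernel from handing them out as ephemeral source ports, so the Docker proxy can always bind them. A quick way to confirm the setting took effect (a minimal check, not part of the commit):

    sysctl net.ipv4.ip_local_reserved_ports
    # expected after this change:
    # net.ipv4.ip_local_reserved_ports = 55000,57314,47760-47860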
@@ -330,9 +330,10 @@ docker:
     - file: docker_daemon
 
 # Reserve OS ports for Docker proxy in case boot settings are not already applied/present
+# 55000 = Wazuh, 57314 = Strelka, 47760-47860 = Zeek
 dockerapplyports:
   cmd.run:
-    - name: if [ ! -s /etc/sysctl.d/99-reserved-ports.conf ]; then sysctl -w net.ipv4.ip_local_reserved_ports="55000,57314"; fi
+    - name: if [ ! -s /etc/sysctl.d/99-reserved-ports.conf ]; then sysctl -w net.ipv4.ip_local_reserved_ports="55000,57314,47760-47860"; fi
 
 # Reserve OS ports for Docker proxy
 dockerreserveports:
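The [ ! -s ... ] guard skips the runtime sysctl call whenever the persistent boot configuration already exists and is non-empty. For reference, that persistent file would look something like this (a sketch inferred from the runtime command above):

    # /etc/sysctl.d/99-reserved-ports.conf
    net.ipv4.ip_local_reserved_ports=55000,57314,47760-47860

    # apply without rebooting:
    sysctl -p /etc/sysctl.d/99-reserved-ports.conf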
@@ -436,6 +436,20 @@ valid_proxy() {
   [[ $has_prefix == true ]] && [[ $valid_url == true ]] && return 0 || return 1
 }
 
+valid_ntp_list() {
+  local string=$1
+  local ntp_arr
+  IFS="," read -r -a ntp_arr <<< "$string"
+
+  for ntp in "${ntp_arr[@]}"; do
+    if ! valid_ip4 "$ntp" && ! valid_hostname "$ntp" && ! valid_fqdn "$ntp"; then
+      return 1
+    fi
+  done
+
+  return 0
+}
+
 valid_string() {
   local str=$1
   local min_length=${2:-1}
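Usage is straightforward. Assuming so-common is sourced (it provides valid_ip4, valid_hostname, and valid_fqdn), a quick sanity check might look like:

    . /usr/sbin/so-common

    if valid_ntp_list "0.pool.ntp.org,10.0.0.5,ntp.example.com"; then
        echo "NTP list OK"
    else
        echo "NTP list contains an invalid entry"
    fi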
@@ -17,4 +17,8 @@
 
 . /usr/sbin/so-common
 
+# Check to see if we are already running
+IS_RUNNING=$(ps aux | pgrep -f "so-playbook-sync" | wc -l)
+[ "$IS_RUNNING" -gt 2 ] && echo "$(date) - Multiple Playbook Sync processes already running...exiting." && exit 0
+
 docker exec so-soctopus python3 playbook_play-sync.py
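A note on the guard: pgrep scans the process table itself and ignores stdin, so the leading "ps aux |" stage feeds it nothing. Because pgrep -f matches full command lines, it also matches this script's own process, which is why the threshold is 2 rather than 1. Stripped down:

    # from inside the script, this prints at least 1 even when
    # no other copy is running
    pgrep -f "so-playbook-sync" | wc -l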
@@ -115,7 +115,7 @@ clean() {
 }
 
 # Check to see if we are already running
-IS_RUNNING=$(ps aux | grep "so-sensor-clean" | grep -v grep | wc -l)
+IS_RUNNING=$(ps aux | pgrep -f "so-sensor-clean" | wc -l)
 [ "$IS_RUNNING" -gt 2 ] && echo "$(date) - $IS_RUNNING sensor clean script processes running...exiting." >>$LOG && exit 0
 
 if [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; then
@@ -34,7 +34,7 @@ overlimit() {
 
 closedindices() {
 
-  INDICES=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
+  INDICES=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
   [ $? -eq 1 ] && return false
   echo ${INDICES} | grep -q -E "(logstash-|so-)"
 }
@@ -49,10 +49,10 @@ while overlimit && closedindices; do
   # First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed.
   # Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
   # Finally, select the first entry in that sorted list.
-  OLDEST_INDEX=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)
+  OLDEST_INDEX=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)
 
   # Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
-  curl -XDELETE -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
+  curl -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
 
   # Finally, write a log entry that says we deleted it.
   echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG}
@@ -17,7 +17,7 @@ class PlaybookESAlerter(Alerter):
     def alert(self, matches):
         for match in matches:
             today = strftime("%Y.%m.%d", gmtime())
-            timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime())
+            timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
             headers = {"Content-Type": "application/json"}
             payload = {"rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
             url = f"https://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/"
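The appended '.000Z' makes the timestamp an explicit UTC ISO 8601 value, which Elasticsearch's default date parsing (strict_date_optional_time) accepts without guessing the timezone. The shell equivalent, for comparison only:

    date -u +"%Y-%m-%dT%H:%M:%S.000Z"
    # e.g. 2021-03-15T09:30:00.000Z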
@@ -18,14 +18,18 @@
 
 {# This block translates the portgroups defined in the pillar to what is defined by portgroups.yaml and portgroups.local.yaml #}
 {% if salt['pillar.get']('firewall:assigned_hostgroups:chain') %}
 {% set translated_pillar_assigned_hostgroups = {'chain': {}} %}
 
 {% for chain, hg in salt['pillar.get']('firewall:assigned_hostgroups:chain').items() %}
 {% for pillar_hostgroup, pillar_portgroups in salt['pillar.get']('firewall:assigned_hostgroups:chain')[chain].hostgroups.items() %}
-{% do translated_pillar_assigned_hostgroups.update({"chain": {chain: {"hostgroups": {pillar_hostgroup: {"portgroups": []}}}}}) %}
+{% if translated_pillar_assigned_hostgroups.chain[chain] is defined %}
+{% do translated_pillar_assigned_hostgroups.chain[chain].hostgroups.update({pillar_hostgroup: {"portgroups": []}}) %}
+{% else %}
+{% do translated_pillar_assigned_hostgroups.chain.update({chain: {"hostgroups": {pillar_hostgroup: {"portgroups": []}}}}) %}
+{% endif %}
 {% for pillar_portgroup in pillar_portgroups.portgroups %}
 {% set pillar_portgroup = pillar_portgroup.split('.') | last %}
 {% do translated_pillar_assigned_hostgroups.chain[chain].hostgroups[pillar_hostgroup].portgroups.append(defined_portgroups[pillar_portgroup]) %}
 
 {% endfor %}
 {% endfor %}
 {% endfor %}
@@ -39,7 +43,6 @@
 {% set assigned_hostgroups = default_assigned_hostgroups.role[role] %}
 {% endif %}
 
-
 {% if translated_pillar_assigned_hostgroups %}
 {% do salt['defaults.merge'](assigned_hostgroups, translated_pillar_assigned_hostgroups, merge_lists=True, in_place=True) %}
 {% endif %}
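To inspect the data this template consumes and produces on a live minion, the standard Salt CLI works; the state name "firewall" below is an assumption based on this file's context:

    # raw pillar data that the block above translates
    salt-call pillar.get firewall:assigned_hostgroups

    # render the state to see the merged result
    salt-call state.show_sls firewall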
@@ -1,7 +1,7 @@
 [
   { "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "target": "",
     "links": [
-      "/#/hunt?q=\"{value}\" | groupby event.module event.dataset"
+      "/#/hunt?q=\"{value|escape}\" | groupby event.module event.dataset"
     ]},
   { "name": "actionCorrelate", "description": "actionCorrelateHelp", "icon": "fab fa-searchengin", "target": "",
     "links": [
@@ -1,7 +1,7 @@
 [
   { "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "target": "",
     "links": [
-      "/#/hunt?q=\"{value}\" | groupby event.module event.dataset"
+      "/#/hunt?q=\"{value|escape}\" | groupby event.module event.dataset"
     ]},
   { "name": "actionCorrelate", "description": "actionCorrelateHelp", "icon": "fab fa-searchengin", "target": "",
     "links": [
@@ -395,7 +395,7 @@ collect_hostname() {
 
   if [[ $HOSTNAME == 'securityonion' ]]; then # Will only check HOSTNAME=securityonion once
     if ! (whiptail_avoid_default_hostname); then
-      whiptail_set_hostname
+      whiptail_set_hostname "$HOSTNAME"
     fi
   fi
@@ -481,6 +481,22 @@ collect_node_ls_pipeline_worker_count() {
   done
 }
 
+collect_ntp_servers() {
+  if whiptail_ntp_ask; then
+    [[ $is_airgap ]] && ntp_string=""
+    whiptail_ntp_servers "$ntp_string"
+
+    while ! valid_ntp_list "$ntp_string"; do
+      whiptail_invalid_input
+      whiptail_ntp_servers "$ntp_string"
+    done
+
+    IFS="," read -r -a ntp_servers <<< "$ntp_string" # Split string on commas into array
+  else
+    ntp_servers=()
+  fi
+}
+
 collect_oinkcode() {
   whiptail_oinkcode
@@ -576,7 +592,7 @@ collect_proxy_details() {
   else
     so_proxy="$proxy_addr"
   fi
   export proxy
   export so_proxy
   fi
 }
@@ -697,6 +713,42 @@ configure_minion() {
   } >> "$setup_log" 2>&1
 }
 
+configure_ntp() {
+  local chrony_conf=/etc/chrony.conf
+
+  # Install chrony if it isn't already installed
+  if ! command -v chronyc &> /dev/null; then
+    yum -y install chrony
+  fi
+
+  [[ -f $chrony_conf ]] && mv $chrony_conf "$chrony_conf.bak"
+
+  printf '%s\n' "# NTP server list" > $chrony_conf
+
+  # Build list of servers
+  for addr in "${ntp_servers[@]}"; do
+    echo "server $addr iburst" >> $chrony_conf
+  done
+
+  printf '\n%s\n' "# Config options" >> $chrony_conf
+
+  printf '%s\n' \
+    'driftfile /var/lib/chrony/drift' \
+    'makestep 1.0 3' \
+    'rtcsync' \
+    'logdir /var/log/chrony' >> $chrony_conf
+
+  systemctl enable chronyd
+  systemctl restart chronyd
+
+  # Tell the chrony daemon to sync time & update the system time
+  # Since these commands only make a call to chronyd, wait after each command to make sure the changes are made
+  printf "Syncing chrony time to server: "
+  chronyc -a 'burst 4/4' && sleep 30
+  printf "Forcing chrony to update the time: "
+  chronyc -a makestep && sleep 30
+}
+
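After a run like this, chrony's state can be verified with its standard subcommands (not part of the commit):

    chronyc sources -v    # configured servers and their reachability
    chronyc tracking      # current offset, stratum, and sync status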
 checkin_at_boot() {
   local minion_config=/etc/salt/minion

@@ -1505,8 +1557,7 @@ manager_pillar() {
   printf '%s\n'\
   " kratoskey: '$KRATOSKEY'"\
   "" >> "$pillar_file"
-
   }
 }
 
 manager_global() {
   local global_pillar="$local_salt_dir/pillar/global.sls"
@@ -1530,7 +1581,6 @@
 "global:"\
 " soversion: '$SOVERSION'"\
 " hnmanager: '$HNMANAGER'"\
-" ntpserver: '$NTPSERVER'"\
 " dockernet: '$DOCKERNET'"\
 " mdengine: '$ZEEKVERSION'"\
 " ids: '$NIDS'"\
@@ -1685,7 +1735,6 @@
 "redis_settings:"\
 " redis_maxmemory: 812" >> "$global_pillar"
 
-
 printf '%s\n' '----' >> "$setup_log" 2>&1
 }
@@ -1747,6 +1796,19 @@ network_setup() {
   } >> "$setup_log" 2>&1
 }
 
+ntp_pillar() {
+  local pillar_file="$temp_install_dir"/pillar/minions/"$MINION_ID".sls
+
+  if [[ ${#ntp_servers[@]} -gt 0 ]]; then
+    printf '%s\n'\
+    "ntp:"\
+    " servers:" >> "$pillar_file"
+    for addr in "${ntp_servers[@]}"; do
+      printf '%s\n' " - '$addr'" >> "$pillar_file"
+    done
+  fi
+}
+
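For example, with ntp_servers=(0.pool.ntp.org 1.pool.ntp.org) — the defaults set later in this commit — the fragment appended to the minion pillar would be:

    ntp:
     servers:
     - '0.pool.ntp.org'
     - '1.pool.ntp.org'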
 parse_install_username() {
   # parse out the install username so things copy correctly
   INSTALLUSERNAME=${SUDO_USER:-${USER}}
@@ -292,13 +292,22 @@ if ! [[ -f $install_opt_file ]]; then
 
 [[ -f $net_init_file ]] && whiptail_net_reinit && reinit_networking=true
 
 if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
   collect_hostname
 fi
 
 whiptail_node_description
 
 if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
   network_init_whiptail
 else
   source "$net_init_file"
 fi
 
 if [[ $is_minion ]] || [[ $reinit_networking ]] || [[ $is_iso ]] && ! [[ -f $net_init_file ]]; then
   whiptail_management_interface_setup
 fi
 
 if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
   network_init
 fi
@@ -316,10 +325,6 @@ if ! [[ -f $install_opt_file ]]; then
 [[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1
 fi
 
-if [[ $is_minion ]] || [[ $reinit_networking ]] || [[ $is_iso ]] && ! [[ -f $net_init_file ]]; then
-  whiptail_management_interface_setup
-fi
-
 if [[ $is_minion ]]; then
   add_mngr_ip_to_hosts
 fi
@@ -335,7 +340,8 @@ if ! [[ -f $install_opt_file ]]; then
   "MNIC=$MNIC" \
   "HOSTNAME=$HOSTNAME" \
   "MSRV=$MSRV" \
-  "MSRVIP=$MSRVIP" > "$install_opt_file"
+  "MSRVIP=$MSRVIP" \
+  "NODE_DESCRIPTION=$NODE_DESCRIPTION" > "$install_opt_file"
   [[ -n $so_proxy ]] && echo "so_proxy=$so_proxy" >> "$install_opt_file"
   download_repo_tarball
   exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}"
@@ -535,6 +541,8 @@ if [[ $is_sensor && ! $is_eval ]]; then
   fi
 fi
 
+[[ $is_iso ]] && collect_ntp_servers
+
 if [[ $is_node && ! $is_eval ]]; then
   whiptail_node_advanced
   if [ "$NODESETUP" == 'NODEADVANCED' ]; then
@@ -582,6 +590,8 @@ set_redirect >> $setup_log 2>&1
 # Show initial progress message
 set_progress_str 0 'Running initial configuration steps'
 
+[[ ${#ntp_servers[@]} -gt 0 ]] && configure_ntp >> $setup_log 2>&1
+
 reserve_ports
 
 set_path
@@ -614,6 +624,8 @@ set_redirect >> $setup_log 2>&1
 fi
 
 host_pillar >> $setup_log 2>&1
+ntp_pillar >> $setup_log 2>&1
+
 
 if [[ $is_minion || $is_import ]]; then
   set_updates >> $setup_log 2>&1
@@ -72,3 +72,6 @@ export install_opt_file
 
 net_init_file=/root/net_init
 export net_init_file
+
+ntp_string="0.pool.ntp.org,1.pool.ntp.org"
+export ntp_string
@@ -1044,6 +1044,16 @@ whiptail_node_advanced() {
 
 }
 
+whiptail_node_description() {
+  [ -n "$TESTING" ] && return
+
+  NODE_DESCRIPTION=$(whiptail --title "Security Onion Setup" \
+    --inputbox "Enter a short description for the node or press ENTER to leave blank:" 10 75 3>&1 1>&2 2>&3)
+
+  local exitstatus=$?
+  whiptail_check_exitstatus $exitstatus
+}
+
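The 3>&1 1>&2 2>&3 redirection swaps stdout and stderr inside the command substitution: whiptail draws its interface on stdout (now pointed at the terminal's stderr) while it writes the user's answer to stderr (now captured by the substitution). Stripped to its essentials:

    # fd3 -> captured stdout; stdout -> terminal; stderr (the answer) -> fd3
    answer=$(whiptail --inputbox "Value?" 8 40 3>&1 1>&2 2>&3)
    echo "Entered: $answer"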
 whiptail_node_es_heap() {
 
   [ -n "$TESTING" ] && return
@@ -1105,6 +1115,22 @@ whiptail_node_ls_pipeline_worker() {
 
 }
 
+whiptail_ntp_ask() {
+  [ -n "$TESTING" ] && return
+
+  whiptail --title "Security Onion Setup" --yesno "Would you like to configure ntp servers?" 7 44
+}
+
+whiptail_ntp_servers() {
+  [ -n "$TESTING" ] && return
+
+  ntp_string=$(whiptail --title "Security Onion Setup" \
+    --inputbox "Input the NTP server(s) you would like to use, separated by commas:" 8 75 "$1" 3>&1 1>&2 2>&3)
+
+  local exitstatus=$?
+  whiptail_check_exitstatus $exitstatus
+}
+
 whiptail_oinkcode() {
 
   [ -n "$TESTING" ] && return
@@ -1271,11 +1297,7 @@ whiptail_proxy_auth_pass() {
 
   [ -n "$TESTING" ] && return
 
-  if [[ $arg != 'confirm' ]]; then
-    proxy_pass=$(whiptail --title "Security Onion Setup" --passwordbox "Please input the proxy password:" 8 60 3>&1 1>&2 2>&3)
-  else
-    proxy_pass_confirm=$(whiptail --title "Security Onion Setup" --passwordbox "Please confirm the proxy password:" 8 60 3>&1 1>&2 2>&3)
-  fi
+  proxy_pass=$(whiptail --title "Security Onion Setup" --passwordbox "Please input the proxy password:" 8 60 3>&1 1>&2 2>&3)
 
   local exitstatus=$?
   whiptail_check_exitstatus $exitstatus