Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2026-02-21 14:35:27 +01:00)

Commit: Merge remote-tracking branch 'remotes/origin/dev' into newrepo
@@ -1 +1 @@
-net.ipv4.ip_local_reserved_ports=55000,57314
+net.ipv4.ip_local_reserved_ports=55000,57314,47760-47860
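
Once the widened value is applied, the kernel setting can be confirmed directly; a minimal verification sketch (the sysctl key is the one changed above, the grep target is only illustrative):

    # Print the currently reserved local ports and confirm the new Zeek range is included
    sysctl -n net.ipv4.ip_local_reserved_ports
    sysctl -n net.ipv4.ip_local_reserved_ports | grep -q '47760-47860' && echo "Zeek port range reserved"
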
@@ -330,9 +330,10 @@ docker:
     - file: docker_daemon
 
 # Reserve OS ports for Docker proxy in case boot settings are not already applied/present
+# 55000 = Wazuh, 57314 = Strelka, 47760-47860 = Zeek
 dockerapplyports:
   cmd.run:
-    - name: if [ ! -s /etc/sysctl.d/99-reserved-ports.conf ]; then sysctl -w net.ipv4.ip_local_reserved_ports="55000,57314"; fi
+    - name: if [ ! -s /etc/sysctl.d/99-reserved-ports.conf ]; then sysctl -w net.ipv4.ip_local_reserved_ports="55000,57314,47760-47860"; fi
 
 # Reserve OS ports for Docker proxy
 dockerreserveports:
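
The dockerapplyports command is a runtime fallback: [ ! -s ... ] is true only when the persistent sysctl file is missing or empty, so the ports are reserved on the fly until the boot-time configuration exists. The same guard outside of Salt (path and values taken from the state above):

    # -s tests for an existing, non-empty file; the runtime fallback is skipped
    # once /etc/sysctl.d/99-reserved-ports.conf has been written
    if [ ! -s /etc/sysctl.d/99-reserved-ports.conf ]; then
        sysctl -w net.ipv4.ip_local_reserved_ports="55000,57314,47760-47860"
    fi
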
@@ -436,6 +436,20 @@ valid_proxy() {
   [[ $has_prefix == true ]] && [[ $valid_url == true ]] && return 0 || return 1
 }
 
+valid_ntp_list() {
+  local string=$1
+  local ntp_arr
+  IFS="," read -r -a ntp_arr <<< "$string"
+
+  for ntp in "${ntp_arr[@]}"; do
+    if ! valid_ip4 "$ntp" && ! valid_hostname "$ntp" && ! valid_fqdn "$ntp"; then
+      return 1
+    fi
+  done
+
+  return 0
+}
+
 valid_string() {
   local str=$1
   local min_length=${2:-1}
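
A usage sketch for the new validator; valid_ip4, valid_hostname, and valid_fqdn are the existing so-common helpers it delegates to, and the server names below are only examples:

    # Every comma-separated entry must be an IPv4 address, hostname, or FQDN
    if valid_ntp_list "0.pool.ntp.org,192.0.2.10"; then
        echo "NTP server list accepted"
    else
        echo "NTP server list rejected"
    fi
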
@@ -17,4 +17,8 @@
 
 . /usr/sbin/so-common
 
+# Check to see if we are already running
+IS_RUNNING=$(ps aux | pgrep -f "so-playbook-sync" | wc -l)
+[ "$IS_RUNNING" -gt 2 ] && echo "$(date) - Multiple Playbook Sync processes already running...exiting." && exit 0
+
 docker exec so-soctopus python3 playbook_play-sync.py
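
The sync script now bails out early when another copy is already mid-run; a standalone sketch of the same pattern (the threshold of 2 presumably leaves room for the script's own process and the command substitution, as in the diff):

    # Count command lines matching the script name and exit quietly if another copy is active
    IS_RUNNING=$(pgrep -f "so-playbook-sync" | wc -l)
    [ "$IS_RUNNING" -gt 2 ] && echo "$(date) - already running, exiting." && exit 0
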
@@ -115,7 +115,7 @@ clean() {
 }
 
 # Check to see if we are already running
-IS_RUNNING=$(ps aux | grep "so-sensor-clean" | grep -v grep | wc -l)
+IS_RUNNING=$(ps aux | pgrep -f "so-sensor-clean" | wc -l)
 [ "$IS_RUNNING" -gt 2 ] && echo "$(date) - $IS_RUNNING sensor clean script processes running...exiting." >>$LOG && exit 0
 
 if [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; then
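
This is the same guard swapped in for the older grep pipeline. pgrep -f matches against full command lines and never reports its own process, so the grep -v grep self-filter becomes unnecessary; side by side (process name taken from the diff):

    # Old: grep matches its own command line, so it has to be filtered back out
    ps aux | grep "so-sensor-clean" | grep -v grep | wc -l
    # New: pgrep -f searches full command lines and excludes itself automatically
    pgrep -f "so-sensor-clean" | wc -l
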
@@ -34,7 +34,7 @@ overlimit() {
 
 closedindices() {
 
-  INDICES=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
+  INDICES=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
   [ $? -eq 1 ] && return false
   echo ${INDICES} | grep -q -E "(logstash-|so-)"
 }
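
After Salt renders the template, closedindices() reduces to a plain query against the _cat API; a sketch with placeholder connection details (host and port are substituted from pillar values at deploy time):

    # List only closed indices and check whether any Security Onion index is among them
    INDICES=$(curl -s -k "https://192.0.2.50:9200/_cat/indices?h=index&expand_wildcards=closed" 2>/dev/null)
    echo "${INDICES}" | grep -q -E "(logstash-|so-)" && echo "closed SO indices present"
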
@@ -49,12 +49,12 @@ while overlimit && closedindices; do
   # First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed.
   # Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
   # Finally, select the first entry in that sorted list.
-  OLDEST_INDEX=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)
+  OLDEST_INDEX=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)
 
   # Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
-  curl -XDELETE -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
+  curl -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
 
   # Finally, write a log entry that says we deleted it.
   echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG}
 
 done
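
The sort works because the index names carry their date as the third hyphen-delimited field, so ordering on that field and taking the first entry yields the oldest closed index; a small worked example with made-up names of the expected shape:

    # sort -t- -k3 orders on the date component; head -1 picks the oldest
    printf '%s\n' "so-zeek-2021.03.10" "logstash-ids-2021.02.15" "so-zeek-2021.01.02" | sort -t- -k3 | head -1
    # -> so-zeek-2021.01.02
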
@@ -17,7 +17,7 @@ class PlaybookESAlerter(Alerter):
     def alert(self, matches):
         for match in matches:
             today = strftime("%Y.%m.%d", gmtime())
-            timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime())
+            timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
             headers = {"Content-Type": "application/json"}
             payload = {"rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
             url = f"https://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/"
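
The only change in the alerter is the timestamp format: appending .000Z stamps the value explicitly as UTC with millisecond precision, so Elasticsearch indexes the alert time unambiguously. Roughly the same two formats rendered from a shell for comparison (illustrative only; the alerter itself uses Python's strftime):

    # Old format vs. new format for the same instant
    date -u +"%Y-%m-%dT%H:%M:%S"        # e.g. 2021-05-04T12:00:00
    date -u +"%Y-%m-%dT%H:%M:%S.000Z"   # e.g. 2021-05-04T12:00:00.000Z
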
@@ -18,14 +18,18 @@
 {# This block translate the portgroups defined in the pillar to what is defined my portgroups.yaml and portgroups.local.yaml #}
 {% if salt['pillar.get']('firewall:assigned_hostgroups:chain') %}
   {% set translated_pillar_assigned_hostgroups = {'chain': {}} %}
 
   {% for chain, hg in salt['pillar.get']('firewall:assigned_hostgroups:chain').items() %}
     {% for pillar_hostgroup, pillar_portgroups in salt['pillar.get']('firewall:assigned_hostgroups:chain')[chain].hostgroups.items() %}
-      {% do translated_pillar_assigned_hostgroups.update({"chain": {chain: {"hostgroups": {pillar_hostgroup: {"portgroups": []}}}}}) %}
+      {% if translated_pillar_assigned_hostgroups.chain[chain] is defined %}
+        {% do translated_pillar_assigned_hostgroups.chain[chain].hostgroups.update({pillar_hostgroup: {"portgroups": []}}) %}
+      {% else %}
+        {% do translated_pillar_assigned_hostgroups.chain.update({chain: {"hostgroups": {pillar_hostgroup: {"portgroups": []}}}}) %}
+      {% endif %}
       {% for pillar_portgroup in pillar_portgroups.portgroups %}
         {% set pillar_portgroup = pillar_portgroup.split('.') | last %}
         {% do translated_pillar_assigned_hostgroups.chain[chain].hostgroups[pillar_hostgroup].portgroups.append(defined_portgroups[pillar_portgroup]) %}
 
       {% endfor %}
     {% endfor %}
   {% endfor %}
@@ -39,7 +43,6 @@
     {% set assigned_hostgroups = default_assigned_hostgroups.role[role] %}
   {% endif %}
 
-
   {% if translated_pillar_assigned_hostgroups %}
     {% do salt['defaults.merge'](assigned_hostgroups, translated_pillar_assigned_hostgroups, merge_lists=True, in_place=True) %}
   {% endif %}
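
The new if/else keeps portgroups already translated for a chain instead of overwriting them whenever another hostgroup is processed under the same chain, and the merged result is folded into assigned_hostgroups by defaults.merge. The split('.') | last step that feeds the lookup simply strips any dotted prefix from a pillar portgroup reference; the same last-segment reduction in shell terms (the entry name is only an example):

    # Keep only the text after the final dot, e.g. "portgroups.wazuh" -> "wazuh"
    pg="portgroups.wazuh"
    echo "${pg##*.}"
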
@@ -1,7 +1,7 @@
 [
   { "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "target": "",
     "links": [
-      "/#/hunt?q=\"{value}\" | groupby event.module event.dataset"
+      "/#/hunt?q=\"{value|escape}\" | groupby event.module event.dataset"
     ]},
   { "name": "actionCorrelate", "description": "actionCorrelateHelp", "icon": "fab fa-searchengin", "target": "",
     "links": [
@@ -1,7 +1,7 @@
 [
   { "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "target": "",
     "links": [
-      "/#/hunt?q=\"{value}\" | groupby event.module event.dataset"
+      "/#/hunt?q=\"{value|escape}\" | groupby event.module event.dataset"
     ]},
   { "name": "actionCorrelate", "description": "actionCorrelateHelp", "icon": "fab fa-searchengin", "target": "",
     "links": [
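
Both copies of actions.json get the same fix: the clicked value is now passed through the escape filter before being spliced into the quoted hunt query. A hypothetical value containing double quotes shows what the unescaped form breaks (value and output below are illustrative only):

    # An embedded double quote would terminate the quoted query term early in the old template
    value='cmd.exe /c "whoami"'
    echo "/#/hunt?q=\"${value}\" | groupby event.module event.dataset"
    # -> /#/hunt?q="cmd.exe /c "whoami"" | groupby event.module event.dataset
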