Compare commits


1 Commit

Author: m0duspwnens
SHA1: 50ab63162a
Message: users
Date: 2024-01-17 12:51:15 -05:00
217 changed files with 4207 additions and 251701 deletions

View File

@@ -536,10 +536,11 @@ secretGroup = 4
[allowlist]
description = "global allow lists"
regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''']
regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''']
paths = [
'''gitleaks.toml''',
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
'''(go.mod|go.sum)$''',
'''salt/nginx/files/enterprise-attack.json'''
]
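
For anyone adjusting this allowlist, a quick local check (a sketch; assumes the gitleaks v8 CLI is installed and is run from the repo root):

```
# Sketch: scan the working tree with the updated config to confirm
# the allowlist regexes and paths behave as intended.
gitleaks detect --source . --config gitleaks.toml --verbose
```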

View File

@@ -1,190 +0,0 @@
body:
- type: markdown
attributes:
value: |
⚠️ This category is solely for conversations related to Security Onion 2.4 ⚠️
If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
- type: dropdown
attributes:
label: Version
description: Which version of Security Onion 2.4.x are you asking about?
options:
-
- 2.4 Pre-release (Beta, Release Candidate)
- 2.4.10
- 2.4.20
- 2.4.30
- 2.4.40
- 2.4.50
- 2.4.60
- 2.4.70
- 2.4.80
- 2.4.90
- 2.4.100
- Other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Installation Method
description: How did you install Security Onion?
options:
-
- Security Onion ISO image
- Network installation on Red Hat derivative like Oracle, Rocky, Alma, etc.
- Network installation on Ubuntu
- Network installation on Debian
- Other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Description
description: >
Is this discussion about installation, configuration, upgrading, or other?
options:
-
- installation
- configuration
- upgrading
- other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Installation Type
description: >
When you installed, did you choose Import, Eval, Standalone, Distributed, or something else?
options:
-
- Import
- Eval
- Standalone
- Distributed
- other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Location
description: >
Is this deployment in the cloud, on-prem with Internet access, or airgap?
options:
-
- cloud
- on-prem with Internet access
- airgap
- other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Hardware Specs
description: >
Does your hardware meet or exceed the minimum requirements for your installation type as shown at https://docs.securityonion.net/en/2.4/hardware.html?
options:
-
- Meets minimum requirements
- Exceeds minimum requirements
- Does not meet minimum requirements
- other (please provide detail below)
validations:
required: true
- type: input
attributes:
label: CPU
description: How many CPU cores do you have?
validations:
required: true
- type: input
attributes:
label: RAM
description: How much RAM do you have?
validations:
required: true
- type: input
attributes:
label: Storage for /
description: How much storage do you have for the / partition?
validations:
required: true
- type: input
attributes:
label: Storage for /nsm
description: How much storage do you have for the /nsm partition?
validations:
required: true
- type: dropdown
attributes:
label: Network Traffic Collection
description: >
Are you collecting network traffic from a tap or span port?
options:
-
- tap
- span port
- other (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Network Traffic Speeds
description: >
How much network traffic are you monitoring?
options:
-
- Less than 1Gbps
- 1Gbps to 10Gbps
- more than 10Gbps
validations:
required: true
- type: dropdown
attributes:
label: Status
description: >
Does SOC Grid show all services on all nodes as running OK?
options:
-
- Yes, all services on all nodes are running OK
- No, one or more services are failed (please provide detail below)
validations:
required: true
- type: dropdown
attributes:
label: Salt Status
description: >
Do you get any failures when you run "sudo salt-call state.highstate"?
options:
-
- Yes, there are salt failures (please provide detail below)
- No, there are no failures
validations:
required: true
- type: dropdown
attributes:
label: Logs
description: >
Are there any additional clues in /opt/so/log/?
options:
-
- Yes, there are additional clues in /opt/so/log/ (please provide detail below)
- No, there are no additional clues
validations:
required: true
- type: textarea
attributes:
label: Detail
description: Please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and then provide detailed information to help us help you.
placeholder: |-
STOP! Before typing, please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 in their entirety!
If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
validations:
required: true
- type: checkboxes
attributes:
label: Guidelines
options:
- label: I have read the discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and assert that I have followed the guidelines.
required: true
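
Before committing a form change like this, the YAML can be syntax-checked locally. A minimal sketch, assuming PyYAML is available; the file path below is hypothetical since it is not shown in this diff:

```
# Sketch: fail fast on YAML syntax errors before pushing.
python3 -c "import yaml, sys; yaml.safe_load(open(sys.argv[1])); print('YAML OK')" \
  .github/DISCUSSION_TEMPLATE/2-4.yml
```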

View File

@@ -1,32 +0,0 @@
name: 'Close Threads'
on:
schedule:
- cron: '50 1 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
discussions: write
concurrency:
group: lock-threads
jobs:
close-threads:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v5
with:
days-before-issue-stale: -1
days-before-issue-close: 60
stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution."
close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available."
days-before-pr-stale: 45
days-before-pr-close: 60
stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes."
close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR re-open it before it is locked."

View File

@@ -1,25 +0,0 @@
name: 'Lock Threads'
on:
schedule:
- cron: '50 2 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
discussions: write
concurrency:
group: lock-threads
jobs:
lock-threads:
runs-on: ubuntu-latest
steps:
- uses: jertel/lock-threads@main
with:
include-discussion-currently-open: true
discussion-inactive-days: 90
issue-inactive-days: 30
pr-inactive-days: 30
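
Both this workflow and the 'Close Threads' workflow above declare workflow_dispatch, so either can be triggered outside its cron schedule. A sketch using the GitHub CLI, assuming gh is authenticated against this repository:

```
# Sketch: manually dispatch the scheduled maintenance workflows
# and confirm the runs were queued.
gh workflow run 'Close Threads'
gh workflow run 'Lock Threads'
gh run list --limit 5
```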

View File

@@ -1,17 +1,17 @@
### 2.4.60-20240320 ISO image released on 2024/03/20
### 2.4.30-20231228 ISO image released on 2024/01/02
### Download and Verify
2.4.60-20240320 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.60-20240320.iso
2.4.30-20231228 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.30-20231228.iso
MD5: 178DD42D06B2F32F3870E0C27219821E
SHA1: 73EDCD50817A7F6003FE405CF1808A30D034F89D
SHA256: DD334B8D7088A7B78160C253B680D645E25984BA5CCAB5CC5C327CA72137FC06
MD5: DBD47645CD6FA8358C51D8753046FB54
SHA1: 2494091065434ACB028F71444A5D16E8F8A11EDF
SHA256: 3345AE1DC58AC7F29D82E60D9A36CDF8DE19B7DFF999D8C4F89C7BD36AEE7F1D
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.60-20240320.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.30-20231228.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.60-20240320.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.30-20231228.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.60-20240320.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.30-20231228.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.4.60-20240320.iso.sig securityonion-2.4.60-20240320.iso
gpg --verify securityonion-2.4.30-20231228.iso.sig securityonion-2.4.30-20231228.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Tue 19 Mar 2024 03:17:58 PM EDT using RSA key ID FE507013
gpg: Signature made Thu 28 Dec 2023 10:08:31 AM EST using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
```
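
In addition to the GPG check, the published SHA256 can be cross-checked against a local computation (a sketch using standard coreutils, with the ISO filename from above):

```
# Sketch: the output should match the SHA256 value listed above.
sha256sum securityonion-2.4.30-20231228.iso
```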

View File

@@ -1 +1 @@
2.4.70
2.4.40

View File

@@ -19,4 +19,4 @@ role:
receiver:
standalone:
searchnode:
sensor:
sensor:

View File

@@ -41,8 +41,7 @@ file_roots:
base:
- /opt/so/saltstack/local/salt
- /opt/so/saltstack/default/salt
- /nsm/elastic-fleet/artifacts
- /opt/so/rules/nids
# The master_roots setting configures a master-only copy of the file_roots dictionary,
# used by the state compiler.

View File

@@ -1,30 +0,0 @@
{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %}
{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %}
{% set existing_ids = [] %}
{% for node in pillar_kafkanodes.values() %}
{% if node.get('id') %}
{% do existing_ids.append(node['nodeid']) %}
{% endif %}
{% endfor %}
{% set all_possible_ids = range(1, 256)|list %}
{% set available_ids = [] %}
{% for id in all_possible_ids %}
{% if id not in existing_ids %}
{% do available_ids.append(id) %}
{% endif %}
{% endfor %}
{% set final_nodes = pillar_kafkanodes.copy() %}
{% for minionid, ip in current_kafkanodes.items() %}
{% set hostname = minionid.split('_')[0] %}
{% if hostname not in final_nodes %}
{% set new_id = available_ids.pop(0) %}
{% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %}
{% endif %}
{% endfor %}
kafka:
nodes: {{ final_nodes|tojson }}
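
For reference, the mine query at the top of this removed template can be reproduced from a manager shell to see which nodes it would have picked up (a sketch; assumes a working salt master):

```
# Sketch: query the salt mine for manager/receiver IPs,
# mirroring the saltutil.runner call in the template.
sudo salt-run mine.get \
  'G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver' \
  network.ip_addrs tgt_type=compound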

View File

@@ -16,6 +16,7 @@ base:
- sensoroni.adv_sensoroni
- telegraf.soc_telegraf
- telegraf.adv_telegraf
- users
'* and not *_desktop':
- firewall.soc_firewall
@@ -43,6 +44,8 @@ base:
- soc.soc_soc
- soc.adv_soc
- soc.license
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- kibana.soc_kibana
- kibana.adv_kibana
- kratos.soc_kratos
@@ -59,12 +62,10 @@ base:
- elastalert.adv_elastalert
- backup.soc_backup
- backup.adv_backup
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
- stig.soc_stig
'*_sensor':
- healthcheck.sensor
@@ -80,8 +81,6 @@ base:
- suricata.adv_suricata
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
- soc.license
'*_eval':
- secrets
@@ -107,6 +106,8 @@ base:
- soc.soc_soc
- soc.adv_soc
- soc.license
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- kibana.soc_kibana
- kibana.adv_kibana
- strelka.soc_strelka
@@ -162,6 +163,8 @@ base:
- soc.soc_soc
- soc.adv_soc
- soc.license
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- kibana.soc_kibana
- kibana.adv_kibana
- strelka.soc_strelka
@@ -178,10 +181,6 @@ base:
- suricata.adv_suricata
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
'*_heavynode':
- elasticsearch.auth
@@ -224,8 +223,6 @@ base:
- redis.adv_redis
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
- soc.license
'*_receiver':
- logstash.nodes
@@ -238,9 +235,6 @@ base:
- redis.adv_redis
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
'*_import':
- secrets
@@ -263,6 +257,8 @@ base:
- soc.soc_soc
- soc.adv_soc
- soc.license
- soctopus.soc_soctopus
- soctopus.adv_soctopus
- kibana.soc_kibana
- kibana.adv_kibana
- backup.soc_backup

pillar/users/init.sls Normal file (2 additions)
View File

@@ -0,0 +1,2 @@
# users pillar goes in /opt/so/saltstack/local/pillar/users/init.sls
# the users directory may need to be created under /opt/so/saltstack/local/pillar
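
A sketch of the setup these comments describe, using the paths given above:

```
# Sketch: create the local users pillar location referenced in the comments.
sudo mkdir -p /opt/so/saltstack/local/pillar/users
sudo touch /opt/so/saltstack/local/pillar/users/init.sls
```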

View File

@@ -0,0 +1,18 @@
users:
sclapton:
# required fields
status: present
# node_access determines which node types the user can access.
# this can either be by grains.role or by final part of the minion id after the _
node_access:
- standalone
- searchnode
# optional fields
fullname: Stevie Claptoon
uid: 1001
gid: 1001
homephone: does not have a phone
groups:
- mygroup1
- mygroup2
- wheel # give sudo access

pillar/users/pillar.usage Normal file (20 additions)
View File

@@ -0,0 +1,20 @@
users:
sclapton:
# required fields
status: <present | absent>
# node_access determines which node types the user can access.
# this can either be by grains.role or by final part of the minion id after the _
node_access:
- standalone
- searchnode
# optional fields
fullname: <string>
uid: <integer>
gid: <integer>
roomnumber: <string>
workphone: <string>
homephone: <string>
groups:
- <string>
- <string>
- wheel # give sudo access
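
Once a users pillar following this template is in place, it can be verified on a target node (a sketch using standard salt commands):

```
# Sketch: confirm the users pillar renders as expected on this minion.
sudo salt-call pillar.get users
```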

View File

@@ -34,6 +34,7 @@
'suricata',
'utility',
'schedule',
'soctopus',
'tcpreplay',
'docker_clean'
],
@@ -100,9 +101,8 @@
'suricata.manager',
'utility',
'schedule',
'docker_clean',
'stig',
'kafka'
'soctopus',
'docker_clean'
],
'so-managersearch': [
'salt.master',
@@ -122,9 +122,8 @@
'suricata.manager',
'utility',
'schedule',
'docker_clean',
'stig',
'kafka'
'soctopus',
'docker_clean'
],
'so-searchnode': [
'ssl',
@@ -132,8 +131,7 @@
'telegraf',
'firewall',
'schedule',
'docker_clean',
'stig'
'docker_clean'
],
'so-standalone': [
'salt.master',
@@ -156,10 +154,9 @@
'healthcheck',
'utility',
'schedule',
'soctopus',
'tcpreplay',
'docker_clean',
'stig',
'kafka'
'docker_clean'
],
'so-sensor': [
'ssl',
@@ -171,15 +168,13 @@
'healthcheck',
'schedule',
'tcpreplay',
'docker_clean',
'stig'
'docker_clean'
],
'so-fleet': [
'ssl',
'telegraf',
'firewall',
'logstash',
'nginx',
'healthcheck',
'schedule',
'elasticfleet',
@@ -190,9 +185,7 @@
'telegraf',
'firewall',
'schedule',
'docker_clean',
'kafka',
'elasticsearch.ca'
'docker_clean'
],
'so-desktop': [
'ssl',
@@ -201,6 +194,10 @@
],
}, grain='role') %}
{% if grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %}
{% do allowed_states.append('mysql') %}
{% endif %}
{%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
{% do allowed_states.append('zeek') %}
{%- endif %}
@@ -226,6 +223,10 @@
{% do allowed_states.append('elastalert') %}
{% endif %}
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
{% do allowed_states.append('playbook') %}
{% endif %}
{% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
{% do allowed_states.append('logstash') %}
{% endif %}
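
This map is keyed off the minion's role grain via filter_by, so the effective state list for a node can be traced from its grains (a sketch using standard salt commands):

```
# Sketch: check which role grain this minion reports, and which
# top-file entries consequently apply to it.
sudo salt-call grains.get role
sudo salt-call state.show_top
```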

View File

@@ -1,10 +1,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.pcap_engine == "TRANSITION" %}
{% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
{% else %}
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
{% set PCAPBPF = BPFMERGED.pcap %}
{% endif %}
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
{% set PCAPBPF = BPFMERGED.pcap %}

View File

@@ -1,6 +1,6 @@
bpf:
pcap:
description: List of BPF filters to apply to Stenographer.
description: List of BPF filters to apply to PCAP.
multiline: True
forcedType: "[]string"
helpLink: bpf.html

View File

@@ -70,17 +70,3 @@ x509_signing_policies:
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 820
- copypath: /etc/pki/issued_certs/
kafka:
- minions: '*'
- signing_private_key: /etc/pki/ca.key
- signing_cert: /etc/pki/ca.crt
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:false"
- keyUsage: "digitalSignature, keyEncipherment"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- extendedKeyUsage: "serverAuth, clientAuth"
- days_valid: 820
- copypath: /etc/pki/issued_certs/

View File

@@ -4,6 +4,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
- common.soup_scripts
- common.packages
{% if GLOBALS.role in GLOBALS.manager_roles %}
- manager.elasticsearch # needed for elastic_curl_config state
@@ -133,18 +134,6 @@ common_sbin_jinja:
- file_mode: 755
- template: jinja
{% if not GLOBALS.is_manager%}
# prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and distributed to non managers
# these two states remove the scripts from non manager nodes
remove_soup:
file.absent:
- name: /usr/sbin/soup
remove_so-firewall:
file.absent:
- name: /usr/sbin/so-firewall
{% endif %}
so-status_script:
file.managed:
- name: /usr/sbin/so-status

View File

@@ -1,88 +1,23 @@
{% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %}
# Sync some Utilities
soup_scripts:
file.recurse:
- name: /usr/sbin
- user: root
- group: root
- file_mode: 755
- source: salt://common/tools/sbin
- include_pat:
- so-common
- so-image-common
{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
{% if SOC_GLOBAL.global.airgap %}
{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
{% else %}
{% set UPDATE_DIR='/tmp/sogh/securityonion' %}
{% endif %}
remove_common_soup:
file.absent:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/soup
remove_common_so-firewall:
file.absent:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall
copy_so-common_common_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-common
- source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common
- force: True
- preserve: True
copy_so-image-common_common_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common
- source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common
- force: True
- preserve: True
copy_soup_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/manager/tools/sbin/soup
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup
- force: True
- preserve: True
copy_so-firewall_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-firewall
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
- force: True
- preserve: True
copy_so-common_sbin:
file.copy:
- name: /usr/sbin/so-common
- source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-common
- force: True
- preserve: True
copy_so-image-common_sbin:
file.copy:
- name: /usr/sbin/so-image-common
- source: {{UPDATE_DIR}}/salt/common/tools/sbin/so-image-common
- force: True
- preserve: True
copy_soup_sbin:
file.copy:
- name: /usr/sbin/soup
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/soup
- force: True
- preserve: True
copy_so-firewall_sbin:
file.copy:
- name: /usr/sbin/so-firewall
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
- force: True
- preserve: True
copy_so-yaml_sbin:
file.copy:
- name: /usr/sbin/so-yaml.py
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
- force: True
- preserve: True
{% else %}
fix_23_soup_sbin:
cmd.run:
- name: curl -s -f -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
fix_23_soup_salt:
cmd.run:
- name: curl -s -f -o /opt/so/saltstack/defalt/salt/common/tools/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
{% endif %}
soup_manager_scripts:
file.recurse:
- name: /usr/sbin
- user: root
- group: root
- file_mode: 755
- source: salt://manager/tools/sbin
- include_pat:
- so-firewall
- so-repo-sync
- soup
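
A single state like this can be applied on its own for testing (a sketch; mirrors the salt-call pattern used elsewhere in this changeset):

```
# Sketch: refresh the soup/so-firewall scripts on a manager via this state.
sudo salt-call state.apply common.soup_scripts -l info
```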

View File

@@ -248,14 +248,6 @@ get_random_value() {
head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
}
get_agent_count() {
if [ -f /opt/so/log/agents/agentstatus.log ]; then
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}')
else
AGENTCOUNT=0
fi
}
gpg_rpm_import() {
if [[ $is_oracle ]]; then
if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
@@ -337,7 +329,7 @@ lookup_salt_value() {
local=""
fi
salt-call -lerror --no-color ${kind}.get ${group}${key} --out=${output} ${local}
salt-call --no-color ${kind}.get ${group}${key} --out=${output} ${local}
}
lookup_pillar() {
@@ -374,13 +366,6 @@ is_feature_enabled() {
return 1
}
read_feat() {
if [ -f /opt/so/log/sostatus/lks_enabled ]; then
lic_id=$(cat /opt/so/saltstack/local/pillar/soc/license.sls | grep license_id: | awk '{print $2}')
echo "$lic_id/$(cat /opt/so/log/sostatus/lks_enabled)/$(cat /opt/so/log/sostatus/fps_enabled)"
fi
}
require_manager() {
if is_manager_node; then
echo "This is a manager, so we can proceed."
@@ -574,15 +559,6 @@ status () {
printf "\n=========================================================================\n$(date) | $1\n=========================================================================\n"
}
sync_options() {
set_version
set_os
salt_minion_count
get_agent_count
echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT:$AGENTCOUNT/$(read_feat)"
}
systemctl_func() {
local action=$1
local echo_action=$1

View File

@@ -8,7 +8,6 @@
import sys
import subprocess
import os
import json
sys.path.append('/opt/saltstack/salt/lib/python3.10/site-packages/')
import salt.config
@@ -37,67 +36,17 @@ def check_needs_restarted():
with open(outfile, 'w') as f:
f.write(val)
def check_for_fps():
feat = 'fps'
feat_full = feat.replace('ps', 'ips')
fps = 0
try:
result = subprocess.run([feat_full + '-mode-setup', '--is-enabled'], stdout=subprocess.PIPE)
if result.returncode == 0:
fps = 1
except FileNotFoundError:
fn = '/proc/sys/crypto/' + feat_full + '_enabled'
try:
with open(fn, 'r') as f:
contents = f.read()
if '1' in contents:
fps = 1
except:
# Unknown, so assume 0
fps = 0
with open('/opt/so/log/sostatus/fps_enabled', 'w') as f:
f.write(str(fps))
def check_for_lks():
feat = 'Lks'
feat_full = feat.replace('ks', 'uks')
lks = 0
result = subprocess.run(['lsblk', '-p', '-J'], check=True, stdout=subprocess.PIPE)
data = json.loads(result.stdout)
for device in data['blockdevices']:
if 'children' in device:
for gc in device['children']:
if 'children' in gc:
try:
arg = 'is' + feat_full
result = subprocess.run(['cryptsetup', arg, gc['name']], stdout=subprocess.PIPE)
if result.returncode == 0:
lks = 1
except FileNotFoundError:
for ggc in gc['children']:
if 'crypt' in ggc['type']:
lks = 1
if lks:
break
with open('/opt/so/log/sostatus/lks_enabled', 'w') as f:
f.write(str(lks))
def fail(msg):
print(msg, file=sys.stderr)
sys.exit(1)
def main():
proc = subprocess.run(['id', '-u'], stdout=subprocess.PIPE, encoding="utf-8")
if proc.stdout.strip() != "0":
fail("This program must be run as root")
# Ensure that umask is 0022 so that files created by this script have rw-r-r permissions
org_umask = os.umask(0o022)
check_needs_restarted()
check_for_fps()
check_for_lks()
# Restore umask to whatever value was set before this script was run. SXIG sets to 0077 rw---
os.umask(org_umask)
if __name__ == "__main__":
main()

View File

@@ -50,14 +50,16 @@ container_list() {
"so-idh"
"so-idstools"
"so-influxdb"
"so-kafka"
"so-kibana"
"so-kratos"
"so-logstash"
"so-mysql"
"so-nginx"
"so-pcaptools"
"so-playbook"
"so-redis"
"so-soc"
"so-soctopus"
"so-steno"
"so-strelka-backend"
"so-strelka-filestream"
@@ -65,7 +67,7 @@ container_list() {
"so-strelka-manager"
"so-suricata"
"so-telegraf"
"so-zeek"
"so-zeek"
)
else
TRUSTED_CONTAINERS=(
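
To compare this trusted list against what is actually running on a node, a sketch using the standard docker CLI:

```
# Sketch: list running container names for comparison with TRUSTED_CONTAINERS.
docker ps --format '{{.Names}}' | sort
```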

View File

@@ -49,6 +49,10 @@ if [ "$CONTINUE" == "y" ]; then
sed -i "s|$OLD_IP|$NEW_IP|g" $file
done
echo "Granting MySQL root user permissions on $NEW_IP"
docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'$NEW_IP' IDENTIFIED BY '$(lookup_pillar_secret 'mysql')' WITH GRANT OPTION;" &> /dev/null
echo "Removing MySQL root user from $OLD_IP"
docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "DROP USER 'root'@'$OLD_IP';" &> /dev/null
echo "Updating Kibana dashboards"
salt-call state.apply kibana.so_savedobjects_defaults -l info queue=True

View File

@@ -122,7 +122,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to get license information" # Logstash trying to contact ES before it's ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process already finished" # Telegraf script finished just as the auto kill timeout kicked in
fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -155,11 +154,15 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail\\(error\\)" # redis/python generic stack line, rely on other lines for actual error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exited with code 128" # soctopus errors during forced restart by highstate
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip databases update" # airgap can't update GeoIP DB
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index
@@ -198,8 +201,6 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp.
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded"
fi
@@ -209,9 +210,7 @@ RESULT=0
CONTAINER_IDS=$(docker ps -q)
exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary
exclude_container so-idstools # ignore due to known issues and noisy logging
exclude_container so-playbook # Playbook is removed as of 2.4.70, disregard output in stopped containers
exclude_container so-mysql # MySQL is removed as of 2.4.70, disregard output in stopped containers
exclude_container so-soctopus # Soctopus is removed as of 2.4.70, disregard output in stopped containers
exclude_container so-playbook # ignore due to several playbook known issues
for container_id in $CONTAINER_IDS; do
container_name=$(docker ps --format json | jq ". | select(.ID==\"$container_id\")|.Names")
@@ -229,13 +228,10 @@ exclude_log "kibana.log" # kibana error logs are too verbose with large variet
exclude_log "spool" # disregard zeek analyze logs as this is data specific
exclude_log "import" # disregard imported test data the contains error strings
exclude_log "update.log" # ignore playbook updates due to several known issues
exclude_log "playbook.log" # ignore due to several playbook known issues
exclude_log "cron-cluster-delete.log" # ignore since Curator has been removed
exclude_log "cron-close.log" # ignore since Curator has been removed
exclude_log "curator.log" # ignore since Curator has been removed
exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be on disk
exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk
exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state
exclude_log "curator.log" # ignore since Curator has been removed
for log_file in $(cat /tmp/log_check_files); do
status "Checking log file $log_file"

View File

@@ -334,7 +334,6 @@ desktop_packages:
- pulseaudio-libs
- pulseaudio-libs-glib2
- pulseaudio-utils
- putty
- sane-airscan
- sane-backends
- sane-backends-drivers-cameras

View File

@@ -67,6 +67,13 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-mysql':
final_octet: 30
port_bindings:
- 0.0.0.0:3306:3306
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-nginx':
final_octet: 31
port_bindings:
@@ -77,10 +84,10 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-nginx-fleet-node':
final_octet: 31
'so-playbook':
final_octet: 32
port_bindings:
- 8443:8443
- 0.0.0.0:3000:3000
custom_bind_mounts: []
extra_hosts: []
extra_env: []
@@ -104,6 +111,13 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-soctopus':
final_octet: 35
port_bindings:
- 0.0.0.0:7000:7000
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-strelka-backend':
final_octet: 36
custom_bind_mounts: []
@@ -185,11 +199,3 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-kafka':
final_octet: 88
port_bindings:
- 0.0.0.0:9092:9092
- 0.0.0.0:9093:9093
custom_bind_mounts: []
extra_hosts: []
extra_env: []

View File

@@ -46,11 +46,13 @@ docker:
so-kibana: *dockerOptions
so-kratos: *dockerOptions
so-logstash: *dockerOptions
so-mysql: *dockerOptions
so-nginx: *dockerOptions
so-nginx-fleet-node: *dockerOptions
so-playbook: *dockerOptions
so-redis: *dockerOptions
so-sensoroni: *dockerOptions
so-soc: *dockerOptions
so-soctopus: *dockerOptions
so-strelka-backend: *dockerOptions
so-strelka-filestream: *dockerOptions
so-strelka-frontend: *dockerOptions
@@ -65,4 +67,3 @@ docker:
so-steno: *dockerOptions
so-suricata: *dockerOptions
so-zeek: *dockerOptions
so-kafka: *dockerOptions

View File

@@ -45,8 +45,6 @@ elasticfleet:
- cisco_ise
- cisco_meraki
- cisco_umbrella
- citrix_adc
- citrix_waf
- cloudflare
- crowdstrike
- darktrace
@@ -65,7 +63,6 @@ elasticfleet:
- http_endpoint
- httpjson
- iis
- journald
- juniper
- juniper_srx
- kafka_log
@@ -78,7 +75,6 @@ elasticfleet:
- mimecast
- mysql
- netflow
- nginx
- o365
- okta
- osquery_manager
@@ -107,7 +103,6 @@ elasticfleet:
- udp
- vsphere
- windows
- winlog
- zscaler_zia
- zscaler_zpa
- 1password

View File

@@ -17,11 +17,6 @@ include:
- elasticfleet.sostatus
- ssl
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
wait_for_elasticsearch_elasticfleet:
cmd.run:
- name: so-elasticsearch-wait
# If enabled, automatically update Fleet Logstash Outputs
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
so-elastic-fleet-auto-configure-logstash-outputs:
@@ -38,26 +33,12 @@ so-elastic-fleet-auto-configure-server-urls:
- retry: True
{% endif %}
# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
# Automatically update Fleet Server Elasticsearch URLs
{% if grains.role not in ['so-fleet'] %}
so-elastic-fleet-auto-configure-elasticsearch-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-es-url-update
- retry: True
so-elastic-fleet-auto-configure-artifact-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-artifacts-url-update
- retry: True
{% endif %}
# Sync Elastic Agent artifacts to Fleet Node
{% if grains.role in ['so-fleet'] %}
elasticagent_syncartifacts:
file.recurse:
- name: /nsm/elastic-fleet/artifacts/beats
- source: salt://beats
{% endif %}
{% if SERVICETOKEN != '' %}
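
After changes like these, the state can be re-applied on the affected node (a sketch; mirrors the salt-call invocation used elsewhere in this changeset):

```
# Sketch: re-run the elasticfleet state so the URL auto-configuration
# and artifact sync steps take effect.
sudo salt-call state.apply elasticfleet -l info queue=True
```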

View File

@@ -1,29 +0,0 @@
{
"package": {
"name": "winlog",
"version": ""
},
"name": "windows-defender",
"namespace": "default",
"description": "Windows Defender - Operational logs",
"policy_id": "endpoints-initial",
"inputs": {
"winlogs-winlog": {
"enabled": true,
"streams": {
"winlog.winlog": {
"enabled": true,
"vars": {
"channel": "Microsoft-Windows-Windows Defender/Operational",
"data_stream.dataset": "winlog.winlog",
"preserve_original_event": false,
"providers": [],
"ignore_older": "72h",
"language": 0,
"tags": [] }
}
}
}
},
"force": true
}

View File

@@ -1,34 +0,0 @@
{
"package": {
"name": "log",
"version": ""
},
"name": "rita-logs",
"namespace": "so",
"description": "RITA Logs",
"policy_id": "so-grid-nodes_general",
"vars": {},
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/nsm/rita/beacons.csv",
"/nsm/rita/exploded-dns.csv",
"/nsm/rita/long-connections.csv"
],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "rita",
"tags": [],
"processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
"custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']"
}
}
}
}
}
}

View File

@@ -16,9 +16,6 @@
"paths": [
"/var/log/auth.log*",
"/var/log/secure*"
],
"tags": [
"so-grid-node"
]
}
},
@@ -28,9 +25,6 @@
"paths": [
"/var/log/messages*",
"/var/log/syslog*"
],
"tags": [
"so-grid-node"
]
}
}

View File

@@ -16,9 +16,6 @@
"paths": [
"/var/log/auth.log*",
"/var/log/secure*"
],
"tags": [
"so-grid-node"
]
}
},
@@ -28,9 +25,6 @@
"paths": [
"/var/log/messages*",
"/var/log/syslog*"
],
"tags": [
"so-grid-node"
]
}
}

View File

@@ -46,7 +46,7 @@ do
done
printf "\n### Stripping out unused components"
find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -maxdepth 1 -regex '.*fleet.*\|.*packet.*\|.*apm.*\|.*heart.*\|.*cloud.*' -delete
find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -maxdepth 1 -regex '.*fleet.*\|.*packet.*\|.*apm.*\|.*audit.*\|.*heart.*\|.*cloud.*' -delete
printf "\n### Tarring everything up again"
for OS in "${OSARCH[@]}"

View File

@@ -1,5 +1,3 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

View File

@@ -1,90 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
. /usr/sbin/so-common
# Only run on Managers
if ! is_manager_node; then
printf "Not a Manager Node... Exiting"
exit 0
fi
# Function to check if an array contains a value
array_contains () {
local array="$1[@]"
local seeking=$2
local in=1
for element in "${!array}"; do
if [[ $element == "$seeking" ]]; then
in=0
break
fi
done
return $in
}
# Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes)
LOGSTASHNODES='{{ salt['pillar.get']('logstash:nodes', {}) | tojson }}'
# Initialize an array for new hosts from Fleet Nodes
declare -a NEW_LIST=()
# Query for Fleet Nodes & add them to the list (Hostname)
if grep -q "fleet" <<< "$LOGSTASHNODES"; then
readarray -t FLEETNODES < <(jq -r '.fleet | keys_unsorted[]' <<< "$LOGSTASHNODES")
for NODE in "${FLEETNODES[@]}"; do
URL="http://$NODE:8443/artifacts/"
NAME="FleetServer_$NODE"
NEW_LIST+=("$URL=$NAME")
done
fi
# Create an array for expected hosts and their names
declare -A expected_urls=(
["http://{{ GLOBALS.url_base }}:8443/artifacts/"]="FleetServer_{{ GLOBALS.hostname }}"
["https://artifacts.elastic.co/downloads/"]="Elastic Artifacts"
)
# Merge NEW_LIST into expected_urls
for entry in "${NEW_LIST[@]}"; do
# Extract URL and Name from each entry
IFS='=' read -r URL NAME <<< "$entry"
# Add to expected_urls, automatically handling URL as key and NAME as value
expected_urls["$URL"]="$NAME"
done
# Fetch the current hosts from the API
current_urls=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/agent_download_sources' | jq -r .items[].host)
# Convert current hosts to an array
IFS=$'\n' read -rd '' -a current_urls_array <<<"$current_urls"
# Flag to track if any host was added
any_url_added=0
# Check each expected host
for host in "${!expected_urls[@]}"; do
array_contains current_urls_array "$host" || {
echo "$host (${expected_urls[$host]}) is missing. Adding it..."
# Prepare the JSON payload
JSON_STRING=$( jq -n \
--arg NAME "${expected_urls[$host]}" \
--arg URL "$host" \
'{"name":$NAME,"host":$URL}' )
# Create the missing host
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
# Flag that an artifact URL was added
any_url_added=1
}
done
if [[ $any_url_added -eq 0 ]]; then
echo "All expected artifact URLs are present. No updates needed."
fi

View File

@@ -1,5 +1,3 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

View File

@@ -1,5 +1,3 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

View File

@@ -1,5 +1,3 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

View File

@@ -4,7 +4,7 @@
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
# Move our new CA over so Elastic and Logstash can use SSL with the internal CA

View File

@@ -118,19 +118,6 @@ esingestconf:
- user: 930
- group: 939
# Auto-generate Elasticsearch ingest node pipelines from pillar
{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %}
es_ingest_conf_{{pipeline}}:
file.managed:
- name: /opt/so/conf/elasticsearch/ingest/{{ pipeline }}
- source: salt://elasticsearch/base-template.json.jinja
- defaults:
TEMPLATE_CONFIG: {{ config }}
- template: jinja
- onchanges_in:
- file: so-pipelines-reload
{% endfor %}
eslog4jfile:
file.managed:
- name: /opt/so/conf/elasticsearch/log4j2.properties

File diff suppressed because it is too large

View File

@@ -57,11 +57,10 @@
{ "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
{ "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" } },
{ "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" }},
{ "set": { "if": "ctx.event?.dataset != null && !ctx.event.dataset.contains('.')", "field": "event.dataset", "value": "{{event.module}}.{{event.dataset}}" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" }},
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
{%- endraw %}
{%- if HIGHLANDER %}

View File

@@ -68,7 +68,7 @@
"field": "_security",
"ignore_missing": true
}
},
},
{ "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
{ "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
@@ -83,7 +83,6 @@
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
],
"on_failure": [

View File

@@ -1,389 +0,0 @@
{
"description": "Pipeline for pfSense",
"processors": [
{
"set": {
"field": "ecs.version",
"value": "8.10.0"
}
},
{
"set": {
"field": "observer.vendor",
"value": "netgate"
}
},
{
"set": {
"field": "observer.type",
"value": "firewall"
}
},
{
"rename": {
"field": "message",
"target_field": "event.original"
}
},
{
"set": {
"field": "event.kind",
"value": "event"
}
},
{
"set": {
"field": "event.timezone",
"value": "{{_tmp.tz_offset}}",
"if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'"
}
},
{
"grok": {
"description": "Parse syslog header",
"field": "event.original",
"patterns": [
"^(%{ECS_SYSLOG_PRI})?%{TIMESTAMP} %{GREEDYDATA:message}"
],
"pattern_definitions": {
"ECS_SYSLOG_PRI": "<%{NONNEGINT:log.syslog.priority:long}>(\\d )?",
"TIMESTAMP": "(?:%{BSD_TIMESTAMP_FORMAT}|%{SYSLOG_TIMESTAMP_FORMAT})",
"BSD_TIMESTAMP_FORMAT": "%{SYSLOGTIMESTAMP:_tmp.timestamp}(%{SPACE}%{BSD_PROCNAME}|%{SPACE}%{OBSERVER}%{SPACE}%{BSD_PROCNAME})(\\[%{POSINT:process.pid:long}\\])?:",
"BSD_PROCNAME": "(?:\\b%{NAME:process.name}|\\(%{NAME:process.name}\\))",
"NAME": "[[[:alnum:]]_-]+",
"SYSLOG_TIMESTAMP_FORMAT": "%{TIMESTAMP_ISO8601:_tmp.timestamp8601}%{SPACE}%{OBSERVER}%{SPACE}%{PROCESS}%{SPACE}(%{POSINT:process.pid:long}|-) - (-|%{META})",
"TIMESTAMP_ISO8601": "%{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE:event.timezone}?",
"OBSERVER": "(?:%{IP:observer.ip}|%{HOSTNAME:observer.name})",
"PROCESS": "(\\(%{DATA:process.name}\\)|(?:%{UNIXPATH}*/)?%{BASEPATH:process.name})",
"BASEPATH": "[[[:alnum:]]_%!$@:.,+~-]+",
"META": "\\[[^\\]]*\\]"
}
}
},
{
"date": {
"if": "ctx._tmp.timestamp8601 != null",
"field": "_tmp.timestamp8601",
"target_field": "@timestamp",
"formats": [
"ISO8601"
]
}
},
{
"date": {
"if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null",
"field": "_tmp.timestamp",
"target_field": "@timestamp",
"formats": [
"MMM d HH:mm:ss",
"MMM d HH:mm:ss",
"MMM dd HH:mm:ss"
],
"timezone": "{{ event.timezone }}"
}
},
{
"grok": {
"description": "Set Event Provider",
"field": "process.name",
"patterns": [
"^%{HYPHENATED_WORDS:event.provider}"
],
"pattern_definitions": {
"HYPHENATED_WORDS": "\\b[A-Za-z0-9_]+(-[A-Za-z_]+)*\\b"
}
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-firewall",
"if": "ctx.event.provider == 'filterlog'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-openvpn",
"if": "ctx.event.provider == 'openvpn'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-ipsec",
"if": "ctx.event.provider == 'charon'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-dhcp",
"if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-unbound",
"if": "ctx.event.provider == 'unbound'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-haproxy",
"if": "ctx.event.provider == 'haproxy'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-php-fpm",
"if": "ctx.event.provider == 'php-fpm'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-squid",
"if": "ctx.event.provider == 'squid'"
}
},
{
"pipeline": {
"name": "logs-pfsense.log-1.16.0-suricata",
"if": "ctx.event.provider == 'suricata'"
}
},
{
"drop": {
"if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"suricata\"].contains(ctx.event?.provider)"
}
},
{
"append": {
"field": "event.category",
"value": "network",
"if": "ctx.network != null"
}
},
{
"convert": {
"field": "source.address",
"target_field": "source.ip",
"type": "ip",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"convert": {
"field": "destination.address",
"target_field": "destination.ip",
"type": "ip",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"set": {
"field": "network.type",
"value": "ipv6",
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")"
}
},
{
"set": {
"field": "network.type",
"value": "ipv4",
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")"
}
},
{
"geoip": {
"field": "source.ip",
"target_field": "source.geo",
"ignore_missing": true
}
},
{
"geoip": {
"field": "destination.ip",
"target_field": "destination.geo",
"ignore_missing": true
}
},
{
"geoip": {
"ignore_missing": true,
"database_file": "GeoLite2-ASN.mmdb",
"field": "source.ip",
"target_field": "source.as",
"properties": [
"asn",
"organization_name"
]
}
},
{
"geoip": {
"database_file": "GeoLite2-ASN.mmdb",
"field": "destination.ip",
"target_field": "destination.as",
"properties": [
"asn",
"organization_name"
],
"ignore_missing": true
}
},
{
"rename": {
"field": "source.as.asn",
"target_field": "source.as.number",
"ignore_missing": true
}
},
{
"rename": {
"field": "source.as.organization_name",
"target_field": "source.as.organization.name",
"ignore_missing": true
}
},
{
"rename": {
"field": "destination.as.asn",
"target_field": "destination.as.number",
"ignore_missing": true
}
},
{
"rename": {
"field": "destination.as.organization_name",
"target_field": "destination.as.organization.name",
"ignore_missing": true
}
},
{
"community_id": {
"target_field": "network.community_id",
"ignore_failure": true
}
},
{
"grok": {
"field": "observer.ingress.interface.name",
"patterns": [
"%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}"
],
"ignore_missing": true,
"ignore_failure": true
}
},
{
"set": {
"field": "network.vlan.id",
"copy_from": "observer.ingress.vlan.id",
"ignore_empty_value": true
}
},
{
"append": {
"field": "related.ip",
"value": "{{destination.ip}}",
"allow_duplicates": false,
"if": "ctx.destination?.ip != null"
}
},
{
"append": {
"field": "related.ip",
"value": "{{source.ip}}",
"allow_duplicates": false,
"if": "ctx.source?.ip != null"
}
},
{
"append": {
"field": "related.ip",
"value": "{{source.nat.ip}}",
"allow_duplicates": false,
"if": "ctx.source?.nat?.ip != null"
}
},
{
"append": {
"field": "related.hosts",
"value": "{{destination.domain}}",
"if": "ctx.destination?.domain != null"
}
},
{
"append": {
"field": "related.user",
"value": "{{user.name}}",
"if": "ctx.user?.name != null"
}
},
{
"set": {
"field": "network.direction",
"value": "{{network.direction}}bound",
"if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/"
}
},
{
"remove": {
"field": [
"_tmp"
],
"ignore_failure": true
}
},
{
"script": {
"lang": "painless",
"description": "This script processor iterates over the whole document to remove fields with null values.",
"source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
}
},
{
"remove": {
"field": "event.original",
"if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"pipeline": {
"name": "logs-pfsense.log@custom",
"ignore_missing_pipeline": true
}
}
],
"on_failure": [
{
"remove": {
"field": [
"_tmp"
],
"ignore_failure": true
}
},
{
"set": {
"field": "event.kind",
"value": "pipeline_error"
}
},
{
"append": {
"field": "error.message",
"value": "{{{ _ingest.on_failure_message }}}"
}
}
],
"_meta": {
"managed_by": "fleet",
"managed": true,
"package": {
"name": "pfsense"
}
}
}
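
Ingest pipelines like this removed one can be dry-run against a sample document with Elasticsearch's simulate API (a sketch; the pipeline id is hypothetical, and the endpoint scheme and curl config may differ per deployment):

```
# Sketch: simulate the pfSense pipeline against one sample syslog line.
curl -K /opt/so/conf/elasticsearch/curl.config -X POST \
  'localhost:9200/_ingest/pipeline/logs-pfsense.log-1.16.0/_simulate?pretty' \
  -H 'Content-Type: application/json' \
  -d '{"docs":[{"_source":{"message":"<sample pfSense syslog line>"}}]}'
```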

View File

@@ -1,31 +0,0 @@
{
"description": "Pipeline for parsing pfSense Suricata logs.",
"processors": [
{
"pipeline": {
"name": "suricata.common"
}
}
],
"on_failure": [
{
"set": {
"field": "event.kind",
"value": "pipeline_error"
}
},
{
"append": {
"field": "error.message",
"value": "{{{ _ingest.on_failure_message }}}"
}
}
],
"_meta": {
"managed_by": "fleet",
"managed": true,
"package": {
"name": "pfsense"
}
}
}

View File

@@ -67,8 +67,7 @@
{ "set": { "if": "ctx.scan?.pe?.image_version == '0'", "field": "scan.pe.image_version", "value": "0.0", "override": true } },
{ "set": { "field": "observer.name", "value": "{{agent.name}}" }},
{ "convert" : { "field" : "scan.exiftool","type": "string", "ignore_missing":true }},
{ "convert" : { "field" : "scan.pe.flags","type": "string", "ignore_missing":true }},
{ "remove": { "field": ["host", "path", "message", "exiftool", "scan.yara.meta"], "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
{ "remove": { "field": ["host", "path", "message", "exiftool", "scan.yara.meta"], "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -4,7 +4,6 @@
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
{ "rename": { "field": "message2.in_iface", "target_field": "observer.ingress.interface.name", "ignore_failure": true } },
{ "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
{ "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
{ "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },

View File

@@ -1,21 +0,0 @@
{
"description" : "suricata.ike",
"processors" : [
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_auth", "target_field": "ike.algorithm.authentication", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_enc", "target_field": "ike.algorithm.encryption", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_esn", "target_field": "ike.algorithm.esn", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_dh", "target_field": "ike.algorithm.dh", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.alg_prf", "target_field": "ike.algorithm.prf", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.exchange_type", "target_field": "ike.exchange_type", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.payload", "target_field": "ike.payload", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.role", "target_field": "ike.role", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.init_spi", "target_field": "ike.spi.initiator", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.resp_spi", "target_field": "ike.spi.responder", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.version_major", "target_field": "ike.version.major", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.version_minor", "target_field": "ike.version.minor", "ignore_missing": true } },
{ "rename": { "field": "message2.ike.ikev2.errors", "target_field": "ike.ikev2.errors", "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -0,0 +1,8 @@
{
"description" : "suricata.ikev2",
"processors" : [
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -45,28 +45,6 @@ elasticsearch:
description: Max number of boolean clauses per query.
global: True
helpLink: elasticsearch.html
pipelines:
custom001: &pipelines
description:
description: Description of the ingest node pipeline
global: True
advanced: True
helpLink: elasticsearch.html
processors:
description: Processors for the ingest node pipeline
global: True
advanced: True
multiline: True
helpLink: elasticsearch.html
custom002: *pipelines
custom003: *pipelines
custom004: *pipelines
custom005: *pipelines
custom006: *pipelines
custom007: *pipelines
custom008: *pipelines
custom009: *pipelines
custom010: *pipelines
index_settings:
global_overrides:
index_template:
@@ -95,7 +73,6 @@ elasticsearch:
description: The order to sort by. Must set index_sorting to True.
global: True
helpLink: elasticsearch.html
policy:
phases:
hot:
max_age:
@@ -341,7 +318,6 @@ elasticsearch:
so-logs-windows_x_powershell: *indexSettings
so-logs-windows_x_powershell_operational: *indexSettings
so-logs-windows_x_sysmon_operational: *indexSettings
so-logs-winlog_x_winlog: *indexSettings
so-logs-apache_x_access: *indexSettings
so-logs-apache_x_error: *indexSettings
so-logs-auditd_x_log: *indexSettings
@@ -366,17 +342,10 @@ elasticsearch:
so-logs-azure_x_signinlogs: *indexSettings
so-logs-azure_x_springcloudlogs: *indexSettings
so-logs-barracuda_x_waf: *indexSettings
so-logs-cef_x_log: *indexSettings
so-logs-cisco_asa_x_log: *indexSettings
so-logs-cisco_ftd_x_log: *indexSettings
so-logs-cisco_ios_x_log: *indexSettings
so-logs-cisco_ise_x_log: *indexSettings
so-logs-citrix_adc_x_interface: *indexSettings
so-logs-citrix_adc_x_lbvserver: *indexSettings
so-logs-citrix_adc_x_service: *indexSettings
so-logs-citrix_adc_x_system: *indexSettings
so-logs-citrix_adc_x_vpn: *indexSettings
so-logs-citrix_waf_x_log: *indexSettings
so-logs-cloudflare_x_audit: *indexSettings
so-logs-cloudflare_x_logpull: *indexSettings
so-logs-crowdstrike_x_falcon: *indexSettings
@@ -437,8 +406,6 @@ elasticsearch:
so-logs-mysql_x_error: *indexSettings
so-logs-mysql_x_slowlog: *indexSettings
so-logs-netflow_x_log: *indexSettings
so-logs-nginx_x_access: *indexSettings
so-logs-nginx_x_error: *indexSettings
so-logs-o365_x_audit: *indexSettings
so-logs-okta_x_system: *indexSettings
so-logs-panw_x_panos: *indexSettings
@@ -504,7 +471,6 @@ elasticsearch:
so-metrics-endpoint_x_metadata: *indexSettings
so-metrics-endpoint_x_metrics: *indexSettings
so-metrics-endpoint_x_policy: *indexSettings
so-metrics-nginx_x_stubstatus: *indexSettings
so-case: *indexSettings
so-common: *indexSettings
so-endgame: *indexSettings
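
The entries in this file lean on YAML anchors (&pipelines, &indexSettings) that sibling keys reuse via aliases, so one settings block is defined once and shared. A small PyYAML sketch of that mechanic, using the pipeline keys from above:

import yaml  # PyYAML, the same library so-yaml.py uses elsewhere in this repo

doc = yaml.safe_load("""
custom001: &pipelines
  description: Description of the ingest node pipeline
  global: true
custom002: *pipelines
""")
# The alias resolves to the very same parsed mapping object:
assert doc["custom002"] is doc["custom001"]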

View File

@@ -1,383 +1,382 @@
{
"template": {
"settings": {
"index": {
"lifecycle": {
"name": "logs"
},
"codec": "best_compression",
"default_pipeline": "logs-elastic_agent-1.13.1",
"mapping": {
"total_fields": {
"limit": "10000"
}
},
"query": {
"default_field": [
"cloud.account.id",
"cloud.availability_zone",
"cloud.instance.id",
"cloud.instance.name",
"cloud.machine.type",
"cloud.provider",
"cloud.region",
"cloud.project.id",
"cloud.image.id",
"container.id",
"container.image.name",
"container.name",
"host.architecture",
"host.hostname",
"host.id",
"host.mac",
"host.name",
"host.os.family",
"host.os.kernel",
"host.os.name",
"host.os.platform",
"host.os.version",
"host.os.build",
"host.os.codename",
"host.type",
"ecs.version",
"agent.build.original",
"agent.ephemeral_id",
"agent.id",
"agent.name",
"agent.type",
"agent.version",
"log.level",
"message",
"elastic_agent.id",
"elastic_agent.process",
"elastic_agent.version",
"component.id",
"component.type",
"component.binary",
"component.state",
"component.old_state",
"unit.id",
"unit.type",
"unit.state",
"unit.old_state"
]
}
}
},
"mappings": {
"dynamic": false,
"dynamic_templates": [
{
"container.labels": {
"path_match": "container.labels.*",
"mapping": {
"type": "keyword"
},
"match_mapping_type": "string"
}
}
],
"properties": {
"container": {
"properties": {
"image": {
"properties": {
"name": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"agent": {
"properties": {
"build": {
"properties": {
"original": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"ephemeral_id": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"log": {
"properties": {
"level": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"elastic_agent": {
"properties": {
"process": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
},
"snapshot": {
"type": "boolean"
}
}
},
"message": {
"type": "text"
},
"cloud": {
"properties": {
"availability_zone": {
"ignore_above": 1024,
"type": "keyword"
},
"image": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"instance": {
"properties": {
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"provider": {
"ignore_above": 1024,
"type": "keyword"
},
"machine": {
"properties": {
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"project": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"region": {
"ignore_above": 1024,
"type": "keyword"
},
"account": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
{"template": {
"settings": {
"index": {
"lifecycle": {
"name": "logs"
},
"codec": "best_compression",
"default_pipeline": "logs-elastic_agent-1.13.1",
"mapping": {
"total_fields": {
"limit": "10000"
}
},
"query": {
"default_field": [
"cloud.account.id",
"cloud.availability_zone",
"cloud.instance.id",
"cloud.instance.name",
"cloud.machine.type",
"cloud.provider",
"cloud.region",
"cloud.project.id",
"cloud.image.id",
"container.id",
"container.image.name",
"container.name",
"host.architecture",
"host.hostname",
"host.id",
"host.mac",
"host.name",
"host.os.family",
"host.os.kernel",
"host.os.name",
"host.os.platform",
"host.os.version",
"host.os.build",
"host.os.codename",
"host.type",
"ecs.version",
"agent.build.original",
"agent.ephemeral_id",
"agent.id",
"agent.name",
"agent.type",
"agent.version",
"log.level",
"message",
"elastic_agent.id",
"elastic_agent.process",
"elastic_agent.version",
"component.id",
"component.type",
"component.binary",
"component.state",
"component.old_state",
"unit.id",
"unit.type",
"unit.state",
"unit.old_state"
]
}
}
}
},
"component": {
"properties": {
"binary": {
"ignore_above": 1024,
"type": "keyword"
},
"old_state": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "wildcard"
},
"state": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"unit": {
"properties": {
"old_state": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "wildcard"
},
"state": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"@timestamp": {
"type": "date"
},
"ecs": {
"properties": {
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"data_stream": {
"properties": {
"namespace": {
"type": "constant_keyword"
},
"type": {
"type": "constant_keyword"
},
"dataset": {
"type": "constant_keyword"
}
}
},
"host": {
"properties": {
"hostname": {
"ignore_above": 1024,
"type": "keyword"
},
"os": {
"properties": {
"build": {
"ignore_above": 1024,
"type": "keyword"
},
"kernel": {
"ignore_above": 1024,
"type": "keyword"
},
"codename": {
"ignore_above": 1024,
"type": "keyword"
},
"name": {
"ignore_above": 1024,
"type": "keyword",
"fields": {
"text": {
"type": "text"
}
}
},
"family": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
},
"platform": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"domain": {
"ignore_above": 1024,
"type": "keyword"
},
"ip": {
"type": "ip"
},
"containerized": {
"type": "boolean"
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"mac": {
"ignore_above": 1024,
"type": "keyword"
},
"architecture": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"event": {
"properties": {
"dataset": {
"type": "constant_keyword"
}
}
}
}
}
},
"_meta": {
"package": {
"name": "elastic_agent"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -1,12 +0,0 @@
{
"template": {
"settings": {}
},
"_meta": {
"package": {
"name": "endpoint"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -1,132 +0,0 @@
{
"template": {
"settings": {
"index": {
"lifecycle": {
"name": "logs-endpoint.collection-diagnostic"
},
"codec": "best_compression",
"default_pipeline": "logs-endpoint.diagnostic.collection-8.10.2",
"mapping": {
"total_fields": {
"limit": "10000"
},
"ignore_malformed": "true"
},
"query": {
"default_field": [
"ecs.version",
"event.action",
"event.category",
"event.code",
"event.dataset",
"event.hash",
"event.id",
"event.kind",
"event.module",
"event.outcome",
"event.provider",
"event.type"
]
}
}
},
"mappings": {
"dynamic": false,
"properties": {
"@timestamp": {
"ignore_malformed": false,
"type": "date"
},
"ecs": {
"properties": {
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"data_stream": {
"properties": {
"namespace": {
"type": "constant_keyword"
},
"type": {
"type": "constant_keyword"
},
"dataset": {
"type": "constant_keyword"
}
}
},
"event": {
"properties": {
"severity": {
"type": "long"
},
"code": {
"ignore_above": 1024,
"type": "keyword"
},
"created": {
"type": "date"
},
"kind": {
"ignore_above": 1024,
"type": "keyword"
},
"module": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"sequence": {
"type": "long"
},
"ingested": {
"type": "date"
},
"provider": {
"ignore_above": 1024,
"type": "keyword"
},
"action": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"category": {
"ignore_above": 1024,
"type": "keyword"
},
"dataset": {
"ignore_above": 1024,
"type": "keyword"
},
"hash": {
"ignore_above": 1024,
"type": "keyword"
},
"outcome": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}
},
"_meta": {
"package": {
"name": "endpoint"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -1,22 +0,0 @@
{
"template": {
"mappings": {
"properties": {
"error": {
"properties": {
"message": {
"type": "match_only_text"
}
}
}
}
}
},
"_meta": {
"package": {
"name": "system"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -1,139 +0,0 @@
{
"template": {
"mappings": {
"properties": {
"so_audit_doc_id": {
"ignore_above": 1024,
"type": "keyword"
},
"@timestamp": {
"type": "date"
},
"so_kind": {
"ignore_above": 1024,
"type": "keyword"
},
"so_operation": {
"ignore_above": 1024,
"type": "keyword"
},
"so_detection": {
"properties": {
"publicId": {
"type": "text"
},
"title": {
"type": "text"
},
"severity": {
"ignore_above": 1024,
"type": "keyword"
},
"author": {
"ignore_above": 1024,
"type": "keyword"
},
"description": {
"type": "text"
},
"content": {
"type": "text"
},
"isEnabled": {
"type": "boolean"
},
"isReporting": {
"type": "boolean"
},
"isCommunity": {
"type": "boolean"
},
"tags": {
"type": "text"
},
"ruleset": {
"ignore_above": 1024,
"type": "keyword"
},
"engine": {
"ignore_above": 1024,
"type": "keyword"
},
"language": {
"ignore_above": 1024,
"type": "keyword"
},
"license": {
"ignore_above": 1024,
"type": "keyword"
},
"overrides": {
"properties": {
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"isEnabled": {
"type": "boolean"
},
"createdAt": {
"type": "date"
},
"updatedAt": {
"type": "date"
},
"regex": {
"type": "text"
},
"value": {
"type": "text"
},
"thresholdType": {
"ignore_above": 1024,
"type": "keyword"
},
"track": {
"ignore_above": 1024,
"type": "keyword"
},
"ip": {
"type": "text"
},
"count": {
"type": "long"
},
"seconds": {
"type": "long"
},
"customFilter": {
"type": "text"
}
}
}
}
},
"so_detectioncomment": {
"properties": {
"createTime": {
"type": "date"
},
"detectionId": {
"ignore_above": 1024,
"type": "keyword"
},
"value": {
"type": "text"
},
"userId": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}
},
"_meta": {
"ecs_version": "1.12.2"
}
}

View File

@@ -1,7 +0,0 @@
{
"template": {},
"version": 1,
"_meta": {
"description": "default settings for common Security Onion Detections indices"
}
}

View File

@@ -14,19 +14,16 @@
},
"pe": {
"properties": {
"flags": {
"type": "text"
},
"image_version": {
"type": "float"
},
"sections": {
"sections": {
"properties": {
"entropy": {
"type": "float"
}
}
}
},
"image_version": {
"type": "float"
}
}
},
"elf": {

View File

@@ -9,9 +9,11 @@
'so-influxdb',
'so-kibana',
'so-kratos',
'so-mysql',
'so-nginx',
'so-redis',
'so-soc',
'so-soctopus',
'so-strelka-coordinator',
'so-strelka-gatekeeper',
'so-strelka-frontend',
@@ -27,13 +29,14 @@
'so-elastic-fleet',
'so-elastic-fleet-package-registry',
'so-influxdb',
'so-kafka',
'so-kibana',
'so-kratos',
'so-logstash',
'so-mysql',
'so-nginx',
'so-redis',
'so-soc',
'so-soctopus',
'so-strelka-coordinator',
'so-strelka-gatekeeper',
'so-strelka-frontend',
@@ -81,7 +84,6 @@
{% set NODE_CONTAINERS = [
'so-logstash',
'so-redis',
'so-kafka'
] %}
{% elif GLOBALS.role == 'so-idh' %}
@@ -93,7 +95,6 @@
{% set NODE_CONTAINERS = [
'so-elastic-fleet',
'so-logstash',
'so-nginx-fleet-node'
] %}
{% elif GLOBALS.role == 'so-sensor' %}

View File

@@ -90,11 +90,6 @@ firewall:
tcp:
- 8086
udp: []
kafka:
tcp:
- 9092
- 9093
udp: []
kibana:
tcp:
- 5601
@@ -103,11 +98,19 @@ firewall:
tcp:
- 7788
udp: []
mysql:
tcp:
- 3306
udp: []
nginx:
tcp:
- 80
- 443
udp: []
playbook:
tcp:
- 3000
udp: []
redis:
tcp:
- 6379
@@ -175,6 +178,8 @@ firewall:
hostgroups:
eval:
portgroups:
- playbook
- mysql
- kibana
- redis
- influxdb
@@ -358,6 +363,8 @@ firewall:
hostgroups:
manager:
portgroups:
- playbook
- mysql
- kibana
- redis
- influxdb
@@ -369,7 +376,6 @@ firewall:
- elastic_agent_update
- localrules
- sensoroni
- kafka
fleet:
portgroups:
- elasticsearch_rest
@@ -405,7 +411,6 @@ firewall:
- docker_registry
- influxdb
- sensoroni
- kafka
searchnode:
portgroups:
- redis
@@ -419,7 +424,6 @@ firewall:
- elastic_agent_data
- elastic_agent_update
- sensoroni
- kafka
heavynode:
portgroups:
- redis
@@ -555,6 +559,8 @@ firewall:
hostgroups:
managersearch:
portgroups:
- playbook
- mysql
- kibana
- redis
- influxdb
@@ -750,6 +756,8 @@ firewall:
- all
standalone:
portgroups:
- playbook
- mysql
- kibana
- redis
- influxdb
@@ -1283,17 +1291,10 @@ firewall:
- beats_5044
- beats_5644
- elastic_agent_data
- kafka
searchnode:
portgroups:
- redis
- beats_5644
- kafka
managersearch:
portgroups:
- redis
- beats_5644
- kafka
self:
portgroups:
- redis

View File

@@ -115,18 +115,21 @@ firewall:
influxdb:
tcp: *tcpsettings
udp: *udpsettings
kafka:
tcp: *tcpsettings
udp: *udpsettings
kibana:
tcp: *tcpsettings
udp: *udpsettings
localrules:
tcp: *tcpsettings
udp: *udpsettings
mysql:
tcp: *tcpsettings
udp: *udpsettings
nginx:
tcp: *tcpsettings
udp: *udpsettings
playbook:
tcp: *tcpsettings
udp: *udpsettings
redis:
tcp: *tcpsettings
udp: *udpsettings
@@ -935,6 +938,7 @@ firewall:
portgroups: *portgroupshost
customhostgroup9:
portgroups: *portgroupshost
idh:
chain:
DOCKER-USER:

View File

@@ -1,3 +0,0 @@
global:
pcapengine: STENO
pipeline: REDIS

View File

@@ -1,2 +0,0 @@
{% import_yaml 'global/defaults.yaml' as GLOBALDEFAULTS %}
{% set GLOBALMERGED = salt['pillar.get']('global', GLOBALDEFAULTS.global, merge=True) %}
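
This two-line map pattern appears throughout the repo: load the role's defaults.yaml, then let pillar values override it. A rough Python sketch of the merge=True semantics, assuming a plain recursive dictionary merge (the real Salt implementation handles more cases):

def merge(defaults, overrides):
    out = dict(defaults)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], value)  # merge nested sections
        else:
            out[key] = value  # pillar wins for scalars and lists
    return out

defaults = {"global": {"mdengine": "ZEEK", "pipeline": "REDIS"}}
pillar = {"global": {"mdengine": "SURICATA"}}
print(merge(defaults, pillar)["global"])
# {'mdengine': 'SURICATA', 'pipeline': 'REDIS'}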

View File

@@ -10,15 +10,10 @@ global:
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
regexFailureMessage: You must enter a valid IP address or CIDR.
mdengine:
description: Which engine to use for meta data generation. Options are ZEEK and SURICATA.
description: What engine to use for meta data generation. Options are ZEEK and SURICATA.
regex: ^(ZEEK|SURICATA)$
regexFailureMessage: You must enter either ZEEK or SURICATA.
global: True
pcapengine:
description: Which engine to use for generating pcap. Options are STENO, SURICATA or TRANSITION.
regex: ^(STENO|SURICATA|TRANSITION)$
regexFailureMessage: You must enter either STENO, SURICATA or TRANSITION.
global: True
ids:
description: Which IDS engine to use. Currently only Suricata is supported.
global: True
@@ -28,7 +23,7 @@ global:
description: Used for handling of authentication cookies.
global: True
airgap:
description: Airgapped systems do not have network connectivity to the internet. This setting represents how this grid was configured during initial setup. While it is technically possible to manually switch systems between airgap and non-airgap, there are some nuances and additional steps involved. For that reason this setting is marked read-only. Contact your support representative for guidance if there is a need to change this setting.
description: Sets airgap mode.
global: True
readonly: True
imagerepo:
@@ -36,10 +31,9 @@ global:
global: True
advanced: True
pipeline:
description: Sets which pipeline technology for events to use. Currently only Redis is fully supported. Kafka is experimental and requires a Security Onion Pro license.
regex: ^(REDIS|KAFKA)$
regexFailureMessage: You must enter either REDIS or KAFKA.
description: Sets which pipeline technology for events to use. Currently only Redis is supported.
global: True
readonly: True
advanced: True
repo_host:
description: Specify the host where operating system packages will be served from.

View File

@@ -39,7 +39,7 @@ so-idstools:
{% endif %}
- binds:
- /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro
- /opt/so/rules/nids/suri:/opt/so/rules/nids/suri:rw
- /opt/so/rules/nids:/opt/so/rules/nids:rw
- /nsm/rules/:/nsm/rules/:rw
{% if DOCKER.containers['so-idstools'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-idstools'].custom_bind_mounts %}

View File

@@ -1,10 +1,10 @@
{%- from 'vars/globals.map.jinja' import GLOBALS -%}
{%- from 'idstools/map.jinja' import IDSTOOLSMERGED -%}
--merged=/opt/so/rules/nids/suri/all.rules
--local=/opt/so/rules/nids/suri/local.rules
--merged=/opt/so/rules/nids/all.rules
--local=/opt/so/rules/nids/local.rules
{%- if GLOBALS.md_engine == "SURICATA" %}
--local=/opt/so/rules/nids/suri/extraction.rules
--local=/opt/so/rules/nids/suri/filters.rules
--local=/opt/so/rules/nids/extraction.rules
--local=/opt/so/rules/nids/filters.rules
{%- endif %}
--url=http://{{ GLOBALS.manager }}:7788/suricata/emerging-all.rules
--disable=/opt/so/idstools/etc/disable.conf

View File

@@ -6,10 +6,9 @@ idstools:
description: Enter your registration code or oinkcode for paid NIDS rulesets.
title: Registration Code
global: True
forcedType: string
helpLink: rules.html
ruleset:
description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. WARNING! Changing the ruleset will remove all existing Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.'
description: Defines the ruleset you want to run. Options are ETOPEN or ETPRO.
global: True
regex: ETPRO\b|ETOPEN\b
helpLink: rules.html

View File

@@ -21,7 +21,7 @@ idstoolsetcsync:
rulesdir:
file.directory:
- name: /opt/so/rules/nids/suri
- name: /opt/so/rules/nids
- user: 939
- group: 939
- makedirs: True
@@ -29,7 +29,7 @@ rulesdir:
# Don't show changes because all.rules can be large
synclocalnidsrules:
file.recurse:
- name: /opt/so/rules/nids/suri/
- name: /opt/so/rules/nids/
- source: salt://idstools/rules/
- user: 939
- group: 939

View File

@@ -1,106 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set kafka_ips_logstash = [] %}
{% set kafka_ips_kraft = [] %}
{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %}
{% set kafka_ip = GLOBALS.node_ip %}
{# Create list for kafka <-> logstash/searchnode communications #}
{% for node, node_data in kafkanodes.items() %}
{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %}
{% endfor %}
{% set kafka_server_list = "','".join(kafka_ips_logstash) %}
{# Create a list for kraft controller <-> kraft controller communications. Used for Kafka metadata management #}
{% for node, node_data in kafkanodes.items() %}
{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %}
{% endfor %}
{% set kraft_server_list = "','".join(kafka_ips_kraft) %}
include:
- ssl
kafka_group:
group.present:
- name: kafka
- gid: 960
kafka:
user.present:
- uid: 960
- gid: 960
{# Future tools to query kafka directly / show consumer groups
kafka_sbin_tools:
file.recurse:
- name: /usr/sbin
- source: salt://kafka/tools/sbin
- user: 960
- group: 960
- file_mode: 755 #}
kafka_sbin_jinja_tools:
file.recurse:
- name: /usr/sbin
- source: salt://kafka/tools/sbin_jinja
- user: 960
- group: 960
- file_mode: 755
- template: jinja
- defaults:
GLOBALS: {{ GLOBALS }}
kafka_log_dir:
file.directory:
- name: /opt/so/log/kafka
- user: 960
- group: 960
- makedirs: True
kafka_data_dir:
file.directory:
- name: /nsm/kafka/data
- user: 960
- group: 960
- makedirs: True
kafka_generate_keystore:
cmd.run:
- name: "/usr/sbin/so-kafka-generate-keystore"
- onchanges:
- x509: /etc/pki/kafka.key
kafka_keystore_perms:
file.managed:
- replace: False
- name: /etc/pki/kafka.jks
- mode: 640
- user: 960
- group: 939
{% for sc in ['server', 'client'] %}
kafka_kraft_{{sc}}_properties:
file.managed:
- source: salt://kafka/etc/{{sc}}.properties.jinja
- name: /opt/so/conf/kafka/{{sc}}.properties
- template: jinja
- user: 960
- group: 960
- makedirs: True
- show_changes: False
{% endfor %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -1,39 +0,0 @@
kafka:
enabled: False
config:
server:
advertised_x_listeners:
auto_x_create_x_topics_x_enable: true
controller_x_listener_x_names: CONTROLLER
controller_x_quorum_x_voters:
inter_x_broker_x_listener_x_name: BROKER
listeners: BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093
listener_x_security_x_protocol_x_map: CONTROLLER:SSL,BROKER:SSL
log_x_dirs: /nsm/kafka/data
log_x_retention_x_check_x_interval_x_ms: 300000
log_x_retention_x_hours: 168
log_x_segment_x_bytes: 1073741824
node_x_id:
num_x_io_x_threads: 8
num_x_network_x_threads: 3
num_x_partitions: 1
num_x_recovery_x_threads_x_per_x_data_x_dir: 1
offsets_x_topic_x_replication_x_factor: 1
process_x_roles: broker
socket_x_receive_x_buffer_x_bytes: 102400
socket_x_request_x_max_x_bytes: 104857600
socket_x_send_x_buffer_x_bytes: 102400
ssl_x_keystore_x_location: /etc/pki/kafka.jks
ssl_x_keystore_x_password: changeit
ssl_x_keystore_x_type: JKS
ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
ssl_x_truststore_x_password: changeit
transaction_x_state_x_log_x_min_x_isr: 1
transaction_x_state_x_log_x_replication_x_factor: 1
client:
security_x_protocol: SSL
ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
ssl_x_truststore_x_password: changeit
ssl_x_keystore_x_location: /etc/pki/kafka.jks
ssl_x_keystore_x_type: JKS
ssl_x_keystore_x_password: changeit
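
The _x_ token in these keys appears to stand in for a literal dot (the SOC config tree cannot use dotted keys), which matches the title fields in the annotations file below, e.g. log_x_retention_x_hours is titled log.retention.hours. A one-line sketch of the conversion:

def to_kafka_property(key):
    # assumption: every "_x_" encodes a "." in the Kafka property name
    return key.replace("_x_", ".")

print(to_kafka_property("log_x_retention_x_hours"))  # log.retention.hours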

View File

@@ -1,64 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %}
include:
- elasticsearch.ca
- kafka.sostatus
- kafka.config
- kafka.storage
so-kafka:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }}
- hostname: so-kafka
- name: so-kafka
- networks:
- sobridge:
- ipv4_address: {{ DOCKER.containers['so-kafka'].ip }}
- user: kafka
- environment:
- KAFKA_HEAP_OPTS=-Xmx2G -Xms1G
- extra_hosts:
{% for node in KAFKANODES %}
- {{ node }}:{{ KAFKANODES[node].ip }}
{% endfor %}
{% if DOCKER.containers['so-kafka'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-kafka'].port_bindings %}
- {{ BINDING }}
{% endfor %}
- binds:
- /etc/pki/kafka.jks:/etc/pki/kafka.jks
- /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts
- /nsm/kafka/data/:/nsm/kafka/data/:rw
- /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties
- /opt/so/conf/kafka/client.properties:/kafka/config/kraft/client.properties
- watch:
{% for sc in ['server', 'client'] %}
- file: kafka_kraft_{{sc}}_properties
{% endfor %}
delete_so-kafka_so-status.disabled:
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-kafka$
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -1,20 +0,0 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %}
{% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% do KAFKAMERGED.config.server.update({ 'node_x_id': salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid')}) %}
{% do KAFKAMERGED.config.server.update({'advertised_x_listeners': 'BROKER://' ~ GLOBALS.node_ip ~ ':9092'}) %}
{% set nodes = salt['pillar.get']('kafka:nodes', {}) %}
{% set combined = [] %}
{% for hostname, data in nodes.items() %}
{% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %}
{% endfor %}
{% set kraft_controller_quorum_voters = ','.join(combined) %}
{% do KAFKAMERGED.config.server.update({'controller_x_quorum_x_voters': kraft_controller_quorum_voters}) %}
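
The final loop renders controller.quorum.voters as nodeid@hostname:9093 entries. A worked example with a hypothetical three-node pillar:

nodes = {
    "manager":   {"nodeid": 1, "ip": "10.66.0.10"},
    "receiver1": {"nodeid": 2, "ip": "10.66.0.11"},
    "receiver2": {"nodeid": 3, "ip": "10.66.0.12"},
}
combined = [f"{data['nodeid']}@{host}:9093" for host, data in nodes.items()]
print(",".join(combined))
# 1@manager:9093,2@receiver1:9093,3@receiver2:9093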

View File

@@ -1,170 +0,0 @@
kafka:
enabled:
description: Enable or disable Kafka.
helpLink: kafka.html
cluster_id:
description: The ID of the Kafka cluster.
readonly: True
advanced: True
sensitive: True
helpLink: kafka.html
config:
server:
advertised_x_listeners:
description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication.
title: advertised.listeners
helpLink: kafka.html
auto_x_create_x_topics_x_enable:
description: Enable the auto creation of topics.
title: auto.create.topics.enable
forcedType: bool
helpLink: kafka.html
controller_x_listener_x_names:
description: Set listeners used by the controller in a comma-separated list.
title: controller.listener.names
helpLink: kafka.html
controller_x_quorum_x_voters:
description: A comma-separated list of ID and endpoint information for the set of voters.
title: controller.quorum.voters
helpLink: kafka.html
inter_x_broker_x_listener_x_name:
description: The name of the listener used for inter-broker communication.
title: inter.broker.listener.name
helpLink: kafka.html
listeners:
description: Comma-separated list of listener names and the URIs they listen on.
helpLink: kafka.html
listener_x_security_x_protocol_x_map:
description: Comma-separated mapping of listener names to security protocols.
title: listener.security.protocol.map
helpLink: kafka.html
log_x_dirs:
description: Where Kafka logs are stored within the Docker container.
title: log.dirs
helpLink: kafka.html
log_x_retention_x_check_x_interval_x_ms:
description: How frequently log files are checked to see whether they qualify for deletion.
title: log.retention.check.interval.ms
helpLink: kafka.html
log_x_retention_x_hours:
description: How long, in hours, a log file is kept.
title: log.retention.hours
forcedType: int
helpLink: kafka.html
log_x_segment_x_bytes:
description: The maximum allowable size for a log file.
title: log.segment.bytes
forcedType: int
helpLink: kafka.html
node_x_id:
description: The node ID associated with the roles this process plays when process.roles is populated.
title: node.id
forcedType: int
readonly: True
helpLink: kafka.html
num_x_io_x_threads:
description: The number of threads the broker uses for processing requests, which may include disk I/O.
title: num.io.threads
forcedType: int
helpLink: kafka.html
num_x_network_x_threads:
description: The number of threads used for network communication.
title: num.network.threads
forcedType: int
helpLink: kafka.html
num_x_partitions:
description: The number of log partitions assigned per topic.
title: num.partitions
forcedType: int
helpLink: kafka.html
num_x_recovery_x_threads_x_per_x_data_x_dir:
description: The number of threads used for log recovery at startup and flushing at shutdown. This number of threads is used per data directory.
title: num.recovery.threads.per.data.dir
forcedType: int
helpLink: kafka.html
offsets_x_topic_x_replication_x_factor:
description: The offsets topic replication factor.
title: offsets.topic.replication.factor
forcedType: int
helpLink: kafka.html
process_x_roles:
description: The roles the process performs. Use a comma-separated list if multiple.
title: process.roles
helpLink: kafka.html
socket_x_receive_x_buffer_x_bytes:
description: Size, in bytes, of the SO_RCVBUF buffer. A value of -1 will use the OS default.
title: socket.receive.buffer.bytes
#forcedType: int - soc needs to allow -1 as an int before we can use this
helpLink: kafka.html
socket_x_request_x_max_x_bytes:
description: The maximum bytes allowed for a request to the socket.
title: socket.request.max.bytes
forcedType: int
helpLink: kafka.html
socket_x_send_x_buffer_x_bytes:
description: Size, in bytes, of the SO_SNDBUF buffer. A value of -1 will use the OS default.
title: socket.send.buffer.bytes
#forcedType: int - soc needs to allow -1 as an int before we can use this
helpLink: kafka.html
ssl_x_keystore_x_location:
description: The key store file location within the Docker container.
title: ssl.keystore.location
helpLink: kafka.html
ssl_x_keystore_x_password:
description: The key store file password. Invalid for PEM format.
title: ssl.keystore.password
sensitive: True
helpLink: kafka.html
ssl_x_keystore_x_type:
description: The key store file format.
title: ssl.keystore.type
regex: ^(JKS|PKCS12|PEM)$
helpLink: kafka.html
ssl_x_truststore_x_location:
description: The trust store file location within the Docker container.
title: ssl.truststore.location
helpLink: kafka.html
ssl_x_truststore_x_password:
description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
title: ssl.truststore.password
sensitive: True
helpLink: kafka.html
transaction_x_state_x_log_x_min_x_isr:
description: Overrides min.insync.replicas for the transaction topic. When a producer configures acks to "all" (or "-1"), this setting determines the minimum number of replicas required to acknowledge a write as successful. Failure to meet this minimum triggers an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used in conjunction, min.insync.replicas and acks enable stronger durability guarantees. For instance, creating a topic with a replication factor of 3, setting min.insync.replicas to 2, and using acks of "all" ensures that the producer raises an exception if a majority of replicas fail to receive a write.
title: transaction.state.log.min.isr
forcedType: int
helpLink: kafka.html
transaction_x_state_x_log_x_replication_x_factor:
description: Set the replication factor higher for the transaction topic to ensure availability. Internal topic creation will not proceed until the cluster size satisfies this replication factor prerequisite.
title: transaction.state.log.replication.factor
forcedType: int
helpLink: kafka.html
client:
security_x_protocol:
description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT'
title: security.protocol
regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT)
helpLink: kafka.html
ssl_x_keystore_x_location:
description: The key store file location within the Docker container.
title: ssl.keystore.location
helpLink: kafka.html
ssl_x_keystore_x_password:
description: The key store file password. Invalid for PEM format.
title: ssl.keystore.password
sensitive: True
helpLink: kafka.html
ssl_x_keystore_x_type:
description: The key store file format.
title: ssl.keystore.type
regex: ^(JKS|PKCS12|PEM)$
helpLink: kafka.html
ssl_x_truststore_x_location:
description: The trust store file location within the Docker container.
title: ssl.truststore.location
helpLink: kafka.html
ssl_x_truststore_x_password:
description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
title: ssl.truststore.password
sensitive: True
helpLink: kafka.html

View File

@@ -1,38 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
{% if kafka_cluster_id is none %}
generate_kafka_cluster_id:
cmd.run:
- name: /usr/sbin/so-kafka-clusterid
{% endif %}
{% endif %}
{# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #}
{% if not salt['file.file_exists']('/nsm/kafka/data/meta.properties') %}
kafka_storage_init:
cmd.run:
- name: |
docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/newserver.properties
kafka_rm_kafkainit:
cmd.run:
- name: |
docker rm so-kafkainit
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

View File

@@ -1,13 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
# Generate a new keystore
docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt
docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks
docker rm so-kafka-keystore

View File

@@ -21,7 +21,7 @@
{% set KRATOSMERGED = salt['pillar.get']('kratos', default=KRATOSDEFAULTS.kratos, merge=true) %}
{% if KRATOSMERGED.oidc.enabled and 'odc' in salt['pillar.get']('features') %}
{% if KRATOSMERGED.oidc.enabled and 'oidc' in salt['pillar.get']('features') %}
{% do KRATOSMERGED.config.selfservice.methods.update({'oidc': {'enabled': true, 'config': {'providers': [KRATOSMERGED.oidc.config]}}}) %}
{% endif %}

View File

@@ -63,20 +63,6 @@ lspipelinedir:
- user: 931
- group: 939
# Auto-generate Logstash pipeline config
{% for pipeline, config in LOGSTASH_MERGED.pipeline_config.items() %}
{% for assigned_pipeline in ASSIGNED_PIPELINES %}
{% set custom_pipeline = 'custom/' + pipeline + '.conf' %}
{% if custom_pipeline in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %}
ls_custom_pipeline_conf_{{assigned_pipeline}}_{{pipeline}}:
file.managed:
- name: /opt/so/conf/logstash/pipelines/{{assigned_pipeline}}/{{ pipeline }}.conf
- contents: LOGSTASH_MERGED.pipeline_config.{{pipeline}}
{% endif %}
{% endfor %}
{% endfor %}
{% for assigned_pipeline in ASSIGNED_PIPELINES %}
{% for CONFIGFILE in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %}
ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}:

View File

@@ -42,24 +42,6 @@ logstash:
custom2: []
custom3: []
custom4: []
pipeline_config:
custom001: |-
filter {
if [event][module] =~ "zeek" {
mutate {
add_tag => ["network_stuff"]
}
}
}
custom002: PLACEHOLDER
custom003: PLACEHOLDER
custom004: PLACEHOLDER
custom005: PLACEHOLDER
custom006: PLACEHOLDER
custom007: PLACEHOLDER
custom008: PLACEHOLDER
custom009: PLACEHOLDER
custom010: PLACEHOLDER
settings:
lsheap: 500m
config:

View File

@@ -75,10 +75,9 @@ so-logstash:
{% else %}
- /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode'] %}
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
- /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
- /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro
{% endif %}
{% if GLOBALS.role == 'so-eval' %}
- /nsm/zeek:/nsm/zeek:ro

View File

@@ -4,10 +4,9 @@
# Elastic License 2.0.
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
{% from 'kafka/map.jinja' import KAFKAMERGED %}
include:
{% if LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %}
{% if LOGSTASH_MERGED.enabled %}
- logstash.enabled
{% else %}
- logstash.disabled

View File

@@ -1,35 +0,0 @@
{% set kafka_brokers = salt['pillar.get']('logstash:nodes:receiver', {}) %}
{% set kafka_on_mngr = salt['pillar.get']('logstash:nodes:manager', {}) %}
{% set broker_ips = [] %}
{% for node, node_data in kafka_brokers.items() %}
{% do broker_ips.append(node_data['ip'] + ":9092") %}
{% endfor %}
{% for node, node_data in kafka_on_mngr.items() %}
{% do broker_ips.append(node_data['ip'] + ":9092") %}
{% endfor %}
{% set bootstrap_servers = "','".join(broker_ips) %}
input {
kafka {
codec => json
topics => ['default-logs', 'kratos-logs', 'soc-logs', 'strelka-logs', 'suricata-logs', 'zeek-logs']
group_id => 'searchnodes'
client_id => '{{ GLOBALS.hostname }}'
security_protocol => 'SSL'
bootstrap_servers => '{{ bootstrap_servers }}'
ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12'
ssl_keystore_password => 'changeit'
ssl_keystore_type => 'PKCS12'
ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts'
ssl_truststore_password => 'changeit'
decorate_events => true
tags => [ "elastic-agent", "input-{{ GLOBALS.hostname }}", "kafka" ]
}
}
filter {
if ![metadata] {
mutate {
rename => { "@metadata" => "metadata" }
}
}
}
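
The odd-looking "','" separator above exists because the rendered value is dropped inside single quotes in the kafka input block, producing one quoted entry per broker. A worked example with two hypothetical receiver IPs:

broker_ips = ["10.66.0.11:9092", "10.66.0.12:9092"]
bootstrap_servers = "','".join(broker_ips)
print(f"bootstrap_servers => '{bootstrap_servers}'")
# bootstrap_servers => '10.66.0.11:9092','10.66.0.12:9092'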

View File

@@ -31,22 +31,6 @@ logstash:
custom2: *defined_pipelines
custom3: *defined_pipelines
custom4: *defined_pipelines
pipeline_config:
custom001: &pipeline_config
description: Pipeline configuration for Logstash
advanced: True
multiline: True
forcedType: string
helpLink: logstash.html
custom002: *pipeline_config
custom003: *pipeline_config
custom004: *pipeline_config
custom005: *pipeline_config
custom006: *pipeline_config
custom007: *pipeline_config
custom008: *pipeline_config
custom009: *pipeline_config
custom010: *pipeline_config
settings:
lsheap:
description: Heap size to use for logstash

View File

@@ -1,2 +0,0 @@
https://repo.securityonion.net/file/so-repo/prod/2.4/oracle/9
https://repo-alt.securityonion.net/prod/2.4/oracle/9

View File

@@ -1,13 +0,0 @@
[main]
gpgcheck=1
installonly_limit=3
clean_requirements_on_remove=True
best=True
skip_if_unavailable=False
cachedir=/opt/so/conf/reposync/cache
keepcache=0
[securityonionsync]
name=Security Onion Repo repo
mirrorlist=file:///opt/so/conf/reposync/mirror.txt
enabled=1
gpgcheck=1

View File

@@ -1,5 +1,5 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
@@ -27,15 +27,6 @@ repo_log_dir:
- user
- group
agents_log_dir:
file.directory:
- name: /opt/so/log/agents
- user: root
- group: root
- recurse:
- user
- group
yara_log_dir:
file.directory:
- name: /opt/so/log/yarasync
@@ -70,7 +61,7 @@ manager_sbin:
- user: 939
- group: 939
- file_mode: 755
- exclude_pat:
- exclude_pat:
- "*_test.py"
yara_update_scripts:
@@ -84,20 +75,6 @@ yara_update_scripts:
- defaults:
EXCLUDEDRULES: {{ STRELKAMERGED.rules.excluded }}
so-repo-file:
file.managed:
- name: /opt/so/conf/reposync/repodownload.conf
- source: salt://manager/files/repodownload.conf
- user: socore
- group: socore
so-repo-mirrorlist:
file.managed:
- name: /opt/so/conf/reposync/mirror.txt
- source: salt://manager/files/mirror.txt
- user: socore
- group: socore
so-repo-sync:
{% if MANAGERMERGED.reposync.enabled %}
cron.present:
@@ -110,17 +87,6 @@ so-repo-sync:
- hour: '{{ MANAGERMERGED.reposync.hour }}'
- minute: '{{ MANAGERMERGED.reposync.minute }}'
so_fleetagent_status:
cron.present:
- name: /usr/sbin/so-elasticagent-status > /opt/so/log/agents/agentstatus.log 2>&1
- identifier: so_fleetagent_status
- user: root
- minute: '*/5'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
socore_own_saltstack:
file.directory:
- name: /opt/so/saltstack
@@ -137,6 +103,55 @@ rules_dir:
- group: socore
- makedirs: True
{% if STRELKAMERGED.rules.enabled %}
strelkarepos:
file.managed:
- name: /opt/so/conf/strelka/repos.txt
- source: salt://strelka/rules/repos.txt.jinja
- template: jinja
- defaults:
STRELKAREPOS: {{ STRELKAMERGED.rules.repos }}
- makedirs: True
strelka-yara-update:
{% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %}
cron.present:
{% else %}
cron.absent:
{% endif %}
- user: socore
- name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1'
- identifier: strelka-yara-update
- hour: '7'
- minute: '1'
strelka-yara-download:
{% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %}
cron.present:
{% else %}
cron.absent:
{% endif %}
- user: socore
- name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1'
- identifier: strelka-yara-download
- hour: '7'
- minute: '1'
{% if not GLOBALS.airgap %}
update_yara_rules:
cmd.run:
- name: /usr/sbin/so-yara-update
- onchanges:
- file: yara_update_scripts
download_yara_rules:
cmd.run:
- name: /usr/sbin/so-yara-download
- onchanges:
- file: yara_update_scripts
{% endif %}
{% endif %}
{% else %}
{{sls}}_state_not_allowed:

View File

@@ -20,6 +20,10 @@ manager:
description: String of hosts to ignore the proxy settings for.
global: True
helpLink: proxy.html
playbook:
description: Enable Playbook. 1=enabled, 0=disabled.
global: True
helpLink: playbook.html
proxy:
description: Proxy server to use for updates.
global: True

View File

@@ -1,29 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
### THIS SCRIPT AND SALT STATE REFERENCES TO THIS SCRIPT ARE TO BE REMOVED ONCE INITIAL TESTING IS DONE - THESE VALUES WILL BE GENERATED IN SETUP AND SOUP
local_salt_dir=/opt/so/saltstack/local
if [[ -f /usr/sbin/so-common ]]; then
source /usr/sbin/so-common
else
source $(dirname $0)/../../../common/tools/sbin/so-common
fi
if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
kafka_cluster_id=$(get_random_value 22)
echo 'kafka: ' > $local_salt_dir/pillar/kafka/soc_kafka.sls
echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls
if ! grep -q "^ kafkapass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
kafkapass=$(get_random_value)
echo ' kafkapass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls
fi
fi
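
get_random_value is provided by so-common; a hypothetical Python equivalent for the 22-character cluster ID, for illustration only (the real helper may differ):

import secrets
import string

def get_random_value(length=22):
    # assumption: an alphanumeric token is sufficient for the cluster ID
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))

print(get_random_value(22))  # e.g. kQ3vX9aB7cD1eF2gH4jK5m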

View File

@@ -79,32 +79,6 @@ function getinstallinfo() {
source <(echo $INSTALLVARS)
}
function pcapspace() {
if [[ "$OPERATION" == "setup" ]]; then
# Use 25% for PCAP
PCAP_PERCENTAGE=1
DFREEPERCENT=21
local SPACESIZE=$(df -k /nsm | tail -1 | awk '{print $2}' | tr -d '\n')
else
local NSMSIZE=$(salt "$MINION_ID" disk.usage --out=json | jq -r '.[]."/nsm"."1K-blocks" ')
local ROOTSIZE=$(salt "$MINION_ID" disk.usage --out=json | jq -r '.[]."/"."1K-blocks" ')
if [[ "$NSMSIZE" == "null" ]]; then
# Looks like there is no dedicated nsm partition. Using root
local SPACESIZE=$ROOTSIZE
else
local SPACESIZE=$NSMSIZE
fi
fi
local s=$(( $SPACESIZE / 1000000 ))
local s1=$(( $s / 4 * $PCAP_PERCENTAGE ))
MAX_PCAP_SPACE=$s1
}
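
A worked example of the arithmetic above, assuming a dedicated 1 TB /nsm partition in the setup path (PCAP_PERCENTAGE=1; createSENSOR uses 3):

SPACESIZE = 1_000_000_000          # df -k: ~1 TB reported in 1K blocks
s = SPACESIZE // 1_000_000         # ~1000 GB
PCAP_PERCENTAGE = 1
MAX_PCAP_SPACE = s // 4 * PCAP_PERCENTAGE
print(MAX_PCAP_SPACE)              # 250, i.e. 25% of the partition in GB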
function testMinion() {
# Always run on the host, since this is going to be the manager of a distributed grid, or an eval/standalone.
# Distributed managers must run this in order for the sensor nodes to have access to the so-tcpreplay image.
@@ -270,10 +244,6 @@ function add_sensor_to_minion() {
echo " lb_procs: '$CORECOUNT'" >> $PILLARFILE
echo "suricata:" >> $PILLARFILE
echo " enabled: True " >> $PILLARFILE
if [[ $is_pcaplimit ]]; then
echo " pcap:" >> $PILLARFILE
echo " maxsize: $MAX_PCAP_SPACE" >> $PILLARFILE
fi
echo " config:" >> $PILLARFILE
echo " af-packet:" >> $PILLARFILE
echo " threads: '$CORECOUNT'" >> $PILLARFILE
@@ -281,11 +251,17 @@ function add_sensor_to_minion() {
echo " enabled: True" >> $PILLARFILE
if [[ $is_pcaplimit ]]; then
echo " config:" >> $PILLARFILE
echo " diskfreepercentage: $DFREEPERCENT" >> $PILLARFILE
echo " diskfreepercentage: 60" >> $PILLARFILE
fi
echo " " >> $PILLARFILE
}
function add_playbook_to_minion() {
printf '%s\n'\
"playbook:"\
" enabled: True"\
" " >> $PILLARFILE
}
function add_elastalert_to_minion() {
printf '%s\n'\
@@ -347,6 +323,13 @@ function add_nginx_to_minion() {
" " >> $PILLARFILE
}
function add_soctopus_to_minion() {
printf '%s\n'\
"soctopus:"\
" enabled: True"\
" " >> $PILLARFILE
}
function add_soc_to_minion() {
printf '%s\n'\
"soc:"\
@@ -361,6 +344,13 @@ function add_registry_to_minion() {
" " >> $PILLARFILE
}
function add_mysql_to_minion() {
printf '%s\n'\
"mysql:"\
" enabled: True"\
" " >> $PILLARFILE
}
function add_kratos_to_minion() {
printf '%s\n'\
"kratos:"\
@@ -432,17 +422,19 @@ function updateMine() {
function createEVAL() {
is_pcaplimit=true
pcapspace
add_elasticsearch_to_minion
add_sensor_to_minion
add_strelka_to_minion
add_playbook_to_minion
add_elastalert_to_minion
add_kibana_to_minion
add_telegraf_to_minion
add_influxdb_to_minion
add_nginx_to_minion
add_soctopus_to_minion
add_soc_to_minion
add_registry_to_minion
add_mysql_to_minion
add_kratos_to_minion
add_idstools_to_minion
add_elastic_fleet_package_registry_to_minion
@@ -450,19 +442,21 @@ function createEVAL() {
function createSTANDALONE() {
is_pcaplimit=true
pcapspace
add_elasticsearch_to_minion
add_logstash_to_minion
add_sensor_to_minion
add_strelka_to_minion
add_playbook_to_minion
add_elastalert_to_minion
add_kibana_to_minion
add_redis_to_minion
add_telegraf_to_minion
add_influxdb_to_minion
add_nginx_to_minion
add_soctopus_to_minion
add_soc_to_minion
add_registry_to_minion
add_mysql_to_minion
add_kratos_to_minion
add_idstools_to_minion
add_elastic_fleet_package_registry_to_minion
@@ -471,14 +465,17 @@ function createSTANDALONE() {
function createMANAGER() {
add_elasticsearch_to_minion
add_logstash_to_minion
add_playbook_to_minion
add_elastalert_to_minion
add_kibana_to_minion
add_redis_to_minion
add_telegraf_to_minion
add_influxdb_to_minion
add_nginx_to_minion
add_soctopus_to_minion
add_soc_to_minion
add_registry_to_minion
add_mysql_to_minion
add_kratos_to_minion
add_idstools_to_minion
add_elastic_fleet_package_registry_to_minion
@@ -487,14 +484,17 @@ function createMANAGER() {
function createMANAGERSEARCH() {
add_elasticsearch_to_minion
add_logstash_to_minion
add_playbook_to_minion
add_elastalert_to_minion
add_kibana_to_minion
add_redis_to_minion
add_telegraf_to_minion
add_influxdb_to_minion
add_nginx_to_minion
add_soctopus_to_minion
add_soc_to_minion
add_registry_to_minion
add_mysql_to_minion
add_kratos_to_minion
add_idstools_to_minion
add_elastic_fleet_package_registry_to_minion
@@ -531,9 +531,6 @@ function createIDH() {
function createHEAVYNODE() {
is_pcaplimit=true
PCAP_PERCENTAGE=1
DFREEPERCENT=21
pcapspace
add_elasticsearch_to_minion
add_elastic_agent_to_minion
add_logstash_to_minion
@@ -544,10 +541,6 @@ function createHEAVYNODE() {
}
function createSENSOR() {
is_pcaplimit=true
DFREEPERCENT=10
PCAP_PERCENTAGE=3
pcapspace
add_sensor_to_minion
add_strelka_to_minion
add_telegraf_to_minion

View File

@@ -7,8 +7,12 @@
NOROOT=1
. /usr/sbin/so-common
set_version
set_os
salt_minion_count
set -e
curl --retry 5 --retry-delay 60 -A "reposync/$(sync_options)" https://sigs.securityonion.net/checkup --output /tmp/checkup
curl --retry 5 --retry-delay 60 -A "reposync/$VERSION/$OS/$(uname -r)/$MINIONCOUNT" https://sigs.securityonion.net/checkup --output /tmp/checkup
dnf reposync --norepopath -g --delete -m -c /opt/so/conf/reposync/repodownload.conf --repoid=securityonionsync --download-metadata -p /nsm/repo/
createrepo /nsm/repo

View File

@@ -47,7 +47,7 @@ got_root(){
got_root
if [ $# -ne 1 ] ; then
BRANCH=2.4/main
BRANCH=master
else
BRANCH=$1
fi

View File

@@ -347,7 +347,7 @@ function syncElastic() {
[[ $? != 0 ]] && fail "Unable to read credential hashes from database"
user_data_formatted=$(echo "${userData}" | jq -r '.user + ":" + .data.hashed_password')
if lookup_salt_value "features" "" "pillar" | grep -x odc; then
if lookup_salt_value "licensed_features" "" "pillar" | grep -x oidc; then
# generate random placeholder salt/hash for users without passwords
random_crypt=$(get_random_value 53)
user_data_formatted=$(echo "${user_data_formatted}" | sed -r "s/^(.+:)\$/\\1\$2a\$12${random_crypt}/")

View File

@@ -16,17 +16,12 @@ lockFile = "/tmp/so-yaml.lock"
def showUsage(args):
print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]))
print(' General commands:')
print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.')
print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.')
print(' remove - Removes a yaml key, if it exists. Requires KEY arg.')
print('    replace - Replaces (or adds) a new key and sets its value. Requires KEY and VALUE args.')
print(' help - Prints this usage information.')
print('')
print(' Where:')
print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml')
print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2')
print(' VALUE - Value to set for a given key')
print(' LISTITEM - Item to append to a given key\'s list value')
sys.exit(1)
@@ -41,90 +36,6 @@ def writeYaml(filename, content):
return yaml.dump(content, file)
def appendItem(content, key, listItem):
pieces = key.split(".", 1)
if len(pieces) > 1:
appendItem(content[pieces[0]], pieces[1], listItem)
else:
try:
content[key].append(listItem)
except AttributeError:
print("The existing value for the given key is not a list. No action was taken on the file.")
return 1
except KeyError:
print("The key provided does not exist. No action was taken on the file.")
return 1
def convertType(value):
if len(value) > 0 and (not value.startswith("0") or len(value) == 1):
if "." in value:
try:
value = float(value)
return value
except ValueError:
pass
try:
value = int(value)
return value
except ValueError:
pass
lowered_value = value.lower()
if lowered_value == "false":
return False
elif lowered_value == "true":
return True
return value
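
Example inputs and outputs for convertType, including the leading-zero guard that keeps values like "007" as strings:

print(convertType("true"))  # True (bool)
print(convertType("42"))    # 42 (int)
print(convertType("2.5"))   # 2.5 (float)
print(convertType("0"))     # 0 (a lone "0" still converts to int)
print(convertType("007"))   # '007' (leading zero: left as a string)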
def append(args):
if len(args) != 3:
print('Missing filename, key arg, or list item to append', file=sys.stderr)
showUsage(None)
return
filename = args[0]
key = args[1]
listItem = args[2]
content = loadYaml(filename)
appendItem(content, key, convertType(listItem))
writeYaml(filename, content)
return 0
def addKey(content, key, value):
pieces = key.split(".", 1)
if len(pieces) > 1:
if not pieces[0] in content:
content[pieces[0]] = {}
addKey(content[pieces[0]], pieces[1], value)
elif key in content:
raise KeyError("key already exists")
else:
content[key] = value
def add(args):
if len(args) != 3:
print('Missing filename, key arg, and/or value', file=sys.stderr)
showUsage(None)
return
filename = args[0]
key = args[1]
value = args[2]
content = loadYaml(filename)
addKey(content, key, convertType(value))
writeYaml(filename, content)
return 0
def removeKey(content, key):
pieces = key.split(".", 1)
if len(pieces) > 1:
@@ -149,24 +60,6 @@ def remove(args):
return 0
def replace(args):
if len(args) != 3:
print('Missing filename, key arg, and/or value', file=sys.stderr)
showUsage(None)
return
filename = args[0]
key = args[1]
value = args[2]
content = loadYaml(filename)
removeKey(content, key)
addKey(content, key, convertType(value))
writeYaml(filename, content)
return 0
def main():
args = sys.argv[1:]
@@ -176,10 +69,7 @@ def main():
commands = {
"help": showUsage,
"add": add,
"append": append,
"remove": remove,
"replace": replace,
}
code = 1

View File

@@ -42,14 +42,6 @@ class TestRemove(unittest.TestCase):
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Usage:")
def test_remove_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.remove(["file"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")
def test_remove(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
@@ -113,250 +105,3 @@ class TestRemove(unittest.TestCase):
self.assertEqual(actual, expected)
sysmock.assert_called_once_with(1)
self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")
def test_append_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.append(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, or list item to append\n")
def test_append(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
file.close()
soyaml.append([filename, "key3", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: abc\nkey2: false\nkey3:\n- a\n- b\n- c\n- d\n"
self.assertEqual(actual, expected)
def test_append_nested(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.append([filename, "key1.child2", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n - a\n - b\n - c\n - d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_append_nested_deep(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.append([filename, "key1.child2.deep2", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2:\n - a\n - b\n - c\n - d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_append_key_noexist(self):
filename = "/tmp/so-yaml_test-append.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "append", filename, "key4", "h"]
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n")
def test_append_key_noexist_deep(self):
filename = "/tmp/so-yaml_test-append.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "append", filename, "key1.child2.deep3", "h"]
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n")
def test_append_key_nonlist(self):
filename = "/tmp/so-yaml_test-append.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "append", filename, "key1", "h"]
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")
def test_append_key_nonlist_deep(self):
filename = "/tmp/so-yaml_test-append.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "append", filename, "key1.child2.deep1", "h"]
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")
def test_add_key(self):
content = {}
soyaml.addKey(content, "foo", 123)
self.assertEqual(content, {"foo": 123})
try:
soyaml.addKey(content, "foo", "bar")
self.assertFail("expected key error since key already exists")
except KeyError:
pass
try:
soyaml.addKey(content, "foo.bar", 123)
self.assertFail("expected type error since key parent value is not a map")
except TypeError:
pass
content = {}
soyaml.addKey(content, "foo", "bar")
self.assertEqual(content, {"foo": "bar"})
soyaml.addKey(content, "badda.badda", "boom")
self.assertEqual(content, {"foo": "bar", "badda": {"badda": "boom"}})
def test_add_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.add(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n")
def test_add(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
file.close()
soyaml.add([filename, "key4", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: abc\nkey2: false\nkey3:\n- a\n- b\n- c\nkey4: d\n"
self.assertEqual(actual, expected)
def test_add_nested(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.add([filename, "key1.child3", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n - a\n - b\n - c\n child3: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_add_nested_deep(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.add([filename, "key1.child2.deep2", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_replace_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.replace(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n")
def test_replace(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
file.close()
soyaml.replace([filename, "key2", True])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: abc\nkey2: true\nkey3:\n- a\n- b\n- c\n"
self.assertEqual(actual, expected)
def test_replace_nested(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.replace([filename, "key1.child2", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_replace_nested_deep(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.replace([filename, "key1.child2.deep1", 46])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n deep1: 46\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_convert(self):
self.assertEqual(soyaml.convertType("foo"), "foo")
self.assertEqual(soyaml.convertType("foo.bar"), "foo.bar")
self.assertEqual(soyaml.convertType("123"), 123)
self.assertEqual(soyaml.convertType("0"), 0)
self.assertEqual(soyaml.convertType("00"), "00")
self.assertEqual(soyaml.convertType("0123"), "0123")
self.assertEqual(soyaml.convertType("123.456"), 123.456)
self.assertEqual(soyaml.convertType("0123.456"), "0123.456")
self.assertEqual(soyaml.convertType("true"), True)
self.assertEqual(soyaml.convertType("TRUE"), True)
self.assertEqual(soyaml.convertType("false"), False)
self.assertEqual(soyaml.convertType("FALSE"), False)
self.assertEqual(soyaml.convertType(""), "")

View File

@@ -229,7 +229,7 @@ check_local_mods() {
# {% endraw %}
check_pillar_items() {
local pillar_output=$(salt-call pillar.items -lerror --out=json)
local pillar_output=$(salt-call pillar.items --out=json)
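# pillar.items emits JSON; an "_errors" key under .local means the pillar data failed to render.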
cond=$(jq '.local | has("_errors")' <<< "$pillar_output")
if [[ "$cond" == "true" ]]; then
@@ -247,6 +247,67 @@ check_sudoers() {
fi
}
check_log_size_limit() {
local num_minion_pillars
num_minion_pillars=$(find /opt/so/saltstack/local/pillar/minions/ -type f | wc -l)
if [[ $num_minion_pillars -gt 1 ]]; then
if find /opt/so/saltstack/local/pillar/minions/ -type f | grep -q "_heavynode"; then
lsl_msg='distributed'
fi
else
local minion_id
minion_id=$(lookup_salt_value "id" "" "grains" "" "local")
local minion_arr
IFS='_' read -ra minion_arr <<< "$minion_id"
local node_type="${minion_arr[0]}"
local current_limit
# since it is possible for the salt-master service to be stopped when this is run, we need to check the pillar values locally
# we need to combine default local and default pillars before doing this so we can define --pillar-root in salt-call
local epoch_date=$(date +%s%N)
mkdir -vp /opt/so/saltstack/soup_tmp_${epoch_date}/
cp -r /opt/so/saltstack/default/pillar/ /opt/so/saltstack/soup_tmp_${epoch_date}/
# use \cp here to overwrite any pillar files from default with those in local for the tmp directory
\cp -r /opt/so/saltstack/local/pillar/ /opt/so/saltstack/soup_tmp_${epoch_date}/
current_limit=$(salt-call pillar.get elasticsearch:log_size_limit --local --pillar-root=/opt/so/saltstack/soup_tmp_${epoch_date}/pillar --out=newline_values_only)
rm -rf /opt/so/saltstack/soup_tmp_${epoch_date}/
local percent
case $node_type in
'standalone' | 'eval')
percent=50
;;
*)
percent=80
;;
esac
local disk_dir="/"
if [ -d /nsm ]; then
disk_dir="/nsm"
fi
local disk_size_1k
disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
local ratio="1048576"
local disk_size_gb
disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
local new_limit
new_limit=$( echo "$disk_size_gb" "$percent" | awk '{printf("%.0f", $1 * ($2/100))}')
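# Worked example with illustrative numbers: a 1 TB /nsm volume reports
# 1073741824 1K-blocks, so 1073741824 / 1048576 = 1024 GB; on a standalone
# node the new limit is 1024 * (50/100), rounded to 512 (GB).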
if [[ $current_limit != "$new_limit" ]]; then
lsl_msg='single-node'
lsl_details=( "$current_limit" "$new_limit" "$minion_id" )
fi
fi
}
check_os_updates() {
# Check to see if there are OS updates
echo "Checking for OS updates."
@@ -311,17 +372,6 @@ enable_highstate() {
echo ""
}
get_soup_script_hashes() {
CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/soup | awk '{print $1}')
CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}')
GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}')
GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/so-firewall | awk '{print $1}')
}
highstate() {
# Run a highstate.
salt-call state.highstate -l info queue=True
@@ -355,9 +405,6 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20
[[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30
[[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40
[[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50
[[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60
[[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70
true
}
@@ -372,9 +419,6 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
[[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
[[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40
[[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
[[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
[[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
true
}
@@ -426,40 +470,6 @@ post_to_2.4.40() {
POSTVERSION=2.4.40
}
post_to_2.4.50() {
echo "Nothing to apply"
POSTVERSION=2.4.50
}
post_to_2.4.60() {
echo "Regenerating Elastic Agent Installers..."
so-elastic-agent-gen-installers
POSTVERSION=2.4.60
}
post_to_2.4.70() {
# Global pipeline changes to REDIS or KAFKA
echo "Removing global.pipeline pillar configuration"
sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls
# Kafka configuration
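# Note: a Kafka cluster ID is a 22-character base64-encoded UUID, hence the length passed to get_random_value below.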
mkdir -p /opt/so/saltstack/local/pillar/kafka
touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
kafka_cluster_id=$(get_random_value 22)
echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls
if ! grep -q "^ certpass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
kafkapass=$(get_random_value)
echo ' certpass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls
fi
POSTVERSION=2.4.70
}
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -560,155 +570,6 @@ up_to_2.4.40() {
INSTALLEDVERSION=2.4.40
}
up_to_2.4.50() {
echo "Creating additional pillars.."
mkdir -p /opt/so/saltstack/local/pillar/stig/
mkdir -p /opt/so/saltstack/local/salt/stig/
chown socore:socore /opt/so/saltstack/local/salt/stig/
touch /opt/so/saltstack/local/pillar/stig/adv_stig.sls
touch /opt/so/saltstack/local/pillar/stig/soc_stig.sls
# the file_roots need to be updated because the salt 3006.6 upgrade no longer allows symlinks outside the file_roots
# put new so-yaml in place
echo "Updating so-yaml"
\cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" "$DEFAULT_SALT_DIR/salt/manager/tools/sbin/"
\cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" /usr/sbin/
echo "Creating a backup of the salt-master config."
# INSTALLEDVERSION is 2.4.40 at this point, but we want the backup to carry the version
# SO was at prior to starting the upgrade. Use POSTVERSION here since it doesn't change
# until the post-upgrade changes run; POSTVERSION is set to INSTALLEDVERSION at the start of soup.
cp -v /etc/salt/master "/etc/salt/master.so-$POSTVERSION.bak"
echo "Adding /opt/so/rules to file_roots in /etc/salt/master using so-yaml"
so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules/nids
echo "Moving Suricata rules"
mkdir /opt/so/rules/nids/suri
chown socore:socore /opt/so/rules/nids/suri
mv -v /opt/so/rules/nids/*.rules /opt/so/rules/nids/suri/.
echo "Adding /nsm/elastic-fleet/artifacts to file_roots in /etc/salt/master using so-yaml"
so-yaml.py append /etc/salt/master file_roots.base /nsm/elastic-fleet/artifacts
INSTALLEDVERSION=2.4.50
}
up_to_2.4.60() {
echo "Creating directory to store Suricata classification.config"
mkdir -vp /opt/so/saltstack/local/salt/suricata/classification
chown socore:socore /opt/so/saltstack/local/salt/suricata/classification
INSTALLEDVERSION=2.4.60
}
up_to_2.4.70() {
playbook_migration
toggle_telemetry
INSTALLEDVERSION=2.4.70
}
toggle_telemetry() {
if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then
cat << ASSIST_EOF
--------------- SOC Telemetry ---------------
The Security Onion development team could use your help! Enabling SOC
Telemetry will help the team understand which UI features are being
used and enables informed prioritization of future development.
Adjust this setting at any time via the SOC Configuration screen.
Documentation: https://docs.securityonion.net/en/2.4/telemetry.html
ASSIST_EOF
echo -n "Continue the upgrade with SOC Telemetry enabled [Y/n]? "
read -r input
input=$(echo "${input,,}" | xargs echo -n)
echo ""
if [[ ${#input} -eq 0 || "$input" == "yes" || "$input" == "y" || "$input" == "yy" ]]; then
echo "Thank you for helping improve Security Onion!"
else
if so-yaml.py replace /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.telemetryEnabled false; then
echo "Disabled SOC Telemetry."
else
fail "Failed to disable SOC Telemetry; aborting."
fi
fi
echo ""
fi
}
playbook_migration() {
# Start SOC Detections migration
mkdir -p /nsm/backup/detections-migration/{suricata,sigma/rules,elastalert}
# Remove cronjobs
crontab -l | grep -v 'so-playbook-sync_cron' | crontab -
crontab -l | grep -v 'so-playbook-ruleupdate_cron' | crontab -
if grep -A 1 'playbook:' /opt/so/saltstack/local/pillar/minions/* | grep -q 'enabled: True'; then
# Check for active Elastalert rules
active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f -name "*.yaml" | wc -l)
if [[ "$active_rules_count" -gt 0 ]]; then
# Prompt the user to AGREE if active Elastalert rules found
echo
echo "$active_rules_count Active Elastalert/Playbook rules found."
echo "In preparation for the new Detections module, they will be backed up and then disabled."
echo
echo "If you would like to proceed, then type AGREE and press ENTER."
echo
# Read user input
read INPUT
if [ "${INPUT^^}" != 'AGREE' ]; then fail "SOUP canceled."; fi
echo "Backing up the Elastalert rules..."
rsync -av --stats /opt/so/rules/elastalert/playbook/*.yaml /nsm/backup/detections-migration/elastalert/
# Verify that rsync completed successfully
if [[ $? -eq 0 ]]; then
# Delete the Elastalert rules
rm -f /opt/so/rules/elastalert/playbook/*.yaml
echo "Active Elastalert rules have been backed up."
else
fail "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up."
fi
fi
echo
echo "Exporting Sigma rules from Playbook..."
MYSQLPW=$(awk '/mysql:/ {print $2}' /opt/so/saltstack/local/pillar/secrets.sls)
docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do
echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml"
done || fail "Failed to export Sigma rules..."
echo
echo "Exporting Sigma Filters from Playbook..."
docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt || fail "Failed to export Custom Sigma Filters."
echo
echo "Backing up Playbook database..."
docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" || fail "Failed to dump Playbook database."
docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql || fail "Failed to backup Playbook database."
fi
echo
echo "Stopping Playbook services & cleaning up..."
for container in so-playbook so-mysql so-soctopus; do
if [ -n "$(docker ps -q -f name=^${container}$)" ]; then
docker stop $container
fi
done
sed -i '/so-playbook\|so-soctopus\|so-mysql/d' /opt/so/conf/so-status/so-status.conf
rm -f /usr/sbin/so-playbook-* /usr/sbin/so-soctopus-* /usr/sbin/so-mysql-*
echo
echo "Playbook Migration is complete...."
}
determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap
@@ -756,10 +617,6 @@ update_airgap_rules() {
if [ -d /nsm/repo/rules/sigma ]; then
rsync -av $UPDATE_DIR/agrules/sigma/* /nsm/repo/rules/sigma/
fi
# SOC Detections Airgap
rsync -av $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/
rsync -av $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/
}
update_airgap_repo() {
@@ -885,29 +742,31 @@ upgrade_salt() {
}
verify_latest_update_script() {
get_soup_script_hashes
# Check to see if the update scripts match. If not run the new one.
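# md5sum prints "<hash>  <filename>"; awk '{print $1}' keeps just the hash for the comparisons below.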
CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/soup | awk '{print $1}')
CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}')
GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}')
GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/so-firewall | awk '{print $1}')
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
echo "This version of the soup script is up to date. Proceeding."
else
echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."
salt-call state.apply common.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null
# Verify that soup scripts updated as expected
get_soup_script_hashes
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
echo "Succesfully updated soup scripts."
else
echo "There was a problem updating soup scripts. Trying to rerun script update."
salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
fi
cp $UPDATE_DIR/salt/manager/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/manager/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/common/tools/sbin/
salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
echo ""
echo "The soup script has been modified. Please run soup again to continue the upgrade."
exit 0
fi
}
# Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() {
if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
@@ -972,6 +831,7 @@ main() {
echo "### Preparing soup at $(date) ###"
echo ""
set_os
check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
@@ -1049,6 +909,9 @@ main() {
systemctl_func "stop" "$cron_service_name"
# update mine items prior to stopping salt-minion and salt-master
update_salt_mine
echo "Updating dockers to $NEWVERSION."
if [[ $is_airgap -eq 0 ]]; then
airgap_update_dockers
@@ -1124,9 +987,6 @@ main() {
salt-call state.apply salt.minion -l info queue=True
echo ""
# ensure the mine is updated and populated before highstates run, following the salt-master restart
update_salt_mine
enable_highstate
echo ""

0
salt/manager/tools/sbin_jinja/so-yara-update Normal file → Executable file
View File

89
salt/mysql/config.sls Normal file
View File

@@ -0,0 +1,89 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}
# MySQL Setup
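# The numeric UID/GID 939 used throughout is presumably the account the
# so-mysql container runs as; owning the bind-mounted files with it avoids
# permission errors inside the container.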
mysqlpkgs:
pkg.removed:
- skip_suggestions: False
- pkgs:
{% if grains['os_family'] != 'RedHat' %}
- python3-mysqldb
{% else %}
- python3-mysqlclient
{% endif %}
mysqletcdir:
file.directory:
- name: /opt/so/conf/mysql/etc
- user: 939
- group: 939
- makedirs: True
mysqlpiddir:
file.directory:
- name: /opt/so/conf/mysql/pid
- user: 939
- group: 939
- makedirs: True
mysqlcnf:
file.managed:
- name: /opt/so/conf/mysql/etc/my.cnf
- source: salt://mysql/etc/my.cnf
- user: 939
- group: 939
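# mypass is rendered as a Jinja template so the MYSQLPASS value pulled from the
# pillar secrets lands in the file at deploy time (see the defaults below).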
mysqlpass:
file.managed:
- name: /opt/so/conf/mysql/etc/mypass
- source: salt://mysql/etc/mypass
- user: 939
- group: 939
- template: jinja
- defaults:
MYSQLPASS: {{ MYSQLPASS }}
mysqllogdir:
file.directory:
- name: /opt/so/log/mysql
- user: 939
- group: 939
- makedirs: True
mysqldatadir:
file.directory:
- name: /nsm/mysql
- user: 939
- group: 939
- makedirs: True
mysql_sbin:
file.recurse:
- name: /usr/sbin
- source: salt://mysql/tools/sbin
- user: 939
- group: 939
- file_mode: 755
#mysql_sbin_jinja:
# file.recurse:
# - name: /usr/sbin
# - source: salt://mysql/tools/sbin_jinja
# - user: 939
# - group: 939
# - file_mode: 755
# - template: jinja
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

2
salt/mysql/defaults.yaml Normal file
View File

@@ -0,0 +1,2 @@
mysql:
enabled: False
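# Disabled by default; presumably enabled via pillar only on nodes that still need the legacy Playbook database.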

27
salt/mysql/disabled.sls Normal file
View File

@@ -0,0 +1,27 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
include:
- mysql.sostatus
so-mysql:
docker_container.absent:
- force: True
so-mysql_so-status.disabled:
file.comment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-mysql$
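# Commenting the so-mysql entry out of so-status.conf keeps so-status from flagging the removed container as down.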
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

Some files were not shown because too many files have changed in this diff.