Mirror of https://github.com/Security-Onion-Solutions/securityonion.git, synced 2026-01-24 08:53:27 +01:00

Compare commits: 8abd4c9c78...reyesj2/pa (203 commits)
| SHA1 |
|---|
| 55b3fa389e |
| b3ae716929 |
| 30d8cf5a6c |
| 07dbdb9f8f |
| b4c8f7924a |
| 809422c517 |
| bb7593a53a |
| 8e3ba8900f |
| 005ec87248 |
| 4c6ff0641b |
| 3e242913e9 |
| ba68e3c9bd |
| e1199a91b9 |
| d381248e30 |
| f4f0218cae |
| 7a38e52b01 |
| 959fd55e32 |
| a8e218a9ff |
| 3f5cd46d7d |
| 627f0c2bcc |
| f6bde3eb04 |
| f6e95c17a0 |
| 1234cbd04b |
| fd5b93542e |
| a192455fae |
| 66f17e95aa |
| 6f4b96b61b |
| 9905d23976 |
| 17532fe49d |
| 074158b495 |
| 82d5115b3f |
| 5c63111002 |
| 6eda7932e8 |
| 399b7567dd |
| 2133ada3a1 |
| 4f6d4738c4 |
| d430ed6727 |
| 596bc178df |
| 0cd3d7b5a8 |
| 349d77ffdf |
| c3283b04e5 |
| 0da0788e6b |
| 6f7e249aa2 |
| dfaeed54b6 |
| 4f59e46235 |
| bf4cc7befb |
| c63c6dc68b |
| e4225d6e9b |
| 3fb153c43e |
| 6de20c63d4 |
| 00fbc1c259 |
| 3bc552ef38 |
| ee70d94e15 |
| 1887d2c0e9 |
| c99dd4e44f |
| 541b8b288d |
| db168a0452 |
| aa96cf44d4 |
| 0d59c35d2a |
| 8463bde90d |
| 150c31009e |
| 693494024d |
| ee66d6c7d1 |
| 3effd30f7e |
| 4ab20c2454 |
| c075b5a1a7 |
| cb1e59fa49 |
| 588aa435ec |
| 752c764066 |
| af604c2ea8 |
| 6c3f9f149d |
| 152f2e03f1 |
| 605797c86a |
| 1ee5b1611a |
| 5028729e4c |
| ab00fa8809 |
| 2d705e7caa |
| f2370043a8 |
| 3b349b9803 |
| f2b7ffe0eb |
| 3a410eed1a |
| a53619f10f |
| 893aaafa1b |
| 33c34cdeca |
| 9b411867df |
| fd1596b3a0 |
| b05de22f58 |
| e9341ee8d3 |
| f666ad600f |
| 9345718967 |
| 6c879cbd13 |
| 089b5aaf44 |
| b61885add5 |
| 702ba2e0a4 |
| 5cb1e284af |
| e3a4f0873e |
| 7977a020ac |
| 1d63269883 |
| dd8027480b |
| c45bd77e44 |
| 032e0abd61 |
| 8509d1e454 |
| 8ff0c6828b |
| ddd6935e50 |
| 5588a56b24 |
| 12aed6e280 |
| b2a469e08c |
| 285b0e4af9 |
| f9edfd6391 |
| c0845e1612 |
| 9878d9d37e |
| a2196085d5 |
| ba62a8c10c |
| 38f38e2789 |
| 1475f0fc2f |
| a3396b77a3 |
| 8158fee8fc |
| f6301bc3e5 |
| 6c5c176b7d |
| c6d52b5eb1 |
| 7cac528389 |
| d518f75468 |
| c6fac8c36b |
| 17b5b81696 |
| 9960db200c |
| b9ff1704b0 |
| 6fe817ca4a |
| cb9a6fac25 |
| a945768251 |
| c6646e3821 |
| 99dc72cece |
| 04d6cca204 |
| 5ab6bda639 |
| f433de7e12 |
| 8ef6c2f91d |
| 7575218697 |
| dc945dad00 |
| ddcd74ffd2 |
| e105bd12e6 |
| f5688175b6 |
| 72a4ba405f |
| 94694d394e |
| 03dd746601 |
| eec3373ae7 |
| db45ce07ed |
| ba49765312 |
| 72c8c2371e |
| 80411ab6cf |
| 0ff8fa57e7 |
| 411f28a049 |
| 0f42233092 |
| 2dd49f6d9b |
| 271f545f4f |
| c4a70b540e |
| bef85772e3 |
| a6b19c4a6c |
| 44f5e6659b |
| 3f9a9b7019 |
| b7ad985c7a |
| dba087ae25 |
| bbc4b1b502 |
| 9304513ce8 |
| 0b127582cb |
| 6e9b8791c8 |
| ef87ad77c3 |
| 8477420911 |
| f5741e318f |
| 545060103a |
| e010b5680a |
| 8620d3987e |
| 30487a54c1 |
| f15a39c153 |
| aed27fa111 |
| 822c411e83 |
| 41b3ac7554 |
| 23575fdf6c |
| 52f70dc49a |
| 79c9749ff7 |
| 8d2701e143 |
| 877444ac29 |
| b0d9426f1b |
| 18accae47e |
| 55e3a2c6b6 |
| ef092e2893 |
| 89eb95c077 |
| e871ec358e |
| 271a2f74ad |
| d6bd951c37 |
| 45a8c0acd1 |
| 36a6a59d55 |
| cc8fb96047 |
| 3339b50daf |
| 415ea07a4f |
| b80ec95fa8 |
| 99cb51482f |
| 90638f7a43 |
| 1fb00c8eb6 |
| 4490ea7635 |
| bce7a20d8b |
| b52dd53e29 |
| a155f45036 |
| de4424fab0 |
| 33ada95bbc |
.github/DISCUSSION_TEMPLATE/2-4.yml (vendored, 2 lines changed)
```diff
@@ -33,6 +33,8 @@ body:
         - 2.4.180
         - 2.4.190
         - 2.4.200
+        - 2.4.201
+        - 2.4.210
         - Other (please provide detail below)
     validations:
       required: true
```
````diff
@@ -1,17 +1,17 @@
-### 2.4.190-20251024 ISO image released on 2025/10/24
+### 2.4.201-20260114 ISO image released on 2026/1/15
 
 ### Download and Verify
 
-2.4.190-20251024 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso
+2.4.201-20260114 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.4.201-20260114.iso
 
-MD5: 25358481FB876226499C011FC0710358
-SHA1: 0B26173C0CE136F2CA40A15046D1DFB78BCA1165
-SHA256: 4FD9F62EDA672408828B3C0C446FE5EA9FF3C4EE8488A7AB1101544A3C487872
+MD5: 20E926E433203798512EF46E590C89B9
+SHA1: 779E4084A3E1A209B494493B8F5658508B6014FA
+SHA256: 3D10E7C885AEC5C5D4F4E50F9644FF9728E8C0A2E36EBB8C96B32569685A7C40
 
 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.201-20260114.iso.sig
 
 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
 
 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.201-20260114.iso.sig
 ```
 
 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.201-20260114.iso
 ```
 
 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.4.190-20251024.iso.sig securityonion-2.4.190-20251024.iso
+gpg --verify securityonion-2.4.201-20260114.iso.sig securityonion-2.4.201-20260114.iso
 ```
 
 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Thu 23 Oct 2025 07:21:46 AM EDT using RSA key ID FE507013
+gpg: Signature made Wed 14 Jan 2026 05:23:39 PM EST using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
 ```
````
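Since the announcement publishes checksums alongside the GPG signature, the download can also be cross-checked against the quoted SHA256 value; a minimal sketch using standard coreutils (hash copied from the text above):

```
# Compute the local hash and compare it with the published SHA256
sha256sum securityonion-2.4.201-20260114.iso
# expected: 3d10e7c885aec5c5d4f4e50f9644ff9728e8c0a2e36ebb8c96b32569685a7c40
```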
pillar/ca/init.sls (new file, 2 lines)

```diff
@@ -0,0 +1,2 @@
+ca:
+  server:
```
```diff
@@ -1,5 +1,6 @@
 base:
   '*':
+    - ca
     - global.soc_global
     - global.adv_global
     - docker.soc_docker
```
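With the new `ca` pillar applied to every minion, the key can be confirmed from the manager; a minimal sketch using stock Salt CLI calls (the target glob is illustrative):

```
# Show the ca:server pillar value on all minions
salt '*' pillar.get ca:server
# Or render it locally on one node
salt-call pillar.get ca:server
```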
```diff
@@ -15,11 +15,7 @@
   'salt.minion-check',
   'sensoroni',
   'salt.lasthighstate',
   'salt.minion'
 ] %}
 
 {% set ssl_states = [
   'ssl',
   'salt.minion',
   'telegraf',
   'firewall',
   'schedule',
@@ -28,7 +24,7 @@
 
 {% set manager_states = [
   'salt.master',
-  'ca',
+  'ca.server',
   'registry',
   'manager',
   'nginx',
@@ -75,28 +71,24 @@
 {# Map role-specific states #}
 {% set role_states = {
   'so-eval': (
     ssl_states +
     manager_states +
     sensor_states +
-    elastic_stack_states | reject('equalto', 'logstash') | list
+    elastic_stack_states | reject('equalto', 'logstash') | list +
+    ['logstash.ssl']
   ),
   'so-heavynode': (
     ssl_states +
     sensor_states +
     ['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
   ),
   'so-idh': (
     ssl_states +
     ['idh']
   ),
   'so-import': (
     ssl_states +
     manager_states +
     sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
-    ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
+    ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'logstash.ssl', 'strelka.manager']
   ),
   'so-manager': (
     ssl_states +
     manager_states +
     ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
     stig_states +
@@ -104,7 +96,6 @@
     elastic_stack_states
   ),
   'so-managerhype': (
     ssl_states +
     manager_states +
     ['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
     stig_states +
@@ -112,7 +103,6 @@
     elastic_stack_states
   ),
   'so-managersearch': (
     ssl_states +
     manager_states +
     ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
     stig_states +
@@ -120,12 +110,10 @@
     elastic_stack_states
   ),
   'so-searchnode': (
     ssl_states +
     ['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
     stig_states
   ),
   'so-standalone': (
     ssl_states +
     manager_states +
     ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
     sensor_states +
@@ -134,29 +122,24 @@
     elastic_stack_states
   ),
   'so-sensor': (
     ssl_states +
     sensor_states +
     ['nginx'] +
     stig_states
   ),
   'so-fleet': (
     ssl_states +
     stig_states +
     ['logstash', 'nginx', 'healthcheck', 'elasticfleet']
   ),
   'so-receiver': (
     ssl_states +
     kafka_states +
     stig_states +
     ['logstash', 'redis']
   ),
   'so-hypervisor': (
     ssl_states +
     stig_states +
     ['hypervisor', 'libvirt']
   ),
   'so-desktop': (
     ['ssl', 'docker_clean', 'telegraf'] +
     stig_states
   )
 } %}
```
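How this role map resolves for a given node can be checked from the manager with Salt's top-file introspection; a minimal sketch (the minion ID is illustrative):

```
# List the states the highstate would apply to a manager node
salt 'manager*' state.show_top
```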
```diff
@@ -1,4 +0,0 @@
-pki_issued_certs:
-  file.directory:
-    - name: /etc/pki/issued_certs
-    - makedirs: True
```
```diff
@@ -3,70 +3,10 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
-{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 
 include:
-  - ca.dirs
-
-/etc/salt/minion.d/signing_policies.conf:
-  file.managed:
-    - source: salt://ca/files/signing_policies.conf
-
-pki_private_key:
-  x509.private_key_managed:
-    - name: /etc/pki/ca.key
-    - keysize: 4096
-    - passphrase:
-    - backup: True
-    {% if salt['file.file_exists']('/etc/pki/ca.key') -%}
-    - prereq:
-      - x509: /etc/pki/ca.crt
-    {%- endif %}
-
-pki_public_ca_crt:
-  x509.certificate_managed:
-    - name: /etc/pki/ca.crt
-    - signing_private_key: /etc/pki/ca.key
-    - CN: {{ GLOBALS.manager }}
-    - C: US
-    - ST: Utah
-    - L: Salt Lake City
-    - basicConstraints: "critical CA:true"
-    - keyUsage: "critical cRLSign, keyCertSign"
-    - extendedkeyUsage: "serverAuth, clientAuth"
-    - subjectKeyIdentifier: hash
-    - authorityKeyIdentifier: keyid:always, issuer
-    - days_valid: 3650
-    - days_remaining: 0
-    - backup: True
-    - replace: False
-    - require:
-      - sls: ca.dirs
-    - timeout: 30
-    - retry:
-        attempts: 5
-        interval: 30
-
-mine_update_ca_crt:
-  module.run:
-    - mine.update: []
-    - onchanges:
-      - x509: pki_public_ca_crt
-
-cakeyperms:
-  file.managed:
-    - replace: False
-    - name: /etc/pki/ca.key
-    - mode: 640
-    - group: 939
-
-{% else %}
-
-{{sls}}_state_not_allowed:
-  test.fail_without_changes:
-    - name: {{sls}}_state_not_allowed
-
+{% if GLOBALS.is_manager %}
+  - ca.server
+{% endif %}
+  - ca.trustca
```
salt/ca/map.jinja (new file, 3 lines)

```diff
@@ -0,0 +1,3 @@
+{% set CA = {
+    'server': pillar.ca.server
+}%}
```
```diff
@@ -1,7 +1,35 @@
-pki_private_key:
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% set setup_running = salt['cmd.retcode']('pgrep -x so-setup') == 0 %}
+
+{% if setup_running%}
+
+include:
+  - ssl.remove
+
+remove_pki_private_key:
   file.absent:
     - name: /etc/pki/ca.key
 
-pki_public_ca_crt:
+remove_pki_public_ca_crt:
   file.absent:
     - name: /etc/pki/ca.crt
+
+remove_trusttheca:
+  file.absent:
+    - name: /etc/pki/tls/certs/intca.crt
+
+remove_pki_public_ca_crt_symlink:
+  file.absent:
+    - name: /opt/so/saltstack/local/salt/ca/files/ca.crt
+
+{% else %}
+
+so-setup_not_running:
+  test.show_notification:
+    - text: "This state is reserved for usage during so-setup."
+
+{% endif %}
```
salt/ca/server.sls (new file, 63 lines)

```diff
@@ -0,0 +1,63 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% from 'allowed_states.map.jinja' import allowed_states %}
+{% if sls in allowed_states %}
+{% from 'vars/globals.map.jinja' import GLOBALS %}
+
+pki_private_key:
+  x509.private_key_managed:
+    - name: /etc/pki/ca.key
+    - keysize: 4096
+    - passphrase:
+    - backup: True
+    {% if salt['file.file_exists']('/etc/pki/ca.key') -%}
+    - prereq:
+      - x509: /etc/pki/ca.crt
+    {%- endif %}
+
+pki_public_ca_crt:
+  x509.certificate_managed:
+    - name: /etc/pki/ca.crt
+    - signing_private_key: /etc/pki/ca.key
+    - CN: {{ GLOBALS.manager }}
+    - C: US
+    - ST: Utah
+    - L: Salt Lake City
+    - basicConstraints: "critical CA:true"
+    - keyUsage: "critical cRLSign, keyCertSign"
+    - extendedkeyUsage: "serverAuth, clientAuth"
+    - subjectKeyIdentifier: hash
+    - authorityKeyIdentifier: keyid:always, issuer
+    - days_valid: 3650
+    - days_remaining: 7
+    - backup: True
+    - replace: False
+    - timeout: 30
+    - retry:
+        attempts: 5
+        interval: 30
+
+pki_public_ca_crt_symlink:
+  file.symlink:
+    - name: /opt/so/saltstack/local/salt/ca/files/ca.crt
+    - target: /etc/pki/ca.crt
+    - require:
+      - x509: pki_public_ca_crt
+
+cakeyperms:
+  file.managed:
+    - replace: False
+    - name: /etc/pki/ca.key
+    - mode: 640
+    - group: 939
+
+{% else %}
+
+{{sls}}_state_not_allowed:
+  test.fail_without_changes:
+    - name: {{sls}}_state_not_allowed
+
+{% endif %}
```
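Once `ca.server` has run on the manager, the generated root certificate can be inspected to confirm the subject CN and the ten-year validity window set above; a minimal sketch with stock OpenSSL:

```
openssl x509 -in /etc/pki/ca.crt -noout -subject -issuer -dates
```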
salt/ca/signing_policy.sls (new file, 15 lines)

```diff
@@ -0,0 +1,15 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+# when the salt-minion signs the cert, a copy is stored here
+issued_certs_copypath:
+  file.directory:
+    - name: /etc/pki/issued_certs
+    - makedirs: True
+
+signing_policy:
+  file.managed:
+    - name: /etc/salt/minion.d/signing_policies.conf
+    - source: salt://ca/files/signing_policies.conf
```
salt/ca/trustca.sls (new file, 26 lines)

```diff
@@ -0,0 +1,26 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% from 'vars/globals.map.jinja' import GLOBALS %}
+
+include:
+  - docker
+
+# Trust the CA
+trusttheca:
+  file.managed:
+    - name: /etc/pki/tls/certs/intca.crt
+    - source: salt://ca/files/ca.crt
+    - watch_in:
+      - service: docker_running
+    - show_changes: False
+    - makedirs: True
+
+{% if GLOBALS.os_family == 'Debian' %}
+symlinkca:
+  file.symlink:
+    - target: /etc/pki/tls/certs/intca.crt
+    - name: /etc/ssl/certs/intca.crt
+{% endif %}
```
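After `ca.trustca` lands the CA copy on a node, certificates issued by the grid CA can be validated against it; a minimal sketch (the server certificate path is illustrative):

```
# Confirm a locally issued certificate chains to the distributed CA
openssl verify -CAfile /etc/pki/tls/certs/intca.crt /etc/pki/managerssl.crt
```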
```diff
@@ -177,7 +177,7 @@ so-status_script:
     - source: salt://common/tools/sbin/so-status
     - mode: 755
 
-{% if GLOBALS.role in GLOBALS.sensor_roles %}
+{% if GLOBALS.is_sensor %}
 # Add sensor cleanup
 so-sensor-clean:
   cron.present:
```
```diff
@@ -554,21 +554,39 @@ run_check_net_err() {
 }
 
 wait_for_salt_minion() {
-  local minion="$1"
-  local timeout="${2:-5}"
-  local logfile="${3:-'/dev/stdout'}"
-  retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail
-  local attempt=0
-  # each attempts would take about 15 seconds
-  local maxAttempts=20
-  until check_salt_minion_status "$minion" "$timeout" "$logfile"; do
-    attempt=$((attempt+1))
-    if [[ $attempt -eq $maxAttempts ]]; then
-      return 1
-    fi
-    sleep 10
-  done
-  return 0
+  local minion="$1"
+  local max_wait="${2:-30}"
+  local interval="${3:-2}"
+  local logfile="${4:-'/dev/stdout'}"
+  local elapsed=0
+
+  echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting for salt-minion '$minion' to be ready..."
+
+  while [ $elapsed -lt $max_wait ]; do
+    # Check if service is running
+    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if salt-minion service is running"
+    if ! systemctl is-active --quiet salt-minion; then
+      echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service not running (elapsed: ${elapsed}s)"
+      sleep $interval
+      elapsed=$((elapsed + interval))
+      continue
+    fi
+    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service is running"
+
+    # Check if minion responds to ping
+    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if $minion responds to ping"
+    if salt "$minion" test.ping --timeout=3 --out=json 2>> "$logfile" | grep -q "true"; then
+      echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion '$minion' is connected and ready!"
+      return 0
+    fi
+
+    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting... (${elapsed}s / ${max_wait}s)"
+    sleep $interval
+    elapsed=$((elapsed + interval))
+  done
+
+  echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - ERROR: salt-minion '$minion' not ready after $max_wait seconds"
+  return 1
 }
 
 salt_minion_count() {
```
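The rewritten helper takes a poll window and interval instead of a fixed retry count; a hypothetical call from a setup script (minion ID, timings, and log path illustrative):

```
# Wait up to 120s, polling every 5s, appending progress to the setup log
wait_for_salt_minion "$MINION_ID" 120 5 "/root/setup.log" || exit 1
```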
```diff
@@ -129,6 +129,8 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|HTTP 404: Not Found" # Salt loops until Kratos returns 200, during startup Kratos may not be ready
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Cancelling deferred write event maybeFenceReplicas because the event queue is now closed" # Kafka controller log during shutdown/restart
 fi
 
 if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -159,7 +161,9 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error')
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading component template" # false positive (elasticsearch index or template names contain 'error')
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error')
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error while parsing document for index \[.ds-logs-kratos-so-.*object mapping for \[file\]" # false positive (mapping error occuring BEFORE kratos index has rolled over in 2.4.210)
 fi
 
 if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
```
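Each pattern is appended to a single alternation string that the test harness then presumably applies as one extended regex when scanning logs; a minimal sketch of the filtering idea (paths and patterns abbreviated):

```
# Drop whitelisted startup noise from candidate error lines
grep -riE "error" /opt/so/log | grep -vE "$EXCLUDED_ERRORS"
```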
```diff
@@ -85,7 +85,7 @@ function suricata() {
   docker run --rm \
     -v /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro \
     -v /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro \
-    -v /opt/so/conf/suricata/rules:/etc/suricata/rules:ro \
+    -v /opt/so/rules/suricata/:/etc/suricata/rules:ro \
    -v ${LOG_PATH}:/var/log/suricata/:rw \
    -v ${NSM_PATH}/:/nsm/:rw \
    -v "$PCAP:/input.pcap:ro" \
```
```diff
@@ -3,29 +3,16 @@
 {# we only want this state to run it is CentOS #}
 {% if GLOBALS.os == 'OEL' %}
 
-{% set global_ca_text = [] %}
-{% set global_ca_server = [] %}
-{% set manager = GLOBALS.manager %}
-{% set x509dict = salt['mine.get'](manager | lower~'*', 'x509.get_pem_entries') %}
-{% for host in x509dict %}
-  {% if host.split('_')|last in ['manager', 'managersearch', 'standalone', 'import', 'eval'] %}
-    {% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}
-    {% do global_ca_server.append(host) %}
-  {% endif %}
-{% endfor %}
-{% set trusttheca_text = global_ca_text[0] %}
-{% set ca_server = global_ca_server[0] %}
-
 trusted_ca:
-  x509.pem_managed:
+  file.managed:
     - name: /etc/pki/ca-trust/source/anchors/ca.crt
-    - text: {{ trusttheca_text }}
+    - source: salt://ca/files/ca.crt
 
 update_ca_certs:
   cmd.run:
     - name: update-ca-trust
     - onchanges:
-      - x509: trusted_ca
+      - file: trusted_ca
 
 {% else %}
```
```diff
@@ -6,9 +6,9 @@
 {% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 
-# include ssl since docker service requires the intca
+# docker service requires the ca.crt
 include:
-  - ssl
+  - ca
 
 dockergroup:
   group.present:
```
```diff
@@ -89,10 +89,9 @@ docker_running:
     - enable: True
     - watch:
       - file: docker_daemon
-      - x509: trusttheca
     - require:
       - file: docker_daemon
-      - x509: trusttheca
+      - file: trusttheca
 
 
 # Reserve OS ports for Docker proxy in case boot settings are not already applied/present
```
```diff
@@ -60,7 +60,7 @@ so-elastalert:
     - watch:
       - file: elastaconf
     - onlyif:
-      - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}
+      - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 9" {# only run this state if elasticsearch is version 9 #}
 
 delete_so-elastalert_so-status.disabled:
   file.uncomment:
```
```diff
@@ -9,6 +9,7 @@
 {% from 'docker/docker.map.jinja' import DOCKER %}
 
 include:
+  - ca
   - elasticagent.config
   - elasticagent.sostatus
```
```diff
@@ -55,8 +56,10 @@ so-elastic-agent:
 {% endif %}
     - require:
       - file: create-elastic-agent-config
+      - file: trusttheca
     - watch:
       - file: create-elastic-agent-config
+      - file: trusttheca
 
 delete_so-elastic-agent_so-status.disabled:
   file.uncomment:
```
salt/elasticfleet/config.map.jinja (new file, 34 lines)

```diff
@@ -0,0 +1,34 @@
+{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+   or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+   https://securityonion.net/license; you may not use this file except in compliance with the
+   Elastic License 2.0. #}
+
+{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
+
+{# advanced config_yaml options for elasticfleet logstash output #}
+{% set ADV_OUTPUT_LOGSTASH_RAW = ELASTICFLEETMERGED.config.outputs.logstash %}
+{% set ADV_OUTPUT_LOGSTASH = {} %}
+{% for k, v in ADV_OUTPUT_LOGSTASH_RAW.items() %}
+  {% if v != "" and v is not none %}
+    {% if k == 'queue_mem_events' %}
+      {# rename queue_mem_events queue.mem.events #}
+      {% do ADV_OUTPUT_LOGSTASH.update({'queue.mem.events':v}) %}
+    {% elif k == 'loadbalance' %}
+      {% if v %}
+        {# only include loadbalance config when its True #}
+        {% do ADV_OUTPUT_LOGSTASH.update({k:v}) %}
+      {% endif %}
+    {% else %}
+      {% do ADV_OUTPUT_LOGSTASH.update({k:v}) %}
+    {% endif %}
+  {% endif %}
+{% endfor %}
+
+{% set LOGSTASH_CONFIG_YAML_RAW = [] %}
+{% if ADV_OUTPUT_LOGSTASH %}
+  {% for k, v in ADV_OUTPUT_LOGSTASH.items() %}
+    {% do LOGSTASH_CONFIG_YAML_RAW.append(k ~ ': ' ~ v) %}
+  {% endfor %}
+{% endif %}
+
+{% set LOGSTASH_CONFIG_YAML = LOGSTASH_CONFIG_YAML_RAW | join('\\n') if LOGSTASH_CONFIG_YAML_RAW else '' %}
```
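The map only materializes settings that are non-empty and renames `queue_mem_events` to the dotted key the agent output expects; the source pillar values feeding it can be checked directly on a manager (a minimal sketch, key path per the defaults added below):

```
# Example: with bulk_max_size=4096 and queue_mem_events=6400 set in the pillar,
# the map would yield "bulk_max_size: 4096\nqueue.mem.events: 6400".
salt-call pillar.get elasticfleet:config:outputs:logstash
```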
```diff
@@ -95,6 +95,9 @@ soresourcesrepoclone:
     - rev: 'main'
     - depth: 1
     - force_reset: True
+    - retry:
+        attempts: 3
+        interval: 10
 {% endif %}
 
 elasticdefendconfdir:
```
```diff
@@ -10,6 +10,14 @@ elasticfleet:
     grid_enrollment: ''
   defend_filters:
     enable_auto_configuration: False
+  outputs:
+    logstash:
+      bulk_max_size: ''
+      worker: ''
+      queue_mem_events: ''
+      timeout: ''
+      loadbalance: False
+      compression_level: ''
   subscription_integrations: False
   auto_upgrade_integrations: False
 logging:
```
```diff
@@ -13,9 +13,11 @@
 {% set SERVICETOKEN = salt['pillar.get']('elasticfleet:config:server:es_token','') %}
 
 include:
+  - ca
+  - logstash.ssl
   - elasticfleet.ssl
   - elasticfleet.config
   - elasticfleet.sostatus
   - ssl
 
 {% if grains.role not in ['so-fleet'] %}
 # Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
```
```diff
@@ -36,12 +38,13 @@ so-elastic-fleet-auto-configure-logstash-outputs:
 {# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
 so-elastic-fleet-auto-configure-logstash-outputs-force:
   cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-outputs-update --force --certs
+    - name: /usr/sbin/so-elastic-fleet-outputs-update --certs
    - retry:
        attempts: 4
        interval: 30
    - onchanges:
      - x509: etc_elasticfleet_logstash_crt
+      - x509: elasticfleet_kafka_crt
 {% endif %}
 
 # If enabled, automatically update Fleet Server URLs & ES Connection
```
# If enabled, automatically update Fleet Server URLs & ES Connection
|
||||
@@ -132,6 +135,11 @@ so-elastic-fleet:
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- watch:
|
||||
- file: trusttheca
|
||||
- x509: etc_elasticfleet_key
|
||||
- x509: etc_elasticfleet_crt
|
||||
- require:
|
||||
- file: trusttheca
|
||||
- x509: etc_elasticfleet_key
|
||||
- x509: etc_elasticfleet_crt
|
||||
{% endif %}
|
||||
|
||||
```diff
@@ -2,7 +2,7 @@
 {%- raw -%}
 {
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "name": "import-zeek-logs",
@@ -10,19 +10,31 @@
   "description": "Zeek Import logs",
   "policy_id": "so-grid-nodes_general",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/nsm/import/*/zeek/logs/*.log"
             ],
             "data_stream.dataset": "import",
-            "tags": [],
+            "pipeline": "",
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
             "exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}).log$"],
+            "include_files": [],
             "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
-            "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
+            "tags": [],
+            "recursive_glob": true,
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
       }
```
|
||||
{%- endif -%}
|
||||
{
|
||||
"package": {
|
||||
"name": "log",
|
||||
"name": "filestream",
|
||||
"version": ""
|
||||
},
|
||||
"name": "kratos-logs",
|
||||
"namespace": "so",
|
||||
"description": "Kratos logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"inputs": {
|
||||
"logs-logfile": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.logs": {
|
||||
"filestream.generic": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/kratos/kratos.log"
|
||||
],
|
||||
"data_stream.dataset": "kratos",
|
||||
"tags": ["so-kratos"],
|
||||
"pipeline": "kratos",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
"exclude_files": [
|
||||
"\\.gz$"
|
||||
],
|
||||
"include_files": [],
|
||||
{%- if valid_identities -%}
|
||||
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}",
|
||||
{%- else -%}
|
||||
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
|
||||
{%- endif -%}
|
||||
"custom": "pipeline: kratos"
|
||||
"tags": [
|
||||
"so-kratos"
|
||||
],
|
||||
"recursive_glob": true,
|
||||
"clean_inactive": -1,
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"force": true
|
||||
}
|
||||
|
||||
}
|
||||
```diff
@@ -2,28 +2,38 @@
 {%- raw -%}
 {
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "id": "zeek-logs",
   "name": "zeek-logs",
   "namespace": "so",
   "description": "Zeek logs",
   "policy_id": "so-grid-nodes_general",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/nsm/zeek/logs/current/*.log"
             ],
             "data_stream.dataset": "zeek",
-            "tags": [],
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
             "exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}).log$"],
+            "include_files": [],
             "processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
-            "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
+            "tags": [],
+            "recursive_glob": true,
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
       }
@@ -31,4 +41,4 @@
   },
   "force": true
 }
 {%- endraw -%}
```
```diff
@@ -5,7 +5,7 @@
   "package": {
     "name": "endpoint",
     "title": "Elastic Defend",
-    "version": "8.18.1",
+    "version": "9.0.2",
     "requires_root": true
   },
   "enabled": true,
```
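The Elastic Defend package bump tracks the Elastic Stack 9 move visible elsewhere in this branch (the elastalert state now gates on ES version 9); the installed package version could be confirmed through Kibana's Fleet API (a hypothetical check; host and credentials illustrative):

```
curl -sk -u "$USER:$PASS" https://localhost:5601/api/fleet/epm/packages/endpoint | jq '.item.version'
```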
```diff
@@ -1,26 +1,43 @@
 {
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "name": "hydra-logs",
-  "namespace": "so",
   "description": "Hydra logs",
   "policy_id": "so-grid-nodes_general",
+  "namespace": "so",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/opt/so/log/hydra/hydra.log"
             ],
             "data_stream.dataset": "hydra",
-            "tags": ["so-hydra"],
-            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
-            "custom": "pipeline: hydra"
+            "pipeline": "hydra",
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
+            "exclude_files": [
+              "\\.gz$"
+            ],
+            "include_files": [],
+            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
+            "tags": [
+              "so-hydra"
+            ],
+            "recursive_glob": true,
+            "ignore_older": "72h",
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
       }
@@ -28,3 +45,5 @@
   },
   "force": true
 }
+
+
```
```diff
@@ -1,30 +1,44 @@
 {
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "name": "idh-logs",
-  "namespace": "so",
   "description": "IDH integration",
   "policy_id": "so-grid-nodes_general",
+  "namespace": "so",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/nsm/idh/opencanary.log"
             ],
             "data_stream.dataset": "idh",
-            "tags": [],
+            "pipeline": "common",
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
+            "exclude_files": [
+              "\\.gz$"
+            ],
+            "include_files": [],
             "processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
-            "custom": "pipeline: common"
+            "tags": [],
+            "recursive_glob": true,
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
       }
     }
   },
   "force": true
 }
```
```diff
@@ -1,33 +1,46 @@
 {
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "name": "import-evtx-logs",
-  "namespace": "so",
   "description": "Import Windows EVTX logs",
   "policy_id": "so-grid-nodes_general",
   "vars": {},
+  "namespace": "so",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/nsm/import/*/evtx/*.json"
             ],
             "data_stream.dataset": "import",
-            "custom": "",
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
+            "exclude_files": [
+              "\\.gz$"
+            ],
+            "include_files": [],
             "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
             "tags": [
               "import"
-            ]
+            ],
+            "recursive_glob": true,
+            "ignore_older": "72h",
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
       }
     }
   },
   "force": true
 }
```
```diff
@@ -1,30 +1,45 @@
 {
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "name": "import-suricata-logs",
-  "namespace": "so",
   "description": "Import Suricata logs",
   "policy_id": "so-grid-nodes_general",
+  "namespace": "so",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/nsm/import/*/suricata/eve*.json"
             ],
             "data_stream.dataset": "import",
+            "pipeline": "suricata.common",
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
+            "exclude_files": [
+              "\\.gz$"
+            ],
+            "include_files": [],
-            "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n",
-            "tags": [],
+            "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"",
+            "tags": [],
-            "custom": "pipeline: suricata.common"
+            "recursive_glob": true,
+            "ignore_older": "72h",
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
       }
     }
   },
   "force": true
 }
```
```diff
@@ -1,18 +1,17 @@
 {
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "name": "rita-logs",
-  "namespace": "so",
   "description": "RITA Logs",
   "policy_id": "so-grid-nodes_general",
   "vars": {},
+  "namespace": "so",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
@@ -20,15 +19,28 @@
               "/nsm/rita/exploded-dns.csv",
               "/nsm/rita/long-connections.csv"
             ],
-            "exclude_files": [],
-            "ignore_older": "72h",
             "data_stream.dataset": "rita",
-            "tags": [],
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
+            "exclude_files": [
+              "\\.gz$"
+            ],
+            "include_files": [],
             "processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
-            "custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']"
+            "tags": [],
+            "recursive_glob": true,
+            "ignore_older": "72h",
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
       }
     }
   },
   "force": true
 }
```
```diff
@@ -1,29 +1,41 @@
 {
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "name": "so-ip-mappings",
-  "namespace": "so",
   "description": "IP Description mappings",
   "policy_id": "so-grid-nodes_general",
   "vars": {},
+  "namespace": "so",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/nsm/custom-mappings/ip-descriptions.csv"
             ],
             "data_stream.dataset": "hostnamemappings",
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
+            "exclude_files": [
+              "\\.gz$"
+            ],
+            "include_files": [],
-            "processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
             "tags": [
               "so-ip-mappings"
             ],
+            "processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
-            "custom": ""
+            "recursive_glob": true,
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
       }
@@ -31,5 +43,3 @@
   },
   "force": true
 }
-
-
```
```diff
@@ -1,30 +1,44 @@
 {
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "name": "soc-auth-sync-logs",
-  "namespace": "so",
   "description": "Security Onion - Elastic Auth Sync - Logs",
   "policy_id": "so-grid-nodes_general",
+  "namespace": "so",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/opt/so/log/soc/sync.log"
             ],
             "data_stream.dataset": "soc",
-            "tags": ["so-soc"],
+            "pipeline": "common",
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
+            "exclude_files": [
+              "\\.gz$"
+            ],
+            "include_files": [],
             "processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync",
-            "custom": "pipeline: common"
+            "tags": [],
+            "recursive_glob": true,
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
       }
     }
   },
   "force": true
 }
```
```diff
@@ -1,35 +1,48 @@
 {
-  "policy_id": "so-grid-nodes_general",
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "name": "soc-detections-logs",
   "description": "Security Onion Console - Detections Logs",
+  "policy_id": "so-grid-nodes_general",
   "namespace": "so",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/opt/so/log/soc/detections_runtime-status_sigma.log",
               "/opt/so/log/soc/detections_runtime-status_yara.log"
             ],
-            "exclude_files": [],
-            "ignore_older": "72h",
             "data_stream.dataset": "soc",
+            "pipeline": "common",
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
+            "exclude_files": [
+              "\\.gz$"
+            ],
+            "include_files": [],
-            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
             "tags": [
               "so-soc"
             ],
+            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
-            "custom": "pipeline: common"
+            "recursive_glob": true,
+            "ignore_older": "72h",
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
       }
     }
   },
   "force": true
 }
```
```diff
@@ -1,30 +1,46 @@
 {
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "name": "soc-salt-relay-logs",
-  "namespace": "so",
   "description": "Security Onion - Salt Relay - Logs",
   "policy_id": "so-grid-nodes_general",
+  "namespace": "so",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/opt/so/log/soc/salt-relay.log"
             ],
             "data_stream.dataset": "soc",
-            "tags": ["so-soc"],
+            "pipeline": "common",
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
+            "exclude_files": [
+              "\\.gz$"
+            ],
+            "include_files": [],
             "processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay",
-            "custom": "pipeline: common"
+            "tags": [
+              "so-soc"
+            ],
+            "recursive_glob": true,
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
       }
     }
   },
   "force": true
 }
```
```diff
@@ -1,30 +1,44 @@
 {
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "name": "soc-sensoroni-logs",
-  "namespace": "so",
   "description": "Security Onion - Sensoroni - Logs",
   "policy_id": "so-grid-nodes_general",
+  "namespace": "so",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/opt/so/log/sensoroni/sensoroni.log"
             ],
             "data_stream.dataset": "soc",
-            "tags": [],
+            "pipeline": "common",
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
+            "exclude_files": [
+              "\\.gz$"
+            ],
+            "include_files": [],
             "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
-            "custom": "pipeline: common"
+            "tags": [],
+            "recursive_glob": true,
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
      }
    }
   },
   "force": true
 }
```
```diff
@@ -1,30 +1,46 @@
 {
   "package": {
-    "name": "log",
+    "name": "filestream",
     "version": ""
   },
   "name": "soc-server-logs",
-  "namespace": "so",
   "description": "Security Onion Console Logs",
   "policy_id": "so-grid-nodes_general",
+  "namespace": "so",
   "inputs": {
-    "logs-logfile": {
+    "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "log.logs": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/opt/so/log/soc/sensoroni-server.log"
             ],
             "data_stream.dataset": "soc",
-            "tags": ["so-soc"],
+            "pipeline": "common",
+            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
+            "exclude_files": [
+              "\\.gz$"
+            ],
+            "include_files": [],
             "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
-            "custom": "pipeline: common"
+            "tags": [
+              "so-soc"
+            ],
+            "recursive_glob": true,
+            "clean_inactive": -1,
+            "harvester_limit": 0,
+            "fingerprint": false,
+            "fingerprint_offset": 0,
+            "fingerprint_length": "64",
+            "file_identity_native": true,
+            "exclude_lines": [],
+            "include_lines": []
           }
         }
       }
     }
   },
   "force": true
 }
```
@@ -1,30 +1,44 @@
|
||||
{
|
||||
"package": {
|
||||
"name": "log",
|
||||
"name": "filestream",
|
||||
"version": ""
|
||||
},
|
||||
"name": "strelka-logs",
|
||||
"namespace": "so",
|
||||
"description": "Strelka logs",
|
||||
"description": "Strelka Logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"inputs": {
|
||||
"logs-logfile": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.logs": {
|
||||
"filestream.generic": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/nsm/strelka/log/strelka.log"
|
||||
],
|
||||
"data_stream.dataset": "strelka",
|
||||
"tags": [],
|
||||
"pipeline": "strelka.file",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
"exclude_files": [
|
||||
"\\.gz$"
|
||||
],
|
||||
"include_files": [],
|
||||
"processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
|
||||
"custom": "pipeline: strelka.file"
|
||||
"tags": [],
|
||||
"recursive_glob": true,
|
||||
"clean_inactive": -1,
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"force": true
|
||||
}
|
||||
}
|
||||
@@ -1,30 +1,44 @@
|
||||
{
|
||||
"package": {
|
||||
"name": "log",
|
||||
"name": "filestream",
|
||||
"version": ""
|
||||
},
|
||||
"name": "suricata-logs",
|
||||
"namespace": "so",
|
||||
"description": "Suricata integration",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"inputs": {
|
||||
"logs-logfile": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.logs": {
|
||||
"filestream.generic": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/nsm/suricata/eve*.json"
|
||||
],
|
||||
"data_stream.dataset": "suricata",
|
||||
"tags": [],
|
||||
"data_stream.dataset": "filestream.generic",
|
||||
"pipeline": "suricata.common",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
"exclude_files": [
|
||||
"\\.gz$"
|
||||
],
|
||||
"include_files": [],
|
||||
"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
|
||||
"custom": "pipeline: suricata.common"
|
||||
"tags": [],
|
||||
"recursive_glob": true,
|
||||
"clean_inactive": -1,
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"force": true
|
||||
}
|
||||
}
|
||||
@@ -8,7 +8,9 @@
|
||||
{% endif %}
|
||||
|
||||
{% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
|
||||
{% if not AGENT_STATUS %}
|
||||
{% set AGENT_EXISTS = salt['file.file_exists']('/opt/Elastic/Agent/elastic-agent') %}
|
||||
|
||||
{% if not AGENT_STATUS or not AGENT_EXISTS %}
|
||||
|
||||
pull_agent_installer:
|
||||
file.managed:
|
||||
@@ -19,7 +21,7 @@ pull_agent_installer:
|
||||
|
||||
run_installer:
|
||||
cmd.run:
|
||||
- name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }}
|
||||
- name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }} -force
|
||||
- cwd: /opt/so
|
||||
- retry:
|
||||
attempts: 3
|
||||
|
||||
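The new guard reinstalls the agent when the service is registered but the binary is gone, and -force lets the installer overwrite a stale enrollment. A rough hand-run equivalent on a grid node, assuming the paths from the diff; the GRIDNODETOKEN variable and the systemd check are assumptions, not part of the commit:

# Reinstall only when the service is missing or the binary is absent.
if ! systemctl cat elastic-agent >/dev/null 2>&1 || [ ! -x /opt/Elastic/Agent/elastic-agent ]; then
  cd /opt/so && ./so-elastic-agent_linux_amd64 -token="$GRIDNODETOKEN" -force
fi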
@@ -21,6 +21,7 @@
'azure_application_insights.app_state': 'azure.app_state',
'azure_billing.billing': 'azure.billing',
'azure_functions.metrics': 'azure.function',
'azure_ai_foundry.metrics': 'azure.ai_foundry',
'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset',
'azure_metrics.compute_vm': 'azure.compute_vm',
'azure_metrics.container_instance': 'azure.container_instance',

@@ -121,6 +122,9 @@
"phases": {
  "cold": {
    "actions": {
      "allocate": {
        "number_of_replicas": ""
      },
      "set_priority": {"priority": 0}
    },
    "min_age": "60d"

@@ -137,12 +141,31 @@
        "max_age": "30d",
        "max_primary_shard_size": "50gb"
      },
      "forcemerge": {
        "max_num_segments": ""
      },
      "shrink": {
        "max_primary_shard_size": "",
        "method": "COUNT",
        "number_of_shards": ""
      },
      "set_priority": {"priority": 100}
    },
    "min_age": "0ms"
  },
  "warm": {
    "actions": {
      "allocate": {
        "number_of_replicas": ""
      },
      "forcemerge": {
        "max_num_segments": ""
      },
      "shrink": {
        "max_primary_shard_size": "",
        "method": "COUNT",
        "number_of_shards": ""
      },
      "set_priority": {"priority": 50}
    },
    "min_age": "30d"

@@ -50,6 +50,46 @@ elasticfleet:
      global: True
      forcedType: bool
      helpLink: elastic-fleet.html
  outputs:
    logstash:
      bulk_max_size:
        description: The maximum number of events to bulk in a single Logstash request.
        global: True
        forcedType: int
        advanced: True
        helpLink: elastic-fleet.html
      worker:
        description: The number of workers per configured host publishing events.
        global: True
        forcedType: int
        advanced: True
        helpLink: elastic-fleet.html
      queue_mem_events:
        title: queued events
        description: The number of events the queue can store. This value should be evenly divisible by 'bulk_max_size' to avoid sending partial batches to the output.
        global: True
        forcedType: int
        advanced: True
        helpLink: elastic-fleet.html
      timeout:
        description: The number of seconds to wait for responses from the Logstash server before timing out, e.g. 30s.
        regex: ^[0-9]+s$
        advanced: True
        global: True
        helpLink: elastic-fleet.html
      loadbalance:
        description: If true and multiple Logstash hosts are configured, the output plugin load balances published events onto all Logstash hosts. If false, the output plugin sends all events to one host (determined at random) and switches to another host if the selected one becomes unresponsive.
        forcedType: bool
        advanced: True
        global: True
        helpLink: elastic-fleet.html
      compression_level:
        description: The gzip compression level. The compression level must be in the range of 1 (best speed) to 9 (best compression).
        regex: ^[1-9]$
        forcedType: int
        advanced: True
        global: True
        helpLink: elastic-fleet.html
  server:
    custom_fqdn:
      description: Custom FQDN for Agents to connect to. One per line.
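These annotations surface the new Logstash output tunables in SOC. An illustrative pillar override is sketched below; the values are only examples, the minion pillar filename is hypothetical, and the exact nesting under a config: level is an assumption based on how custom_fqdn is consumed elsewhere in this changeset:

# Append example overrides to a (hypothetical) minion pillar file.
cat <<'EOF' >> /opt/so/saltstack/local/pillar/minions/manager_example.sls
elasticfleet:
  config:
    outputs:
      logstash:
        bulk_max_size: 2048
        worker: 2
        queue_mem_events: 8192   # evenly divisible by bulk_max_size
        timeout: 30s
        loadbalance: true
        compression_level: 3
EOF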
salt/elasticfleet/ssl.sls (new file, 186 lines)
@@ -0,0 +1,186 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% from 'ca/map.jinja' import CA %}

{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}

{% if grains['role'] not in [ 'so-heavynode', 'so-receiver'] %}
# Start -- Elastic Fleet Host Cert
etc_elasticfleet_key:
  x509.private_key_managed:
    - name: /etc/pki/elasticfleet-server.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/elasticfleet-server.key') -%}
    - prereq:
      - x509: etc_elasticfleet_crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

etc_elasticfleet_crt:
  x509.certificate_managed:
    - name: /etc/pki/elasticfleet-server.crt
    - ca_server: {{ CA.server }}
    - signing_policy: elasticfleet
    - private_key: /etc/pki/elasticfleet-server.key
    - CN: {{ GLOBALS.hostname }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }},DNS:{{ GLOBALS.url_base }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30

efperms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-server.key
    - mode: 640
    - group: 939

chownelasticfleetcrt:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-server.crt
    - mode: 640
    - user: 947
    - group: 939

chownelasticfleetkey:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-server.key
    - mode: 640
    - user: 947
    - group: 939
# End -- Elastic Fleet Host Cert
{% endif %} # endif is for not including HeavyNodes & Receivers


# Start -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
etc_elasticfleet_agent_key:
  x509.private_key_managed:
    - name: /etc/pki/elasticfleet-agent.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/elasticfleet-agent.key') -%}
    - prereq:
      - x509: etc_elasticfleet_agent_crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

etc_elasticfleet_agent_crt:
  x509.certificate_managed:
    - name: /etc/pki/elasticfleet-agent.crt
    - ca_server: {{ CA.server }}
    - signing_policy: elasticfleet
    - private_key: /etc/pki/elasticfleet-agent.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
  cmd.run:
    - name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-agent.key -topk8 -out /etc/pki/elasticfleet-agent.p8 -nocrypt"
    - onchanges:
      - x509: etc_elasticfleet_agent_key

efagentperms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-agent.key
    - mode: 640
    - group: 939

chownelasticfleetagentcrt:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-agent.crt
    - mode: 640
    - user: 947
    - group: 939

chownelasticfleetagentkey:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-agent.key
    - mode: 640
    - user: 947
    - group: 939
# End -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)

{% endif %}

{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone'] %}
elasticfleet_kafka_key:
  x509.private_key_managed:
    - name: /etc/pki/elasticfleet-kafka.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/elasticfleet-kafka.key') -%}
    - prereq:
      - x509: elasticfleet_kafka_crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

elasticfleet_kafka_crt:
  x509.certificate_managed:
    - name: /etc/pki/elasticfleet-kafka.crt
    - ca_server: {{ CA.server }}
    - signing_policy: kafka
    - private_key: /etc/pki/elasticfleet-kafka.key
    - CN: {{ GLOBALS.hostname }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30

elasticfleet_kafka_cert_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-kafka.crt
    - mode: 640
    - user: 947
    - group: 939

elasticfleet_kafka_key_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-kafka.key
    - mode: 640
    - user: 947
    - group: 939
{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
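After a highstate renders these certs, the SAN list on the server cert should carry the hostname, url_base, node IP, and any custom_fqdn entries, and the agent key should have a PKCS#8 copy. A quick spot check:

# Inspect the SANs baked into the Fleet server cert.
openssl x509 -in /etc/pki/elasticfleet-server.crt -noout -text | grep -A1 'Subject Alternative Name'
# Confirm the converted agent key parses as a valid private key.
openssl pkey -in /etc/pki/elasticfleet-agent.p8 -noout && echo "agent PKCS#8 key OK"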
@@ -14,7 +14,7 @@ if ! is_manager_node; then
fi

# Get current list of Grid Node Agents that need to be upgraded
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true")
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true" --retry 3 --retry-delay 30 --fail 2>/dev/null)

# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
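With --fail added, curl returns an empty body on persistent HTTP errors instead of an error page, so the jq checksum guard that follows sees nothing and the script can bail cleanly. The same pattern in isolation, with the auth flags and real query string trimmed for brevity (this exact call is a sketch, not the script's code):

RAW_JSON=$(curl -s --retry 3 --retry-delay 30 --fail "http://localhost:5601/api/fleet/agents?page=1" 2>/dev/null)
if [ -z "$(jq -r '.page // empty' <<< "$RAW_JSON")" ]; then
  echo "Fleet API did not return usable data - bailing" >&2
  exit 1
fi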
@@ -26,7 +26,7 @@ function update_es_urls() {
}

# Get current list of Fleet Elasticsearch URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch')
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch' --retry 3 --retry-delay 30 --fail 2>/dev/null)

# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
@@ -86,7 +86,7 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list)
echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST
rm -f $INSTALLED_PACKAGE_LIST
echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .installationInfo.version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST

while read -r package; do
# get package details

@@ -3,13 +3,16 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{%- from 'elasticfleet/config.map.jinja' import LOGSTASH_CONFIG_YAML %}

. /usr/sbin/so-common

FORCE_UPDATE=false
UPDATE_CERTS=false
LOGSTASH_PILLAR_CONFIG_YAML="{{ LOGSTASH_CONFIG_YAML }}"
LOGSTASH_PILLAR_STATE_FILE="/opt/so/state/esfleet_logstash_config_pillar"

while [[ $# -gt 0 ]]; do
case $1 in

@@ -19,6 +22,7 @@ while [[ $# -gt 0 ]]; do
    ;;
  -c| --certs)
    UPDATE_CERTS=true
    FORCE_UPDATE=true
    shift
    ;;
  *)
@@ -41,38 +45,45 @@ function update_logstash_outputs() {
  LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
  LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
  LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
  # Revert escaped \\n to \n for jq
  LOGSTASH_PILLAR_CONFIG_YAML=$(printf '%b' "$LOGSTASH_PILLAR_CONFIG_YAML")

  if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
    if [[ "$UPDATE_CERTS" != "true" ]]; then
      # Reuse existing secret
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
        --argjson SECRETS "$SECRETS" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": $SSL_CONFIG,"secrets": $SECRETS}')
    else
      # Update certs, creating new secret
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
        --arg LOGSTASHKEY "$LOGSTASHKEY" \
        --arg LOGSTASHCRT "$LOGSTASHCRT" \
        --arg LOGSTASHCA "$LOGSTASHCA" \
        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}')
        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}')
    fi
  else
    if [[ "$UPDATE_CERTS" != "true" ]]; then
      # Reuse existing ssl config
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": $SSL_CONFIG}')
    else
      # Update ssl config
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
        --arg LOGSTASHKEY "$LOGSTASHKEY" \
        --arg LOGSTASHCRT "$LOGSTASHCRT" \
        --arg LOGSTASHCA "$LOGSTASHCA" \
        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}')
        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}')
    fi
  fi
fi
@@ -84,19 +95,42 @@ function update_kafka_outputs() {
  # Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
  if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
    SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl')
    KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
    KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
    KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
    if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then
      # Update policy when fleet has secrets enabled
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        --argjson SECRETS "$SECRETS" \
        '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
      if [[ "$UPDATE_CERTS" != "true" ]]; then
        # Update policy when fleet has secrets enabled
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --argjson SSL_CONFIG "$SSL_CONFIG" \
          --argjson SECRETS "$SECRETS" \
          '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
      else
        # Update certs, creating new secret
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --arg KAFKAKEY "$KAFKAKEY" \
          --arg KAFKACRT "$KAFKACRT" \
          --arg KAFKACA "$KAFKACA" \
          '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": {"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"secrets": {"ssl":{"key": $KAFKAKEY }}}')
      fi
    else
      # Update policy when fleet has secrets disabled or policy hasn't been force updated
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
      if [[ "$UPDATE_CERTS" != "true" ]]; then
        # Update policy when fleet has secrets disabled or policy hasn't been force updated
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --argjson SSL_CONFIG "$SSL_CONFIG" \
          '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
      else
        # Update ssl config
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --arg KAFKAKEY "$KAFKAKEY" \
          --arg KAFKACRT "$KAFKACRT" \
          --arg KAFKACA "$KAFKACA" \
          '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }}')
      fi
    fi
    # Update Kafka outputs
    curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
@@ -108,7 +142,7 @@ function update_kafka_outputs() {

{% if GLOBALS.pipeline == "KAFKA" %}
# Get current list of Kafka Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka' --retry 3 --retry-delay 30 --fail 2>/dev/null)

# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")

@@ -119,7 +153,7 @@ function update_kafka_outputs() {

# Get the current list of kafka outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}')

declare -a NEW_LIST=()

@@ -134,7 +168,7 @@ function update_kafka_outputs() {
{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
{% else %}
# Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash' --retry 3 --retry-delay 30 --fail 2>/dev/null)

# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
@@ -142,10 +176,19 @@ function update_kafka_outputs() {
  printf "Failed to query for current Logstash Outputs..."
  exit 1
fi
# logstash adv config - compare pillar to last state file value
if [[ -f "$LOGSTASH_PILLAR_STATE_FILE" ]]; then
  PREVIOUS_LOGSTASH_PILLAR_CONFIG_YAML=$(cat "$LOGSTASH_PILLAR_STATE_FILE")
  if [[ "$LOGSTASH_PILLAR_CONFIG_YAML" != "$PREVIOUS_LOGSTASH_PILLAR_CONFIG_YAML" ]]; then
    echo "Logstash pillar config has changed - forcing update"
    FORCE_UPDATE=true
  fi
  echo "$LOGSTASH_PILLAR_CONFIG_YAML" > "$LOGSTASH_PILLAR_STATE_FILE"
fi

# Get the current list of Logstash outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}')

declare -a NEW_LIST=()
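Note the compare-and-persist block above only writes the state file when it already exists, so nothing seeds it on first run. A sketch of the same pattern that also seeds the baseline is below; whether first-run seeding is desired here is an assumption, not something the commit states:

if [ -f "$LOGSTASH_PILLAR_STATE_FILE" ]; then
  [ "$LOGSTASH_PILLAR_CONFIG_YAML" != "$(cat "$LOGSTASH_PILLAR_STATE_FILE")" ] && FORCE_UPDATE=true
fi
# Always persist the current pillar value so the next run has a baseline.
echo "$LOGSTASH_PILLAR_CONFIG_YAML" > "$LOGSTASH_PILLAR_STATE_FILE"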
@@ -194,7 +237,7 @@ function update_kafka_outputs() {

# Sort & hash the new list of Logstash Outputs
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
NEW_HASH=$(sha256sum <<< "$NEW_LIST_JSON" | awk '{print $1}')

# Compare the current & new list of outputs - if different, update the Logstash outputs
if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then
@@ -241,9 +241,11 @@ printf '%s\n'\
"" >> "$global_pillar_file"

# Call Elastic-Fleet Salt State
printf "\nApplying elasticfleet state"
salt-call state.apply elasticfleet queue=True

# Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers
printf "\nApplying elasticfleet.install_agent_grid state"
salt-call state.apply elasticfleet.install_agent_grid queue=True
exit 0

@@ -23,7 +23,7 @@ function update_fleet_urls() {
}

# Get current list of Fleet Server URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default')
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' --retry 3 --retry-delay 30 --fail 2>/dev/null)

# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
@@ -47,7 +47,7 @@ if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
  --arg KAFKACA "$KAFKACA" \
  --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
  --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
  '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
  '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
)
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
  echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"

@@ -67,7 +67,7 @@ elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
  --arg ENABLED_DISABLED "$ENABLED_DISABLED"\
  --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
  --argjson HOSTS "$HOSTS" \
  '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
  '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
)
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
  echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"
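Both hunks swap the Kafka output's "topics" array of objects for a single "topic" string, tracking the newer Fleet output schema. One way to confirm what the running Fleet server actually accepted, reusing the curl config these scripts already depend on:

curl -sK /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka' \
  | jq '.item | {type, hosts, topic}'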
@@ -26,14 +26,14 @@ catrustscript:
    GLOBALS: {{ GLOBALS }}
{% endif %}

cacertz:
elasticsearch_cacerts:
  file.managed:
    - name: /opt/so/conf/ca/cacerts
    - source: salt://elasticsearch/cacerts
    - user: 939
    - group: 939

capemz:
elasticsearch_capems:
  file.managed:
    - name: /opt/so/conf/ca/tls-ca-bundle.pem
    - source: salt://elasticsearch/tls-ca-bundle.pem

@@ -5,11 +5,6 @@

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

include:
  - ssl
  - elasticsearch.ca

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}

@@ -1,6 +1,6 @@
elasticsearch:
  enabled: false
  version: 8.18.8
  version: 9.0.8
  index_clean: true
  config:
    action:

@@ -72,6 +72,8 @@ elasticsearch:
        actions:
          set_priority:
            priority: 0
          allocate:
            number_of_replicas: ""
        min_age: 60d
      delete:
        actions:

@@ -84,11 +86,25 @@ elasticsearch:
            max_primary_shard_size: 50gb
          set_priority:
            priority: 100
          forcemerge:
            max_num_segments: ""
          shrink:
            max_primary_shard_size: ""
            method: COUNT
            number_of_shards: ""
        min_age: 0ms
      warm:
        actions:
          set_priority:
            priority: 50
          forcemerge:
            max_num_segments: ""
          shrink:
            max_primary_shard_size: ""
            method: COUNT
            number_of_shards: ""
          allocate:
            number_of_replicas: ""
        min_age: 30d
  so-case:
    index_sorting: false

@@ -245,7 +261,6 @@ elasticsearch:
          set_priority:
            priority: 50
        min_age: 30d
    warm: 7
  so-detection:
    index_sorting: false
    index_template:

@@ -284,6 +299,19 @@ elasticsearch:
      hot:
        actions: {}
        min_age: 0ms
  sos-backup:
    index_sorting: false
    index_template:
      composed_of: []
      ignore_missing_component_templates: []
      index_patterns:
        - sos-backup-*
      priority: 501
      template:
        settings:
          index:
            number_of_replicas: 0
            number_of_shards: 1
  so-assistant-chat:
    index_sorting: false
    index_template:

@@ -584,7 +612,6 @@ elasticsearch:
          set_priority:
            priority: 50
        min_age: 30d
    warm: 7
  so-import:
    index_sorting: false
    index_template:

@@ -830,53 +857,11 @@ elasticsearch:
      composed_of:
        - agent-mappings
        - dtc-agent-mappings
        - base-mappings
        - dtc-base-mappings
        - client-mappings
        - dtc-client-mappings
        - container-mappings
        - destination-mappings
        - dtc-destination-mappings
        - pb-override-destination-mappings
        - dll-mappings
        - dns-mappings
        - dtc-dns-mappings
        - ecs-mappings
        - dtc-ecs-mappings
        - error-mappings
        - event-mappings
        - dtc-event-mappings
        - file-mappings
        - dtc-file-mappings
        - group-mappings
        - host-mappings
        - dtc-host-mappings
        - http-mappings
        - dtc-http-mappings
        - log-mappings
        - metadata-mappings
        - network-mappings
        - dtc-network-mappings
        - observer-mappings
        - dtc-observer-mappings
        - organization-mappings
        - package-mappings
        - process-mappings
        - dtc-process-mappings
        - related-mappings
        - rule-mappings
        - dtc-rule-mappings
        - server-mappings
        - service-mappings
        - dtc-service-mappings
        - source-mappings
        - dtc-source-mappings
        - pb-override-source-mappings
        - threat-mappings
        - tls-mappings
        - url-mappings
        - user_agent-mappings
        - dtc-user_agent-mappings
        - common-settings
        - common-dynamic-mappings
      data_stream:

@@ -932,7 +917,6 @@ elasticsearch:
          set_priority:
            priority: 50
        min_age: 30d
    warm: 7
  so-hydra:
    close: 30
    delete: 365

@@ -1043,7 +1027,6 @@ elasticsearch:
          set_priority:
            priority: 50
        min_age: 30d
    warm: 7
  so-lists:
    index_sorting: false
    index_template:

@@ -1127,6 +1110,8 @@ elasticsearch:
        actions:
          set_priority:
            priority: 0
          allocate:
            number_of_replicas: ""
        min_age: 60d
      delete:
        actions:

@@ -1139,11 +1124,25 @@ elasticsearch:
            max_primary_shard_size: 50gb
          set_priority:
            priority: 100
          forcemerge:
            max_num_segments: ""
          shrink:
            max_primary_shard_size: ""
            method: COUNT
            number_of_shards: ""
        min_age: 0ms
      warm:
        actions:
          set_priority:
            priority: 50
          allocate:
            number_of_replicas: ""
          forcemerge:
            max_num_segments: ""
          shrink:
            max_primary_shard_size: ""
            method: COUNT
            number_of_shards: ""
        min_age: 30d
  so-logs-detections_x_alerts:
    index_sorting: false

@@ -3123,7 +3122,6 @@ elasticsearch:
          set_priority:
            priority: 50
        min_age: 30d
    warm: 7
  so-logs-system_x_application:
    index_sorting: false
    index_template:

@@ -14,6 +14,9 @@
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}

include:
  - ca
  - elasticsearch.ca
  - elasticsearch.ssl
  - elasticsearch.config
  - elasticsearch.sostatus

@@ -61,11 +64,7 @@ so-elasticsearch:
      - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
      - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
      - /opt/so/conf/ca/cacerts:/usr/share/elasticsearch/jdk/lib/security/cacerts:ro
{% if GLOBALS.is_manager %}
      - /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% else %}
      - /etc/pki/tls/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% endif %}
      - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro
      - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro
      - /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro

@@ -82,22 +81,21 @@ so-elasticsearch:
{% endfor %}
{% endif %}
    - watch:
      - file: cacertz
      - file: trusttheca
      - x509: elasticsearch_crt
      - x509: elasticsearch_key
      - file: elasticsearch_cacerts
      - file: esyml
    - require:
      - file: trusttheca
      - x509: elasticsearch_crt
      - x509: elasticsearch_key
      - file: elasticsearch_cacerts
      - file: esyml
      - file: eslog4jfile
      - file: nsmesdir
      - file: eslogdir
      - file: cacertz
      - x509: /etc/pki/elasticsearch.crt
      - x509: /etc/pki/elasticsearch.key
      - file: elasticp12perms
{% if GLOBALS.is_manager %}
      - x509: pki_public_ca_crt
{% else %}
      - x509: trusttheca
{% endif %}
      - cmd: auth_users_roles_inode
      - cmd: auth_users_inode

@@ -1,9 +1,90 @@
{
"description" : "kratos",
"processors" : [
{"set":{"field":"audience","value":"access","override":false,"ignore_failure":true}},
{"set":{"field":"event.dataset","ignore_empty_value":true,"ignore_failure":true,"value":"kratos.{{{audience}}}","media_type":"text/plain"}},
{"set":{"field":"event.action","ignore_failure":true,"copy_from":"msg" }},
{ "pipeline": { "name": "common" } }
]
  "description": "kratos",
  "processors": [
    {
      "set": {
        "field": "audience",
        "value": "access",
        "override": false,
        "ignore_failure": true
      }
    },
    {
      "set": {
        "field": "event.dataset",
        "ignore_empty_value": true,
        "ignore_failure": true,
        "value": "kratos.{{{audience}}}",
        "media_type": "text/plain"
      }
    },
    {
      "set": {
        "field": "event.action",
        "ignore_failure": true,
        "copy_from": "msg"
      }
    },
    {
      "rename": {
        "field": "http_request",
        "target_field": "http.request",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "http_response",
        "target_field": "http.response",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "http.request.path",
        "target_field": "http.uri",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "http.request.method",
        "target_field": "http.method",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "http.request.method",
        "target_field": "http.method",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "http.request.query",
        "target_field": "http.query",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "http.request.headers.user-agent",
        "target_field": "http.useragent",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "pipeline": {
        "name": "common"
      }
    }
  ]
}
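A quick way to exercise the expanded pipeline is the ingest simulate API. The call below assumes the pipeline is loaded as "kratos", that the nested "common" pipeline exists on the cluster, and uses an invented sample document shaped like the http_request objects the rename processors expect:

curl -sK /opt/so/conf/elasticsearch/curl.config -X POST 'https://localhost:9200/_ingest/pipeline/kratos/_simulate' \
  -H 'Content-Type: application/json' -d '{
    "docs": [{ "_source": {
      "msg": "login succeeded",
      "http_request": { "method": "POST", "path": "/self-service/login" }
    }}]
  }' | jq '.docs[0].doc._source'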
@@ -131,6 +131,47 @@ elasticsearch:
          description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
          global: True
          helpLink: elasticsearch.html
        shrink:
          method:
            description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
            options:
              - COUNT
              - SIZE
            global: True
            advanced: True
            forcedType: string
          number_of_shards:
            title: shard count
            description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
            global: True
            forcedType: int
            advanced: True
          max_primary_shard_size:
            title: max shard size
            description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
            regex: ^[0-9]+(?:gb|tb|pb)$
            global: True
            forcedType: string
            advanced: True
          allow_write_after_shrink:
            description: Allow writes after shrink.
            global: True
            forcedType: bool
            default: False
            advanced: True
        forcemerge:
          max_num_segments:
            description: Reduce the number of segments in each index shard and clean up deleted documents.
            global: True
            forcedType: int
            advanced: True
          index_codec:
            title: compression
            description: Use higher compression for stored fields at the cost of slower performance.
            forcedType: bool
            global: True
            default: False
            advanced: True
      cold:
        min_age:
          description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier.

@@ -144,6 +185,12 @@ elasticsearch:
          description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
          global: True
          helpLink: elasticsearch.html
        allocate:
          number_of_replicas:
            description: Set the number of replicas. Remains the same as the previous phase by default.
            forcedType: int
            global: True
            advanced: True
      warm:
        min_age:
          description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier.

@@ -158,6 +205,52 @@ elasticsearch:
          forcedType: int
          global: True
          helpLink: elasticsearch.html
        shrink:
          method:
            description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
            options:
              - COUNT
              - SIZE
            global: True
            advanced: True
          number_of_shards:
            title: shard count
            description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
            global: True
            forcedType: int
            advanced: True
          max_primary_shard_size:
            title: max shard size
            description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
            regex: ^[0-9]+(?:gb|tb|pb)$
            global: True
            forcedType: string
            advanced: True
          allow_write_after_shrink:
            description: Allow writes after shrink.
            global: True
            forcedType: bool
            default: False
            advanced: True
        forcemerge:
          max_num_segments:
            description: Reduce the number of segments in each index shard and clean up deleted documents.
            global: True
            forcedType: int
            advanced: True
          index_codec:
            title: compression
            description: Use higher compression for stored fields at the cost of slower performance.
            forcedType: bool
            global: True
            default: False
            advanced: True
        allocate:
          number_of_replicas:
            description: Set the number of replicas. Remains the same as the previous phase by default.
            forcedType: int
            global: True
            advanced: True
      delete:
        min_age:
          description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion.

@@ -287,6 +380,47 @@ elasticsearch:
          global: True
          advanced: True
          helpLink: elasticsearch.html
        shrink:
          method:
            description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
            options:
              - COUNT
              - SIZE
            global: True
            advanced: True
            forcedType: string
          number_of_shards:
            title: shard count
            description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
            global: True
            forcedType: int
            advanced: True
          max_primary_shard_size:
            title: max shard size
            description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
            regex: ^[0-9]+(?:gb|tb|pb)$
            global: True
            forcedType: string
            advanced: True
          allow_write_after_shrink:
            description: Allow writes after shrink.
            global: True
            forcedType: bool
            default: False
            advanced: True
        forcemerge:
          max_num_segments:
            description: Reduce the number of segments in each index shard and clean up deleted documents.
            global: True
            forcedType: int
            advanced: True
          index_codec:
            title: compression
            description: Use higher compression for stored fields at the cost of slower performance.
            forcedType: bool
            global: True
            default: False
            advanced: True
      warm:
        min_age:
          description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier.

@@ -314,6 +448,52 @@ elasticsearch:
          global: True
          advanced: True
          helpLink: elasticsearch.html
        shrink:
          method:
            description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
            options:
              - COUNT
              - SIZE
            global: True
            advanced: True
          number_of_shards:
            title: shard count
            description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
            global: True
            forcedType: int
            advanced: True
          max_primary_shard_size:
            title: max shard size
            description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
            regex: ^[0-9]+(?:gb|tb|pb)$
            global: True
            forcedType: string
            advanced: True
          allow_write_after_shrink:
            description: Allow writes after shrink.
            global: True
            forcedType: bool
            default: False
            advanced: True
        forcemerge:
          max_num_segments:
            description: Reduce the number of segments in each index shard and clean up deleted documents.
            global: True
            forcedType: int
            advanced: True
          index_codec:
            title: compression
            description: Use higher compression for stored fields at the cost of slower performance.
            forcedType: bool
            global: True
            default: False
            advanced: True
        allocate:
          number_of_replicas:
            description: Set the number of replicas. Remains the same as the previous phase by default.
            forcedType: int
            global: True
            advanced: True
      cold:
        min_age:
          description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier.

@@ -330,6 +510,12 @@ elasticsearch:
          global: True
          advanced: True
          helpLink: elasticsearch.html
        allocate:
          number_of_replicas:
            description: Set the number of replicas. Remains the same as the previous phase by default.
            forcedType: int
            global: True
            advanced: True
      delete:
        min_age:
          description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion.

salt/elasticsearch/ssl.sls (new file, 66 lines)
@@ -0,0 +1,66 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}

# Create a cert for elasticsearch
elasticsearch_key:
  x509.private_key_managed:
    - name: /etc/pki/elasticsearch.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%}
    - prereq:
      - x509: /etc/pki/elasticsearch.crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

elasticsearch_crt:
  x509.certificate_managed:
    - name: /etc/pki/elasticsearch.crt
    - ca_server: {{ CA.server }}
    - signing_policy: registry
    - private_key: /etc/pki/elasticsearch.key
    - CN: {{ GLOBALS.hostname }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
  cmd.run:
    - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:"
    - onchanges:
      - x509: /etc/pki/elasticsearch.key

elastickeyperms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticsearch.key
    - mode: 640
    - group: 930

elasticp12perms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticsearch.p12
    - mode: 640
    - group: 930

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -61,5 +61,55 @@
    {% do settings.index_template.template.settings.index.pop('sort') %}
    {% endif %}
  {% endif %}

  {# advanced ilm actions #}
  {% if settings.policy is defined and settings.policy.phases is defined %}
    {% set PHASE_NAMES = ["hot", "warm", "cold"] %}
    {% for P in PHASE_NAMES %}
      {% if settings.policy.phases[P] is defined and settings.policy.phases[P].actions is defined %}
        {% set PHASE = settings.policy.phases[P].actions %}
        {# remove allocate action if number_of_replicas isn't configured #}
        {% if PHASE.allocate is defined %}
          {% if PHASE.allocate.number_of_replicas is not defined or PHASE.allocate.number_of_replicas == "" %}
            {% do PHASE.pop('allocate', none) %}
          {% endif %}
        {% endif %}
        {# start shrink action #}
        {% if PHASE.shrink is defined %}
          {% if PHASE.shrink.method is defined %}
            {% if PHASE.shrink.method == 'COUNT' and PHASE.shrink.number_of_shards is defined and PHASE.shrink.number_of_shards %}
              {# remove max_primary_shard_size value when doing shrink operation by count vs size #}
              {% do PHASE.shrink.pop('max_primary_shard_size', none) %}
            {% elif PHASE.shrink.method == 'SIZE' and PHASE.shrink.max_primary_shard_size is defined and PHASE.shrink.max_primary_shard_size %}
              {# remove number_of_shards value when doing shrink operation by size vs count #}
              {% do PHASE.shrink.pop('number_of_shards', none) %}
            {% else %}
              {# method isn't defined or missing a required config number_of_shards/max_primary_shard_size #}
              {% do PHASE.pop('shrink', none) %}
            {% endif %}
          {% endif %}
        {% endif %}
        {# always remove shrink method since its only used for SOC config, not in the actual ilm policy #}
        {% if PHASE.shrink is defined %}
          {% do PHASE.shrink.pop('method', none) %}
        {% endif %}
        {# end shrink action #}
        {# start force merge #}
        {% if PHASE.forcemerge is defined %}
          {% if PHASE.forcemerge.index_codec is defined and PHASE.forcemerge.index_codec %}
            {% do PHASE.forcemerge.update({'index_codec': 'best_compression'}) %}
          {% else %}
            {% do PHASE.forcemerge.pop('index_codec', none) %}
          {% endif %}
          {% if PHASE.forcemerge.max_num_segments is not defined or not PHASE.forcemerge.max_num_segments %}
            {# max_num_segments is empty, drop it #}
            {% do PHASE.pop('forcemerge', none) %}
          {% endif %}
        {% endif %}
        {# end force merge #}
      {% endif %}
    {% endfor %}
  {% endif %}

  {% do ES_INDEX_SETTINGS.update({index | replace("_x_", "."): ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index]}) %}
{% endfor %}

@@ -14,8 +14,9 @@ set -e
# Check to see if we have extracted the ca cert.
if [ ! -f /opt/so/saltstack/local/salt/elasticsearch/cacerts ]; then
  docker run -v /etc/pki/ca.crt:/etc/ssl/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:$ELASTIC_AGENT_TARBALL_VERSION -keystore /usr/share/elasticsearch/jdk/lib/security/cacerts -alias SOSCA -import -file /etc/ssl/ca.crt -storepass changeit -noprompt
  docker cp so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts
  docker cp so-elasticsearchca:/etc/ssl/certs/ca-certificates.crt /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
  # Make sure symbolic links are followed when copying from container
  docker cp -L so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts
  docker cp -L so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
  docker rm so-elasticsearchca
  echo "" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
  echo "sosca" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
@@ -121,7 +121,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
  echo "Loading Security Onion index templates..."
  shopt -s extglob
{% if GLOBALS.role == 'so-heavynode' %}
  pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*)"
  pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*|*endpoint*|*elasticsearch*|*generic*|*fleet_server*|*soc*)"
{% else %}
  pattern="*"
{% endif %}
@@ -9,7 +9,6 @@

include:
  - salt.minion
  - ssl

# Influx DB
influxconfdir:
@@ -11,6 +11,7 @@
{% set TOKEN = salt['pillar.get']('influxdb:token') %}

include:
  - influxdb.ssl
  - influxdb.config
  - influxdb.sostatus
@@ -59,6 +60,8 @@ so-influxdb:
{% endif %}
    - watch:
      - file: influxdbconf
      - x509: influxdb_key
      - x509: influxdb_crt
    - require:
      - file: influxdbconf
      - x509: influxdb_key
55 salt/influxdb/ssl.sls Normal file
@@ -0,0 +1,55 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}

influxdb_key:
  x509.private_key_managed:
    - name: /etc/pki/influxdb.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/influxdb.key') -%}
    - prereq:
      - x509: /etc/pki/influxdb.crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

# Create a cert for talking to influxdb
influxdb_crt:
  x509.certificate_managed:
    - name: /etc/pki/influxdb.crt
    - ca_server: {{ CA.server }}
    - signing_policy: influxdb
    - private_key: /etc/pki/influxdb.key
    - CN: {{ GLOBALS.hostname }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30

influxkeyperms:
  file.managed:
    - replace: False
    - name: /etc/pki/influxdb.key
    - mode: 640
    - group: 939

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -68,6 +68,8 @@ so-kafka:
      - file: kafka_server_jaas_properties
{% endif %}
      - file: kafkacertz
      - x509: kafka_crt
      - file: kafka_pkcs12_perms
    - require:
      - file: kafkacertz
@@ -95,4 +97,4 @@ include:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
{% endif %}
@@ -6,22 +6,13 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}
{% set kafka_password = salt['pillar.get']('kafka:config:password') %}

include:
  - ca.dirs
{% set global_ca_server = [] %}
{% set x509dict = salt['mine.get'](GLOBALS.manager | lower~'*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
{% if 'manager' in host.split('_')|last or host.split('_')|last == 'standalone' %}
{% do global_ca_server.append(host) %}
{% endif %}
{% endfor %}
{% set ca_server = global_ca_server[0] %}
  - ca

{% if GLOBALS.pipeline == "KAFKA" %}

{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
kafka_client_key:
  x509.private_key_managed:
    - name: /etc/pki/kafka-client.key

@@ -39,12 +30,12 @@ kafka_client_key:
kafka_client_crt:
  x509.certificate_managed:
    - name: /etc/pki/kafka-client.crt
    - ca_server: {{ ca_server }}
    - ca_server: {{ CA.server }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - signing_policy: kafka
    - private_key: /etc/pki/kafka-client.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 0
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30

@@ -67,9 +58,9 @@ kafka_client_crt_perms:
    - mode: 640
    - user: 960
    - group: 939
{% endif %}
{% endif %}

{% if GLOBALS.role in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %}
kafka_key:
  x509.private_key_managed:
    - name: /etc/pki/kafka.key

@@ -87,12 +78,12 @@ kafka_key:
kafka_crt:
  x509.certificate_managed:
    - name: /etc/pki/kafka.crt
    - ca_server: {{ ca_server }}
    - ca_server: {{ CA.server }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - signing_policy: kafka
    - private_key: /etc/pki/kafka.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 0
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30

@@ -103,6 +94,7 @@ kafka_crt:
    - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:{{ kafka_password }}"
    - onchanges:
      - x509: /etc/pki/kafka.key

kafka_key_perms:
  file.managed:
    - replace: False

@@ -126,11 +118,11 @@ kafka_pkcs12_perms:
    - mode: 640
    - user: 960
    - group: 939
{% endif %}
{% endif %}

# Standalone needs kafka-logstash for automated testing. Searchnode/managersearch need it for logstash to consume from Kafka.
# Manager will have the cert, but it will be unused until a pipeline is created and logstash is enabled.
{% if GLOBALS.role in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %}
{% if GLOBALS.role in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %}
kafka_logstash_key:
  x509.private_key_managed:
    - name: /etc/pki/kafka-logstash.key

@@ -148,12 +140,12 @@ kafka_logstash_key:
kafka_logstash_crt:
  x509.certificate_managed:
    - name: /etc/pki/kafka-logstash.crt
    - ca_server: {{ ca_server }}
    - ca_server: {{ CA.server }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - signing_policy: kafka
    - private_key: /etc/pki/kafka-logstash.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 0
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30

@@ -189,7 +181,6 @@ kafka_logstash_pkcs12_perms:
    - user: 931
    - group: 939

{% endif %}
{% endif %}

{% else %}

@@ -198,4 +189,4 @@ kafka_logstash_pkcs12_perms:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
{% endif %}
@@ -25,11 +25,10 @@ kibana:
      discardCorruptObjects: "8.18.8"
    telemetry:
      enabled: False
    security:
      showInsecureClusterWarning: False
    xpack:
      security:
        secureCookies: true
        showInsecureClusterWarning: false
      reporting:
        kibanaServer:
          hostname: localhost
@@ -75,6 +75,7 @@ kratosconfig:
    - group: 928
    - mode: 600
    - template: jinja
    - show_changes: False
    - defaults:
        KRATOSMERGED: {{ KRATOSMERGED }}
@@ -46,6 +46,7 @@ kratos:
        ui_url: https://URL_BASE/
      login:
        ui_url: https://URL_BASE/login/
        lifespan: 60m
      error:
        ui_url: https://URL_BASE/login/
      registration:
@@ -182,6 +182,10 @@ kratos:
          global: True
          advanced: True
          helpLink: kratos.html
        lifespan:
          description: Defines the duration that a login form will remain valid.
          global: True
          helpLink: kratos.html
      error:
        ui_url:
          description: User accessible URL containing the Security Onion login page. Leave as default to ensure proper operation.
@@ -10,11 +10,10 @@
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
{% set ASSIGNED_PIPELINES = LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %}

{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
include:
  - ssl
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
  - elasticsearch
{% endif %}
{% endif %}

# Create the logstash group
logstashgroup:
@@ -63,7 +63,7 @@ logstash:
  settings:
    lsheap: 500m
  config:
    http_x_host: 0.0.0.0
    api_x_http_x_host: 0.0.0.0
    path_x_logs: /var/log/logstash
    pipeline_x_workers: 1
    pipeline_x_batch_x_size: 125
@@ -12,6 +12,7 @@
{% set lsheap = LOGSTASH_MERGED.settings.lsheap %}

include:
  - ca
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
  - elasticsearch.ca
{% endif %}
@@ -20,9 +21,9 @@ include:
  - kafka.ca
  - kafka.ssl
{% endif %}
  - logstash.ssl
  - logstash.config
  - logstash.sostatus
  - ssl

so-logstash:
  docker_container.running:
@@ -65,22 +66,18 @@ so-logstash:
      - /opt/so/log/logstash:/var/log/logstash:rw
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
      - /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
      - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
      - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %}
      - /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
      - /etc/pki/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro
      - /etc/pki/elasticfleet-logstash.key:/usr/share/logstash/elasticfleet-logstash.key:ro
      - /etc/pki/elasticfleet-lumberjack.crt:/usr/share/logstash/elasticfleet-lumberjack.crt:ro
      - /etc/pki/elasticfleet-lumberjack.key:/usr/share/logstash/elasticfleet-lumberjack.key:ro
{% if GLOBALS.role != 'so-fleet' %}
      - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
      - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %}
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
      - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{% else %}
      - /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
      - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
      - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
{% endif %}
@@ -100,11 +97,22 @@ so-logstash:
{% endfor %}
{% endif %}
    - watch:
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-receiver'] %}
      - x509: etc_elasticfleet_logstash_key
      - x509: etc_elasticfleet_logstash_crt
{% endif %}
      - file: lsetcsync
      - file: trusttheca
{% if GLOBALS.is_manager %}
      - file: elasticsearch_cacerts
      - file: elasticsearch_capems
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
      - x509: etc_elasticfleet_logstash_crt
      - x509: etc_elasticfleet_logstash_key
      - x509: etc_elasticfleetlumberjack_crt
      - x509: etc_elasticfleetlumberjack_key
{% if GLOBALS.role != 'so-fleet' %}
      - x509: etc_filebeat_crt
      - file: logstash_filebeat_p8
{% endif %}
{% endif %}
{% for assigned_pipeline in LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %}
      - file: ls_pipeline_{{assigned_pipeline}}
{% for CONFIGFILE in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %}
@@ -115,17 +123,20 @@ so-logstash:
      - file: kafkacertz
{% endif %}
    - require:
{% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
      - file: trusttheca
{% if GLOBALS.is_manager %}
      - file: elasticsearch_cacerts
      - file: elasticsearch_capems
{% endif %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
      - x509: etc_elasticfleet_logstash_crt
      - x509: etc_elasticfleet_logstash_key
      - x509: etc_elasticfleetlumberjack_crt
      - x509: etc_elasticfleetlumberjack_key
{% if GLOBALS.role != 'so-fleet' %}
      - x509: etc_filebeat_crt
{% endif %}
{% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
      - x509: pki_public_ca_crt
{% else %}
      - x509: trusttheca
{% endif %}
{% if grains.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
      - file: cacertz
      - file: capemz
      - file: logstash_filebeat_p8
{% endif %}
{% endif %}
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
      - file: kafkacertz
@@ -5,10 +5,10 @@ input {
    codec => es_bulk
    request_headers_target_field => client_headers
    remote_host_target_field => client_host
    ssl => true
    ssl_enabled => true
    ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
    ssl_certificate => "/usr/share/logstash/filebeat.crt"
    ssl_key => "/usr/share/logstash/filebeat.key"
    ssl_verify_mode => "peer"
    ssl_client_authentication => "required"
  }
}

@@ -2,11 +2,11 @@ input {
  elastic_agent {
    port => 5055
    tags => [ "elastic-agent", "input-{{ GLOBALS.hostname }}" ]
    ssl => true
    ssl_enabled => true
    ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
    ssl_certificate => "/usr/share/logstash/elasticfleet-logstash.crt"
    ssl_key => "/usr/share/logstash/elasticfleet-logstash.key"
    ssl_verify_mode => "force_peer"
    ssl_client_authentication => "required"
    ecs_compatibility => v8
  }
}

@@ -2,7 +2,7 @@ input {
  elastic_agent {
    port => 5056
    tags => [ "elastic-agent", "fleet-lumberjack-input" ]
    ssl => true
    ssl_enabled => true
    ssl_certificate => "/usr/share/logstash/elasticfleet-lumberjack.crt"
    ssl_key => "/usr/share/logstash/elasticfleet-lumberjack.key"
    ecs_compatibility => v8

@@ -8,8 +8,8 @@ output {
      document_id => "%{[metadata][_id]}"
      index => "so-ip-mappings"
      silence_errors_in_log => ["version_conflict_engine_exception"]
      ssl => true
      ssl_certificate_verification => false
      ssl_enabled => true
      ssl_verification_mode => "none"
    }
  }
  else {
@@ -25,8 +25,8 @@ output {
      document_id => "%{[metadata][_id]}"
      pipeline => "%{[metadata][pipeline]}"
      silence_errors_in_log => ["version_conflict_engine_exception"]
      ssl => true
      ssl_certificate_verification => false
      ssl_enabled => true
      ssl_verification_mode => "none"
    }
  }
  else {
@@ -37,8 +37,8 @@ output {
      user => "{{ ES_USER }}"
      password => "{{ ES_PASS }}"
      pipeline => "%{[metadata][pipeline]}"
      ssl => true
      ssl_certificate_verification => false
      ssl_enabled => true
      ssl_verification_mode => "none"
    }
  }
}
@@ -49,8 +49,8 @@ output {
      data_stream => true
      user => "{{ ES_USER }}"
      password => "{{ ES_PASS }}"
      ssl => true
      ssl_certificate_verification => false
      ssl_enabled => true
      ssl_verification_mode => "none"
    }
  }
}

@@ -13,8 +13,8 @@ output {
      user => "{{ ES_USER }}"
      password => "{{ ES_PASS }}"
      index => "endgame-%{+YYYY.MM.dd}"
      ssl => true
      ssl_certificate_verification => false
      ssl_enabled => true
      ssl_verification_mode => "none"
    }
  }
}
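(Note: these pipeline edits track the SSL option renames in newer Logstash plugin versions. To the best of our reading of the upstream plugin changelogs, the deprecated `ssl` setting was replaced by `ssl_enabled`, the input-side `ssl_verify_mode` by `ssl_client_authentication`, and the elasticsearch output's `ssl_certificate_verification => false` by `ssl_verification_mode => "none"`; the old and new options are otherwise equivalent in behavior.)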
@@ -56,7 +56,7 @@ logstash:
      helpLink: logstash.html
      global: False
  config:
    http_x_host:
    api_x_http_x_host:
      description: Host interface to listen on for connections.
      helpLink: logstash.html
      readonly: True
287 salt/logstash/ssl.sls Normal file
@@ -0,0 +1,287 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states or sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% from 'ca/map.jinja' import CA %}

{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}

{% if grains['role'] not in [ 'so-heavynode'] %}
# Start -- Elastic Fleet Logstash Input Cert
etc_elasticfleet_logstash_key:
  x509.private_key_managed:
    - name: /etc/pki/elasticfleet-logstash.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/elasticfleet-logstash.key') -%}
    - prereq:
      - x509: etc_elasticfleet_logstash_crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

etc_elasticfleet_logstash_crt:
  x509.certificate_managed:
    - name: /etc/pki/elasticfleet-logstash.crt
    - ca_server: {{ CA.server }}
    - signing_policy: elasticfleet
    - private_key: /etc/pki/elasticfleet-logstash.key
    - CN: {{ GLOBALS.hostname }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }},DNS:{{ GLOBALS.url_base }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
  cmd.run:
    - name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-logstash.key -topk8 -out /etc/pki/elasticfleet-logstash.p8 -nocrypt"
    - onchanges:
      - x509: etc_elasticfleet_logstash_key

eflogstashperms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-logstash.key
    - mode: 640
    - group: 939

chownelasticfleetlogstashcrt:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-logstash.crt
    - mode: 640
    - user: 931
    - group: 939

chownelasticfleetlogstashkey:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-logstash.key
    - mode: 640
    - user: 931
    - group: 939
# End -- Elastic Fleet Logstash Input Cert
{% endif %} # this endif excludes HeavyNodes

# Start -- Elastic Fleet Node - Logstash Lumberjack Input / Output
# Cert needed on: Managers, Receivers
etc_elasticfleetlumberjack_key:
  x509.private_key_managed:
    - name: /etc/pki/elasticfleet-lumberjack.key
    - bits: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/elasticfleet-lumberjack.key') -%}
    - prereq:
      - x509: etc_elasticfleetlumberjack_crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

etc_elasticfleetlumberjack_crt:
  x509.certificate_managed:
    - name: /etc/pki/elasticfleet-lumberjack.crt
    - ca_server: {{ CA.server }}
    - signing_policy: elasticfleet
    - private_key: /etc/pki/elasticfleet-lumberjack.key
    - CN: {{ GLOBALS.node_ip }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
  cmd.run:
    - name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-lumberjack.key -topk8 -out /etc/pki/elasticfleet-lumberjack.p8 -nocrypt"
    - onchanges:
      - x509: etc_elasticfleetlumberjack_key

eflogstashlumberjackperms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-lumberjack.key
    - mode: 640
    - group: 939

chownilogstashelasticfleetlumberjackp8:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-lumberjack.p8
    - mode: 640
    - user: 931
    - group: 939

chownilogstashelasticfleetlogstashlumberjackcrt:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-lumberjack.crt
    - mode: 640
    - user: 931
    - group: 939

chownilogstashelasticfleetlogstashlumberjackkey:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-lumberjack.key
    - mode: 640
    - user: 931
    - group: 939
# End -- Elastic Fleet Node - Logstash Lumberjack Input / Output
{% endif %}

{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-receiver'] %}
etc_filebeat_key:
  x509.private_key_managed:
    - name: /etc/pki/filebeat.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/filebeat.key') -%}
    - prereq:
      - x509: etc_filebeat_crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

# Request a cert and drop it where it needs to go to be distributed
etc_filebeat_crt:
  x509.certificate_managed:
    - name: /etc/pki/filebeat.crt
    - ca_server: {{ CA.server }}
    - signing_policy: filebeat
    - private_key: /etc/pki/filebeat.key
    - CN: {{ GLOBALS.hostname }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
  cmd.run:
    - name: "/usr/bin/openssl pkcs8 -in /etc/pki/filebeat.key -topk8 -out /etc/pki/filebeat.p8 -nocrypt"
    - onchanges:
      - x509: etc_filebeat_key

fbperms:
  file.managed:
    - replace: False
    - name: /etc/pki/filebeat.key
    - mode: 640
    - group: 939

logstash_filebeat_p8:
  file.managed:
    - replace: False
    - name: /etc/pki/filebeat.p8
    - mode: 640
    - user: 931
    - group: 939

{% if grains.role not in ['so-heavynode', 'so-receiver'] %}
# Create symlinks to the keys so they can be distributed everywhere they are needed
filebeatdir:
  file.directory:
    - name: /opt/so/saltstack/local/salt/filebeat/files
    - makedirs: True

fbkeylink:
  file.symlink:
    - name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.p8
    - target: /etc/pki/filebeat.p8
    - user: socore
    - group: socore

fbcrtlink:
  file.symlink:
    - name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.crt
    - target: /etc/pki/filebeat.crt
    - user: socore
    - group: socore

{% endif %}
{% endif %}

{% if GLOBALS.is_manager or GLOBALS.role in ['so-sensor', 'so-searchnode', 'so-heavynode', 'so-fleet', 'so-idh', 'so-receiver'] %}

fbcertdir:
  file.directory:
    - name: /opt/so/conf/filebeat/etc/pki
    - makedirs: True

conf_filebeat_key:
  x509.private_key_managed:
    - name: /opt/so/conf/filebeat/etc/pki/filebeat.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/opt/so/conf/filebeat/etc/pki/filebeat.key') -%}
    - prereq:
      - x509: conf_filebeat_crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

# Request a cert and drop it where it needs to go to be distributed
conf_filebeat_crt:
  x509.certificate_managed:
    - name: /opt/so/conf/filebeat/etc/pki/filebeat.crt
    - ca_server: {{ CA.server }}
    - signing_policy: filebeat
    - private_key: /opt/so/conf/filebeat/etc/pki/filebeat.key
    - CN: {{ GLOBALS.hostname }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30

# Convert the key to pkcs#8 so logstash will work correctly.
filebeatpkcs:
  cmd.run:
    - name: "/usr/bin/openssl pkcs8 -in /opt/so/conf/filebeat/etc/pki/filebeat.key -topk8 -out /opt/so/conf/filebeat/etc/pki/filebeat.p8 -passout pass:"
    - onchanges:
      - x509: conf_filebeat_key

filebeatkeyperms:
  file.managed:
    - replace: False
    - name: /opt/so/conf/filebeat/etc/pki/filebeat.key
    - mode: 640
    - group: 939

chownfilebeatp8:
  file.managed:
    - replace: False
    - name: /opt/so/conf/filebeat/etc/pki/filebeat.p8
    - mode: 640
    - user: 931
    - group: 939

{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -1,3 +1,8 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

elastic_curl_config_distributed:
  file.managed:
    - name: /opt/so/saltstack/local/salt/elasticsearch/curl.config
@@ -214,7 +214,7 @@ git_config_set_safe_dirs:

surinsmrulesdir:
  file.directory:
    - name: /nsm/rules/suricata
    - name: /nsm/rules/suricata/etopen
    - user: 939
    - group: 939
    - makedirs: True
@@ -1,3 +1,8 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

kibana_curl_config_distributed:
  file.managed:
    - name: /opt/so/conf/kibana/curl.config
@@ -5,4 +10,4 @@ kibana_curl_config_distributed:
    - template: jinja
    - mode: 600
    - show_changes: False
    - makedirs: True
    - makedirs: True
@@ -25,13 +25,11 @@
{% set index_settings = es.get('index_settings', {}) %}
{% set input = index_settings.get('so-logs', {}) %}
{% for k in matched_integration_names %}
{% if k not in index_settings %}
{% set _ = index_settings.update({k: input}) %}
{% endif %}
{% do index_settings.update({k: input}) %}
{% endfor %}
{% for k in addon_integration_keys %}
{% if k not in matched_integration_names and k in index_settings %}
{% set _ = index_settings.pop(k) %}
{% do index_settings.pop(k) %}
{% endif %}
{% endfor %}
{{ data }}

@@ -45,14 +43,12 @@
{% set es = data.get('elasticsearch', {}) %}
{% set index_settings = es.get('index_settings', {}) %}
{% for k in matched_integration_names %}
{% if k not in index_settings %}
{% set input = ADDON_INTEGRATION_DEFAULTS[k] %}
{% set _ = index_settings.update({k: input}) %}
{% endif %}
{% set input = ADDON_INTEGRATION_DEFAULTS[k] %}
{% do index_settings.update({k: input}) %}
{% endfor %}
{% for k in addon_integration_keys %}
{% if k not in matched_integration_names and k in index_settings %}
{% set _ = index_settings.pop(k) %}
{% do index_settings.pop(k) %}
{% endif %}
{% endfor %}
{{ data }}
@@ -1,3 +1,8 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

include:
  - elasticsearch.auth
  - kratos
@@ -133,7 +133,7 @@ function getinstallinfo() {
    return 1
  fi

  source <(echo $INSTALLVARS)
  export $(echo "$INSTALLVARS" | xargs)
  if [ $? -ne 0 ]; then
    log "ERROR" "Failed to source install variables"
    return 1
@@ -716,6 +716,18 @@ function checkMine() {
  }
}

function create_ca_pillar() {
  local capillar=/opt/so/saltstack/local/pillar/ca/init.sls
  printf '%s\n'\
    "ca:"\
    " server: $MINION_ID"\
    " " > $capillar
  if [ $? -ne 0 ]; then
    log "ERROR" "Failed to add $MINION_ID to $capillar"
    return 1
  fi
}

function createEVAL() {
  log "INFO" "Creating EVAL configuration for minion $MINION_ID"
  is_pcaplimit=true
@@ -1013,6 +1025,7 @@ function setupMinionFiles() {
  managers=("EVAL" "STANDALONE" "IMPORT" "MANAGER" "MANAGERSEARCH")
  if echo "${managers[@]}" | grep -qw "$NODETYPE"; then
    add_sensoroni_with_analyze_to_minion || return 1
    create_ca_pillar || return 1
  else
    add_sensoroni_to_minion || return 1
  fi
@@ -17,6 +17,7 @@ def showUsage(args):
    print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]), file=sys.stderr)
    print('  General commands:', file=sys.stderr)
    print('    append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.', file=sys.stderr)
    print('    removelistitem - Remove a list item from a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.', file=sys.stderr)
    print('    add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.', file=sys.stderr)
    print('    get - Displays (to stdout) the value stored in the given key. Requires KEY arg.', file=sys.stderr)
    print('    remove - Removes a yaml key, if it exists. Requires KEY arg.', file=sys.stderr)
@@ -57,6 +58,24 @@ def appendItem(content, key, listItem):
        return 1


def removeListItem(content, key, listItem):
    pieces = key.split(".", 1)
    if len(pieces) > 1:
        removeListItem(content[pieces[0]], pieces[1], listItem)
    else:
        try:
            if not isinstance(content[key], list):
                raise AttributeError("Value is not a list")
            if listItem in content[key]:
                content[key].remove(listItem)
        except (AttributeError, TypeError):
            print("The existing value for the given key is not a list. No action was taken on the file.", file=sys.stderr)
            return 1
        except KeyError:
            print("The key provided does not exist. No action was taken on the file.", file=sys.stderr)
            return 1


def convertType(value):
    if isinstance(value, str) and value.startswith("file:"):
        path = value[5:]  # Remove "file:" prefix
@@ -103,6 +122,23 @@ def append(args):
    return 0


def removelistitem(args):
    if len(args) != 3:
        print('Missing filename, key arg, or list item to remove', file=sys.stderr)
        showUsage(None)
        return 1

    filename = args[0]
    key = args[1]
    listItem = args[2]

    content = loadYaml(filename)
    removeListItem(content, key, convertType(listItem))
    writeYaml(filename, content)

    return 0
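For reference, soup invokes this new command later in the idstools migration (shown further below); a typical invocation removes a single entry from a nested list key:

    so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids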

def addKey(content, key, value):
    pieces = key.split(".", 1)
    if len(pieces) > 1:
@@ -211,6 +247,7 @@ def main():
        "help": showUsage,
        "add": add,
        "append": append,
        "removelistitem": removelistitem,
        "get": get,
        "remove": remove,
        "replace": replace,
@@ -457,3 +457,126 @@ class TestRemove(unittest.TestCase):
                self.assertEqual(result, 1)
                self.assertIn("Missing filename or key arg", mock_stderr.getvalue())
                sysmock.assert_called_once_with(1)


class TestRemoveListItem(unittest.TestCase):

    def test_removelistitem_missing_arg(self):
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "help"]
                soyaml.removelistitem(["file", "key"])
                sysmock.assert_called()
                self.assertIn("Missing filename, key arg, or list item to remove", mock_stderr.getvalue())

    def test_removelistitem(self):
        filename = "/tmp/so-yaml_test-removelistitem.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
        file.close()

        soyaml.removelistitem([filename, "key3", "b"])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n  child1: 123\n  child2: abc\nkey2: false\nkey3:\n- a\n- c\n"
        self.assertEqual(actual, expected)

    def test_removelistitem_nested(self):
        filename = "/tmp/so-yaml_test-removelistitem.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
        file.close()

        soyaml.removelistitem([filename, "key1.child2", "b"])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n  child1: 123\n  child2:\n  - a\n  - c\nkey2: false\nkey3:\n- e\n- f\n- g\n"
        self.assertEqual(actual, expected)

    def test_removelistitem_nested_deep(self):
        filename = "/tmp/so-yaml_test-removelistitem.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        soyaml.removelistitem([filename, "key1.child2.deep2", "b"])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n  child1: 123\n  child2:\n    deep1: 45\n    deep2:\n    - a\n    - c\nkey2: false\nkey3:\n- e\n- f\n- g\n"
        self.assertEqual(actual, expected)

    def test_removelistitem_item_not_in_list(self):
        filename = "/tmp/so-yaml_test-removelistitem.yaml"
        file = open(filename, "w")
        file.write("{key1: [a,b,c]}")
        file.close()

        soyaml.removelistitem([filename, "key1", "d"])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n- a\n- b\n- c\n"
        self.assertEqual(actual, expected)

    def test_removelistitem_key_noexist(self):
        filename = "/tmp/so-yaml_test-removelistitem.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "removelistitem", filename, "key4", "h"]
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual("The key provided does not exist. No action was taken on the file.\n", mock_stderr.getvalue())

    def test_removelistitem_key_noexist_deep(self):
        filename = "/tmp/so-yaml_test-removelistitem.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "removelistitem", filename, "key1.child2.deep3", "h"]
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual("The key provided does not exist. No action was taken on the file.\n", mock_stderr.getvalue())

    def test_removelistitem_key_nonlist(self):
        filename = "/tmp/so-yaml_test-removelistitem.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "removelistitem", filename, "key1", "h"]
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue())

    def test_removelistitem_key_nonlist_deep(self):
        filename = "/tmp/so-yaml_test-removelistitem.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "removelistitem", filename, "key1.child2.deep1", "h"]
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue())
@@ -87,6 +87,12 @@ check_err() {
    113)
      echo 'No route to host'
      ;;
    160)
      echo 'Incompatible Elasticsearch upgrade'
      ;;
    161)
      echo 'Required intermediate Elasticsearch upgrade not complete'
      ;;
    *)
      echo 'Unhandled error'
      echo "$err_msg"
@@ -319,6 +325,19 @@ clone_to_tmp() {
  fi
}

# There is a function like this in so-minion, but we cannot source it here since so-minion requires args
create_ca_pillar() {
  local ca_pillar_dir="/opt/so/saltstack/local/pillar/ca"
  local ca_pillar_file="${ca_pillar_dir}/init.sls"

  echo "Updating CA pillar configuration"
  mkdir -p "$ca_pillar_dir"
  echo "ca: {}" > "$ca_pillar_file"

  so-yaml.py add "$ca_pillar_file" ca.server "$MINIONID"
  chown -R socore:socore "$ca_pillar_dir"
}
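For clarity, after this function runs, the pillar file should look roughly like the sketch below (the minion ID shown is a placeholder; the real value comes from $MINIONID):

    # /opt/so/saltstack/local/pillar/ca/init.sls
    ca:
      server: <manager_minion_id>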

disable_logstash_heavynodes() {
  c=0
  printf "\nChecking for heavynodes and disabling Logstash if they exist\n"
@@ -362,7 +381,6 @@ masterlock() {
  echo "base:" > $TOPFILE
  echo "  $MINIONID:" >> $TOPFILE
  echo "    - ca" >> $TOPFILE
  echo "    - ssl" >> $TOPFILE
  echo "    - elasticsearch" >> $TOPFILE
}
@@ -427,6 +445,8 @@ preupgrade_changes() {
  [[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180
  [[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190
  [[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200
  [[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.201
  [[ "$INSTALLEDVERSION" == 2.4.201 ]] && up_to_2.4.210
  true
}
@@ -441,24 +461,26 @@ postupgrade_changes() {
  [[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
  [[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
  [[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40
  [[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
  [[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
  [[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
  [[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80
  [[ "$POSTVERSION" == 2.4.80 ]] && post_to_2.4.90
  [[ "$POSTVERSION" == 2.4.90 ]] && post_to_2.4.100
  [[ "$POSTVERSION" == 2.4.100 ]] && post_to_2.4.110
  [[ "$POSTVERSION" == 2.4.110 ]] && post_to_2.4.111
  [[ "$POSTVERSION" == 2.4.111 ]] && post_to_2.4.120
  [[ "$POSTVERSION" == 2.4.120 ]] && post_to_2.4.130
  [[ "$POSTVERSION" == 2.4.130 ]] && post_to_2.4.140
  [[ "$POSTVERSION" == 2.4.140 ]] && post_to_2.4.141
  [[ "$POSTVERSION" == 2.4.141 ]] && post_to_2.4.150
  [[ "$POSTVERSION" == 2.4.150 ]] && post_to_2.4.160
  [[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170
  [[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180
  [[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190
  [[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200
  [[ "$POSTVERSION" == 2.4.200 ]] && post_to_2.4.201
  [[ "$POSTVERSION" == 2.4.201 ]] && post_to_2.4.210
  true
}
|
||||
|
||||
@@ -615,9 +637,6 @@ post_to_2.4.180() {
}

post_to_2.4.190() {
  echo "Regenerating Elastic Agent Installers"
  /sbin/so-elastic-agent-gen-installers

  # Only need to update import / eval nodes
  if [[ "$MINION_ROLE" == "import" ]] || [[ "$MINION_ROLE" == "eval" ]]; then
    update_import_fleet_output
@@ -645,6 +664,22 @@ post_to_2.4.200() {
  POSTVERSION=2.4.200
}

post_to_2.4.201() {
  echo "Nothing to apply"
  POSTVERSION=2.4.201
}

post_to_2.4.210() {
  echo "Rolling over Kratos index to apply new index template"

  rollover_index "logs-kratos-so"

  echo "Regenerating Elastic Agent Installers"
  /sbin/so-elastic-agent-gen-installers

  POSTVERSION=2.4.210
}

repo_sync() {
  echo "Sync the local repo."
  su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -906,9 +941,7 @@ up_to_2.4.180() {
}

up_to_2.4.190() {
  # Elastic Update for this release, so download Elastic Agent files
  determine_elastic_agent_upgrade

  echo "Nothing to do for 2.4.190"
  INSTALLEDVERSION=2.4.190
}

@@ -916,9 +949,25 @@ up_to_2.4.200() {
  echo "Backing up idstools config..."
  suricata_idstools_removal_pre

  touch /opt/so/state/esfleet_logstash_config_pillar

  INSTALLEDVERSION=2.4.200
}

up_to_2.4.201() {
  echo "Nothing to do for 2.4.201"

  INSTALLEDVERSION=2.4.201
}

up_to_2.4.210() {
  # Elastic Update for this release, so download Elastic Agent files
  determine_elastic_agent_upgrade
  create_ca_pillar

  INSTALLEDVERSION=2.4.210
}

add_hydra_pillars() {
  mkdir -p /opt/so/saltstack/local/pillar/hydra
  touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
@@ -1111,47 +1160,47 @@ suricata_idstools_removal_pre() {
  install -d -o 939 -g 939 -m 755 /opt/so/conf/soc/fingerprints
  install -o 939 -g 939 -m 644 /dev/null /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
  cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF
Suricata ruleset sync is blocked until this file is removed. Make sure that you have manually added any custom Suricata rulesets via SOC config - review the documentation for more details: securityonion.net/docs
Suricata ruleset sync is blocked until this file is removed. **CRITICAL** Make sure that you have manually added any custom Suricata rulesets via SOC config before removing this file - review the documentation for more details: https://docs.securityonion.net/en/2.4/nids.html#sync-block
EOF

  # Remove possible symlink & create salt local rules dir
  [ -L /opt/so/saltstack/local/salt/suricata/rules ] && rm -f /opt/so/saltstack/local/salt/suricata/rules
  install -d -o 939 -g 939 /opt/so/saltstack/local/salt/suricata/rules/ || echo "Failed to create Suricata local rules directory"

  # Backup custom rules & overrides
  mkdir -p /nsm/backup/detections-migration/2-4-200
  cp /usr/sbin/so-rule-update /nsm/backup/detections-migration/2-4-200
  cp /opt/so/conf/idstools/etc/rulecat.conf /nsm/backup/detections-migration/2-4-200

  if [[ -f /opt/so/conf/soc/so-detections-backup.py ]]; then
    python3 /opt/so/conf/soc/so-detections-backup.py
  # Backup so-detection index via reindex
  echo "Creating sos-backup index template..."
  template_result=$(/sbin/so-elasticsearch-query '_index_template/sos-backup' -X PUT \
    --retry 5 --retry-delay 15 --retry-all-errors \
    -d '{"index_patterns":["sos-backup-*"],"priority":501,"template":{"settings":{"index":{"number_of_replicas":0,"number_of_shards":1}}}}')

  # Verify backup by comparing counts
  echo "Verifying detection overrides backup..."
  es_override_count=$(/sbin/so-elasticsearch-query 'so-detection/_count' \
    -d '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}}}' | jq -r '.count') || {
    echo " Error: Failed to query Elasticsearch for override count"
    exit 1
  }
  if [[ -z "$template_result" ]] || ! echo "$template_result" | jq -e '.acknowledged == true' > /dev/null 2>&1; then
    echo "Error: Failed to create sos-backup index template"
    echo "$template_result"
    exit 1
  fi

  if [[ ! "$es_override_count" =~ ^[0-9]+$ ]]; then
    echo " Error: Invalid override count from Elasticsearch: '$es_override_count'"
    exit 1
  fi
  BACKUP_INDEX="sos-backup-detection-$(date +%Y%m%d-%H%M%S)"
  echo "Backing up so-detection index to $BACKUP_INDEX..."
  reindex_result=$(/sbin/so-elasticsearch-query '_reindex?wait_for_completion=true' \
    --retry 5 --retry-delay 15 --retry-all-errors \
    -X POST -d "{\"source\": {\"index\": \"so-detection\"}, \"dest\": {\"index\": \"$BACKUP_INDEX\"}}")

  backup_override_count=$(find /nsm/backup/detections/repo/*/overrides -type f 2>/dev/null | wc -l)

  echo " Elasticsearch overrides: $es_override_count"
  echo " Backed up overrides: $backup_override_count"

  if [[ "$es_override_count" -gt 0 ]]; then
    if [[ "$backup_override_count" -gt 0 ]]; then
      echo " Override backup verified successfully"
    else
      echo " Error: Elasticsearch has $es_override_count overrides but backup has 0 files"
      exit 1
    fi
  else
    echo " No overrides to backup"
  fi
  if [[ -z "$reindex_result" ]]; then
    echo "Error: Backup of detections failed - no response from Elasticsearch"
    exit 1
  elif echo "$reindex_result" | jq -e '.created >= 0' > /dev/null 2>&1; then
    echo "Backup complete: $(echo "$reindex_result" | jq -r '.created') documents copied"
  elif echo "$reindex_result" | grep -q "index_not_found_exception"; then
    echo "so-detection index does not exist, skipping backup"
  else
    echo "SOC Detections backup script not found, skipping detection backup"
    echo "Error: Backup of detections failed"
    echo "$reindex_result"
    exit 1
  fi

}
@@ -1172,21 +1221,47 @@ hash_normalized_file() {
    return 1
  fi

  sed -E \
  # Ensure trailing newline for consistent hashing regardless of source file
  { sed -E \
    -e 's/^[[:space:]]+//; s/[[:space:]]+$//' \
    -e '/^$/d' \
    -e 's|--url=http://[^:]+:7788|--url=http://MANAGER:7788|' \
    "$file" | sha256sum | awk '{print $1}'
    "$file"; echo; } | sed '/^$/d' | sha256sum | awk '{print $1}'
}
# Known-default hashes
# Known-default hashes for so-rule-update (ETOPEN ruleset)
KNOWN_SO_RULE_UPDATE_HASHES=(
  "8f1fe1cb65c08aab78830315b952785c7ccdcc108c5c0474f427e29d4e39ee5f" # non-Airgap
  "d23ac5a962c709dcb888103effb71444df72b46009b6c426e280dbfbc7d74d40" # Airgap
  # 2.4.100+ (suricata 7.0.3, non-airgap)
  "5fbd067ced86c8ec72ffb7e1798aa624123b536fb9d78f4b3ad8d3b45db1eae7" # 2.4.100-2.4.190 non-Airgap
  # 2.4.90+ airgap (same for 2.4.90 and 2.4.100+)
  "61f632c55791338c438c071040f1490066769bcce808b595b5cc7974a90e653a" # 2.4.90+ Airgap
  # 2.4.90 (suricata 6.0, non-airgap, comment inside proxy block)
  "0380ec52a05933244ab0f0bc506576e1d838483647b40612d5fe4b378e47aedd" # 2.4.90 non-Airgap
  # 2.4.10-2.4.80 (suricata 6.0, non-airgap, comment outside proxy block)
  "b6e4d1b5a78d57880ad038a9cd2cc6978aeb2dd27d48ea1a44dd866a2aee7ff4" # 2.4.10-2.4.80 non-Airgap
  # 2.4.10-2.4.80 airgap
  "b20146526ace2b142fde4664f1386a9a1defa319b3a1d113600ad33a1b037dad" # 2.4.10-2.4.80 Airgap
  # 2.4.5 and earlier (no pidof check, non-airgap)
  "d04f5e4015c348133d28a7840839e82d60009781eaaa1c66f7f67747703590dc" # 2.4.5 non-Airgap
)

# Known-default hashes for rulecat.conf
KNOWN_RULECAT_CONF_HASHES=(
  "17fc663a83b30d4ba43ac6643666b0c96343c5ea6ea833fe6a8362fe415b666b" # default
  # 2.4.100+ (suricata 7.0.3)
  "302e75dca9110807f09ade2eec3be1fcfc8b2bf6cf2252b0269bb72efeefe67e" # 2.4.100-2.4.190 without SURICATA md_engine
  "8029b7718c324a9afa06a5cf180afde703da1277af4bdd30310a6cfa3d6398cb" # 2.4.100-2.4.190 with SURICATA md_engine
  # 2.4.80-2.4.90 (suricata 6.0, with --suricata-version and --output)
  "4d8b318e6950a6f60b02f307cf27c929efd39652990c1bd0c8820aa8a307e1e7" # 2.4.80-2.4.90 without SURICATA md_engine
  "a1ddf264c86c4e91c81c5a317f745a19466d4311e4533ec3a3c91fed04c11678" # 2.4.80-2.4.90 with SURICATA md_engine
  # 2.4.50-2.4.70 (/suri/ path, no --suricata-version)
  "86e3afb8d0f00c62337195602636864c98580a13ca9cc85029661a539deae6ae" # 2.4.50-2.4.70 without SURICATA md_engine
  "5a97604ca5b820a10273a2d6546bb5e00c5122ca5a7dfe0ba0bfbce5fc026f4b" # 2.4.50-2.4.70 with SURICATA md_engine
  # 2.4.20-2.4.40 (/nids/ path without /suri/)
  "d098ea9ecd94b5cca35bf33543f8ea8f48066a0785221fabda7fef43d2462c29" # 2.4.20-2.4.40 without SURICATA md_engine
  "9dbc60df22ae20d65738ba42e620392577857038ba92278e23ec182081d191cd" # 2.4.20-2.4.40 with SURICATA md_engine
  # 2.4.5-2.4.10 (/sorules/ path for extraction/filters)
  "490f6843d9fca759ee74db3ada9c702e2440b8393f2cfaf07bbe41aaa6d955c3" # 2.4.5-2.4.10 with SURICATA md_engine
  # Note: 2.4.5-2.4.10 without SURICATA md_engine has same hash as 2.4.20-2.4.40 without SURICATA md_engine
)
# Check a config file against known hashes
@@ -1247,6 +1322,13 @@ custom_found=0
check_config_file "$SO_RULE_UPDATE" "KNOWN_SO_RULE_UPDATE_HASHES" || custom_found=1
check_config_file "$RULECAT_CONF" "KNOWN_RULECAT_CONF_HASHES" || custom_found=1

# Check for ETPRO rules on airgap systems
if [[ $is_airgap -eq 0 ]] && grep -q 'ETPRO ' /nsm/rules/suricata/emerging-all.rules 2>/dev/null; then
  echo "ETPRO rules detected on airgap system - custom configuration"
  echo "ETPRO rules detected on Airgap in /nsm/rules/suricata/emerging-all.rules" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
  custom_found=1
fi

# If no custom configs found, remove syncBlock
if [[ $custom_found -eq 0 ]]; then
  echo "idstools migration completed successfully - removing Suricata engine syncBlock"

@@ -1270,8 +1352,15 @@ if [ -n "$(docker ps -q -f name=^so-idstools$)" ]; then
    fi

    echo "Removing idstools symlink and scripts..."
    rm /opt/so/saltstack/local/salt/suricata/rules
    rm -rf /usr/sbin/so-idstools*
    sed -i '/^#\?so-idstools$/d' /opt/so/conf/so-status/so-status.conf
    crontab -l | grep -v 'so-rule-update' | crontab -
+
+    # Back up the salt master config and manager pillar before editing them
+    cp /opt/so/saltstack/local/pillar/minions/$MINIONID.sls /nsm/backup/detections-migration/2-4-200/
+    cp /etc/salt/master /nsm/backup/detections-migration/2-4-200/
+    so-yaml.py remove /opt/so/saltstack/local/pillar/minions/$MINIONID.sls idstools
+    so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids

}
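
The two so-yaml.py operations above edit YAML in place; their intent, illustrated on a hypothetical throwaway file (usage inferred from the calls in this script, file and keys made up for the illustration):

# Hypothetical illustration of the two so-yaml.py calls used above.
cat > /tmp/so-yaml-demo.yaml <<'YAML'
idstools:
  enabled: true
file_roots:
  base:
    - /opt/so/saltstack/local
    - /opt/so/rules/nids
YAML

# Drop the whole top-level idstools key.
so-yaml.py remove /tmp/so-yaml-demo.yaml idstools

# Drop a single entry from the file_roots.base list.
so-yaml.py removelistitem /tmp/so-yaml-demo.yaml file_roots.base /opt/so/rules/nids

cat /tmp/so-yaml-demo.yaml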

@@ -1321,7 +1410,7 @@ unmount_update() {

update_airgap_rules() {
    # Copy the rules over to update them for airgap.
-    rsync -a $UPDATE_DIR/agrules/suricata/* /nsm/rules/suricata/
+    rsync -a --delete $UPDATE_DIR/agrules/suricata/ /nsm/rules/suricata/etopen/
    rsync -a $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/
    rsync -a $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/
    # Copy the securityonion-resources repo over for SOC Detection Summaries and check out the published summaries branch

@@ -1570,6 +1659,252 @@ verify_latest_update_script() {
    fi

}
verify_es_version_compatibility() {

    local es_required_version_statefile="/opt/so/state/so_es_required_upgrade_version.txt"
    local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh"

    # supported upgrade paths for SO-ES versions
    declare -A es_upgrade_map=(
        ["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
        ["8.17.3"]="8.18.4 8.18.6 8.18.8"
        ["8.18.4"]="8.18.6 8.18.8 9.0.8"
        ["8.18.6"]="8.18.8 9.0.8"
        ["8.18.8"]="9.0.8"
    )

    # Elasticsearch MUST upgrade through these versions
    declare -A es_to_so_version=(
        ["8.18.8"]="2.4.190-20251024"
    )

    # Get current Elasticsearch version
    if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then
        es_version=$(echo "$es_version_raw" | jq -r '.version.number')
    else
        echo "Could not determine the current Elasticsearch version to validate compatibility with the post-soup Elasticsearch version."
        exit 160
    fi

    if ! target_es_version=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version | sed -n '1p'); then
        # so-yaml.py failed to read the ES version from the upgrade's elasticsearch/defaults.yaml file. They are likely
        # upgrading to an SO version older than 2.4.110, prior to ES version pinning, and should be okay to continue.

        # If so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110, then bail.
        if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) -gt 110 ]]; then
            echo "Couldn't determine the target Elasticsearch version (post-soup version) to ensure compatibility with the current Elasticsearch version. Exiting."
            exit 160
        fi

        # allow upgrade to versions older than 2.4.110 without checking ES version compatibility
        return 0

    fi

    # If this statefile exists, a previous run performed an intermediate upgrade; ensure that ALL ES nodes have been
    # upgraded to the version in the statefile before allowing soup to continue.
    if [[ -f "$es_required_version_statefile" ]]; then
        # The verification script should already have been created; recreate it if it is missing.
        if [[ ! -f "$es_verification_script" ]]; then
            create_intermediate_upgrade_verification_script $es_verification_script
        fi

        local es_required_version_statefile_value=$(cat $es_required_version_statefile)
        echo -e "\n##############################################################################################################################\n"
        echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup, to avoid potential data loss!"
        # run the verification script against the version recorded in the statefile
        timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$es_required_version_statefile"
        if [[ $? -ne 0 ]]; then
            echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
            echo "A previously required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to upgrade Elasticsearch to $es_required_version_statefile_value before running soup again, to avoid potential data loss!"
            echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
            exit 161
        fi
        echo -e "\n##############################################################################################################################\n"
    fi

    if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
        # supported upgrade
        return 0
    else
        compatible_versions=${es_upgrade_map[$es_version]}
        if [[ -z "$compatible_versions" ]]; then
            # If the current ES version is not explicitly defined in the upgrade map, we know an intermediate upgrade is required.
            # Default to the lowest ES version defined in es_to_so_version as $first_es_required_version.
            local first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
            next_step_so_version=${es_to_so_version[$first_es_required_version]}
            required_es_upgrade_version="$first_es_required_version"
        else
            next_step_so_version=${es_to_so_version[${compatible_versions##* }]}
            required_es_upgrade_version="${compatible_versions##* }"
        fi
        echo -e "\n##############################################################################################################################\n"
        echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n"

        echo "$required_es_upgrade_version" > "$es_required_version_statefile"

        # We expect to upgrade to the latest compatible minor version of ES
        create_intermediate_upgrade_verification_script $es_verification_script

        if [[ $is_airgap -eq 0 ]]; then
            echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso"
            echo "*** Once you have updated to $next_step_so_version, you can then run soup again to update to $(cat $UPDATE_DIR/VERSION). ***"
            echo -e "\n##############################################################################################################################\n"
            exit 160
        else
            # preserve the BRANCH value if it was set originally
            if [[ -n "$BRANCH" ]]; then
                local originally_requested_so_version="$BRANCH"
            else
                local originally_requested_so_version="2.4/main"
            fi

            echo "Starting automated intermediate upgrade to $next_step_so_version."
            echo "After completion, the system will automatically attempt to upgrade to the latest version."
            echo -e "\n##############################################################################################################################\n"
            exec bash -c "BRANCH=$next_step_so_version soup -y && BRANCH=$next_step_so_version soup -y && \
                echo -e \"\n##############################################################################################################################\n\" && \
                echo -e \"Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager!\n\nOnce verification completes, the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version, allowing all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n\" && \
                timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
                echo -e \"\n##############################################################################################################################\n\" && \
                BRANCH=$originally_requested_so_version soup -y && BRANCH=$originally_requested_so_version soup -y"
        fi
    fi

}
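
The compatibility test above relies on a common bash idiom: padding both the list and the candidate with spaces so `=~` can only match whole space-separated tokens. A standalone illustration (values hypothetical):

#!/bin/bash
# Space-padded token matching against a space-separated list.
declare -A upgrade_map=(
    ["8.18.4"]="8.18.6 8.18.8 9.0.8"
)

current="8.18.4"
target="9.0.8"

# The surrounding spaces prevent partial-token matches
# (e.g. "9.0.8" cannot match inside "19.0.88").
if [[ " ${upgrade_map[$current]} " =~ " $target " ]]; then
    echo "direct upgrade from $current to $target is supported"
else
    echo "intermediate upgrade required before reaching $target"
fi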

create_intermediate_upgrade_verification_script() {
    # After an intermediate upgrade, verify that ALL nodes running Elasticsearch are at the expected version
    # BEFORE proceeding to the next upgrade step. This is a CRITICAL step.
    local verification_script="$1"

    cat << 'EOF' > "$verification_script"
#!/bin/bash

SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE="/root/so_intermediate_upgrade_verification_failures.log"
CURRENT_TIME=$(date +%Y%m%d.%H%M%S)
EXPECTED_ES_VERSION="$1"

if [[ -z "$EXPECTED_ES_VERSION" ]]; then
    echo -e "\nExpected Elasticsearch version not provided. Usage: $0 <expected_es_version>"
    exit 1
fi

if [[ -f "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" ]]; then
    mv "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE.$CURRENT_TIME"
fi

check_heavynodes_es_version() {
    # Check if heavynodes are in this grid
    if ! salt-key -l accepted | grep -q 'heavynode$'; then
        # No heavynodes, skip version check
        echo "No heavynodes detected in this Security Onion deployment. Skipping heavynode Elasticsearch version verification."
        return 0
    fi

    echo -e "\nOne or more heavynodes detected. Verifying their Elasticsearch versions."

    local retries=20
    local retry_count=0
    local delay=180

    while [[ $retry_count -lt $retries ]]; do
        # keep stderr with the variable for logging
        heavynode_versions=$(salt -C 'G@role:so-heavynode' cmd.run 'so-elasticsearch-query / --retry 3 --retry-delay 10 | jq ".version.number"' shell=/bin/bash --out=json 2> /dev/null)
        local exit_status=$?

        # Check that all heavynodes returned good data
        if [[ $exit_status -ne 0 ]]; then
            echo "Failed to retrieve the Elasticsearch version from one or more heavynodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries."
            ((retry_count++))
            sleep $delay
            continue
        else
            if echo "$heavynode_versions" | jq -s --arg expected "\"$EXPECTED_ES_VERSION\"" --exit-status 'all(.[]; . | to_entries | all(.[]; .value == $expected))' > /dev/null; then
                echo -e "\nAll heavynodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION."
                return 0
            else
                echo "One or more heavynodes are not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
                ((retry_count++))
                sleep $delay
                continue
            fi
        fi
    done

    echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
    echo "One or more heavynodes are not at the expected Elasticsearch version $EXPECTED_ES_VERSION."
    echo "Current versions:"
    echo "$heavynode_versions" | jq -s 'add'
    echo "$heavynode_versions" | jq -s 'add' >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
    echo -e "\nStopping the automatic upgrade to the latest Security Onion version. Heavynodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step, to avoid potential data loss!"
    echo -e "\nHeavynodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a long time depending on the network link between the Manager and the Heavynodes."
    echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"

    return 1
}

check_searchnodes_es_version() {
    local retries=20
    local retry_count=0
    local delay=180

    while [[ $retry_count -lt $retries ]]; do
        # keep stderr with the variable for logging
        cluster_versions=$(so-elasticsearch-query _nodes/_all/version --retry 5 --retry-delay 10 --fail 2>&1)
        local exit_status=$?

        if [[ $exit_status -ne 0 ]]; then
            echo "Failed to retrieve Elasticsearch versions from the searchnodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries."
            ((retry_count++))
            sleep $delay
            continue
        else
            if echo "$cluster_versions" | jq --arg expected "$EXPECTED_ES_VERSION" --exit-status '.nodes | to_entries | all(.[].value.version; . == $expected)' > /dev/null; then
                echo "All Searchnodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION."
                return 0
            else
                echo "One or more Searchnodes are not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
                ((retry_count++))
                sleep $delay
                continue
            fi
        fi
    done

    echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
    echo "One or more Searchnodes are not at the expected Elasticsearch version $EXPECTED_ES_VERSION."
    echo "Current versions:"
    echo "$cluster_versions" | jq '.nodes | to_entries | map({(.value.name): .value.version}) | sort | add'
    echo "$cluster_versions" >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
    echo -e "\nStopping the automatic upgrade to the latest version. Searchnodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step, to avoid potential data loss!"
    echo -e "\nSearchnodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a while depending on cluster size and the network link between the Manager and the Searchnodes."
    echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"

    return 1
}

# All Searchnodes and Heavynodes must have their own Elasticsearch upgraded before moving on to the final upgrade step.
check_searchnodes_es_version || exit 1
check_heavynodes_es_version || exit 1

# Remove the required-version state file after successful verification
rm -f "$2"

exit 0

EOF
}
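
The jq expressions above gate the upgrade on every node reporting the expected version. A quick standalone run of the searchnode check against sample `_nodes` output (sample JSON and node names are made up):

#!/bin/bash
expected="8.18.8"
sample='{"nodes":{"abc":{"name":"so-searchnode-01","version":"8.18.8"},"def":{"name":"so-searchnode-02","version":"8.17.3"}}}'

# --exit-status makes jq exit non-zero when the expression evaluates to false,
# so the shell can branch on "all nodes match" without parsing any output.
if echo "$sample" | jq --arg expected "$expected" --exit-status \
    '.nodes | to_entries | all(.[].value.version; . == $expected)' > /dev/null; then
    echo "all nodes at $expected"
else
    echo "at least one node is behind"
fi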

# Keeping this block in case we need to do a hotfix that requires a salt update
apply_hotfix() {
    if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
@@ -1596,7 +1931,7 @@ apply_hotfix() {
            mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
            mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
            systemctl_func "start" "salt-minion"
-            (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
+            (wait_for_salt_minion "$MINIONID" "120" "4" "$SOUP_LOG" || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
        fi
    else
        echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"

@@ -1666,6 +2001,8 @@ main() {
    echo "Verifying we have the latest soup script."
    verify_latest_update_script

+    verify_es_version_compatibility
+
    echo "Let's see if we need to update Security Onion."
    upgrade_check
    upgrade_space

@@ -1793,7 +2130,7 @@ main() {
    echo ""
    echo "Running a highstate. This could take several minutes."
    set +e
-    (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
+    (wait_for_salt_minion "$MINIONID" "120" "4" "$SOUP_LOG" || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
    highstate
    set -e

@@ -1806,7 +2143,7 @@ main() {
    check_saltmaster_status

    echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
-    (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
+    (wait_for_salt_minion "$MINIONID" "120" "4" "$SOUP_LOG" || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"

    # Stop long-running scripts to allow potentially updated scripts to load on the next execution.
    killall salt-relay.sh
@@ -1831,7 +2168,7 @@ main() {
    if [[ $is_airgap -eq 0 ]]; then
        echo ""
        echo "Cleaning repos on remote Security Onion nodes."
-        salt -C 'not *_eval and not *_manager and not *_managersearch and not *_standalone and G@os:CentOS' cmd.run "yum clean all"
+        salt -C 'not *_eval and not *_manager* and not *_standalone and G@os:OEL' cmd.run "dnf clean all"
        echo ""
    fi
fi

@@ -6,9 +6,6 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

-include:
-  - ssl
-
# Drop the correct nginx config based on role
nginxconfdir:
  file.directory:

@@ -8,81 +8,14 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'nginx/map.jinja' import NGINXMERGED %}
-{% set ca_server = GLOBALS.minion_id %}

include:
+  - nginx.ssl
  - nginx.config
  - nginx.sostatus

-{% if grains.role not in ['so-fleet'] %}
-
-{# if the user has selected to replace the crt and key in the ui #}
-{% if NGINXMERGED.ssl.replace_cert %}
-
-managerssl_key:
-  file.managed:
-    - name: /etc/pki/managerssl.key
-    - source: salt://nginx/ssl/ssl.key
-    - mode: 640
-    - group: 939
-    - watch_in:
-      - docker_container: so-nginx
-
-managerssl_crt:
-  file.managed:
-    - name: /etc/pki/managerssl.crt
-    - source: salt://nginx/ssl/ssl.crt
-    - mode: 644
-    - watch_in:
-      - docker_container: so-nginx
-
-{% else %}
-
-managerssl_key:
-  x509.private_key_managed:
-    - name: /etc/pki/managerssl.key
-    - keysize: 4096
-    - backup: True
-    - new: True
-{% if salt['file.file_exists']('/etc/pki/managerssl.key') -%}
-    - prereq:
-      - x509: /etc/pki/managerssl.crt
-{%- endif %}
-    - retry:
-        attempts: 5
-        interval: 30
-    - watch_in:
-      - docker_container: so-nginx
-
-# Create a cert for the reverse proxy
-managerssl_crt:
-  x509.certificate_managed:
-    - name: /etc/pki/managerssl.crt
-    - ca_server: {{ ca_server }}
-    - signing_policy: managerssl
-    - private_key: /etc/pki/managerssl.key
-    - CN: {{ GLOBALS.hostname }}
-    - subjectAltName: "DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}, DNS:{{ GLOBALS.url_base }}"
-    - days_remaining: 0
-    - days_valid: 820
-    - backup: True
-    - timeout: 30
-    - retry:
-        attempts: 5
-        interval: 30
-    - watch_in:
-      - docker_container: so-nginx
-
-{% endif %}
-
-msslkeyperms:
-  file.managed:
-    - replace: False
-    - name: /etc/pki/managerssl.key
-    - mode: 640
-    - group: 939
-
+{% if GLOBALS.role != 'so-fleet' %}
+{% set container_config = 'so-nginx' %}
make-rule-dir-nginx:
  file.directory:
    - name: /nsm/rules
@@ -92,15 +25,11 @@ make-rule-dir-nginx:
      - user
      - group
    - show_changes: False

-{% endif %}
-
-{# if this is an so-fleet node then we want to use the port bindings, custom bind mounts defined for fleet #}
-{% if GLOBALS.role == 'so-fleet' %}
-{% set container_config = 'so-nginx-fleet-node' %}
-{% else %}
-{% set container_config = 'so-nginx' %}
-{% endif %}
+{% else %}
+{# if this is an so-fleet node then we want to use the port bindings, custom bind mounts defined for fleet #}
+{% set container_config = 'so-nginx-fleet-node' %}
+{% endif %}

so-nginx:
  docker_container.running:
@@ -154,18 +83,27 @@ so-nginx:
    - watch:
      - file: nginxconf
+      - file: nginxconfdir
-    - require:
-      - file: nginxconf
-{% if GLOBALS.is_manager %}
-{% if NGINXMERGED.ssl.replace_cert %}
+{% if GLOBALS.is_manager %}
+{% if NGINXMERGED.ssl.replace_cert %}
      - file: managerssl_key
      - file: managerssl_crt
-{% else %}
+{% else %}
      - x509: managerssl_key
      - x509: managerssl_crt
-{% endif%}
+{% endif%}
+{% endif %}
+    - require:
+      - file: nginxconf
+{% if GLOBALS.is_manager %}
+{% if NGINXMERGED.ssl.replace_cert %}
+      - file: managerssl_key
+      - file: managerssl_crt
+{% else %}
+      - x509: managerssl_key
+      - x509: managerssl_crt
+{% endif%}
      - file: navigatorconfig
{% endif %}

delete_so-nginx_so-status.disabled:
  file.uncomment:

salt/nginx/ssl.sls (new file)
@@ -0,0 +1,87 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'nginx/map.jinja' import NGINXMERGED %}
{% from 'ca/map.jinja' import CA %}

{% if GLOBALS.role != 'so-fleet' %}
{# if the user has selected to replace the crt and key in the ui #}
{% if NGINXMERGED.ssl.replace_cert %}

managerssl_key:
  file.managed:
    - name: /etc/pki/managerssl.key
    - source: salt://nginx/ssl/ssl.key
    - mode: 640
    - group: 939
    - watch_in:
      - docker_container: so-nginx

managerssl_crt:
  file.managed:
    - name: /etc/pki/managerssl.crt
    - source: salt://nginx/ssl/ssl.crt
    - mode: 644
    - watch_in:
      - docker_container: so-nginx

{% else %}

managerssl_key:
  x509.private_key_managed:
    - name: /etc/pki/managerssl.key
    - keysize: 4096
    - backup: True
    - new: True
{% if salt['file.file_exists']('/etc/pki/managerssl.key') -%}
    - prereq:
      - x509: /etc/pki/managerssl.crt
{%- endif %}
    - retry:
        attempts: 5
        interval: 30
    - watch_in:
      - docker_container: so-nginx

# Create a cert for the reverse proxy
managerssl_crt:
  x509.certificate_managed:
    - name: /etc/pki/managerssl.crt
    - ca_server: {{ CA.server }}
    - signing_policy: managerssl
    - private_key: /etc/pki/managerssl.key
    - CN: {{ GLOBALS.hostname }}
    - subjectAltName: "DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}, DNS:{{ GLOBALS.url_base }}"
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
    - watch_in:
      - docker_container: so-nginx

{% endif %}

msslkeyperms:
  file.managed:
    - replace: False
    - name: /etc/pki/managerssl.key
    - mode: 640
    - group: 939

{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
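
With `days_remaining: 7`, Salt reissues the certificate once it is within 7 days of expiry (Salt treats the old `days_remaining: 0` as disabling that renewal check). To eyeball the cert's remaining validity by hand, a couple of stock openssl commands work, using the path from the state above:

# Show when the manager cert expires.
openssl x509 -enddate -noout -in /etc/pki/managerssl.crt

# Exit non-zero if it expires within the next 7 days (604800 seconds),
# roughly the same threshold Salt now uses for renewal.
openssl x509 -checkend 604800 -noout -in /etc/pki/managerssl.crt || echo "renewal due"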

salt/pcap/ca.sls (new file)
@@ -0,0 +1,22 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}

stenoca:
  file.directory:
    - name: /opt/so/conf/steno/certs
    - user: 941
    - group: 939
    - makedirs: True

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -57,12 +57,6 @@ stenoconf:
        PCAPMERGED: {{ PCAPMERGED }}
        STENO_BPF_COMPILED: "{{ STENO_BPF_COMPILED }}"

-stenoca:
-  file.directory:
-    - name: /opt/so/conf/steno/certs
-    - user: 941
-    - group: 939
-
pcaptmpdir:
  file.directory:
    - name: /nsm/pcaptmp

@@ -10,6 +10,7 @@


include:
+  - pcap.ca
  - pcap.config
  - pcap.sostatus


@@ -7,9 +7,6 @@
{% if sls.split('.')[0] in allowed_states %}
{% from 'redis/map.jinja' import REDISMERGED %}

-include:
-  - ssl
-
# Redis Setup
redisconfdir:
  file.directory:

@@ -9,6 +9,8 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}

include:
+  - ca
+  - redis.ssl
  - redis.config
  - redis.sostatus

@@ -31,11 +33,7 @@ so-redis:
      - /nsm/redis/data:/data:rw
      - /etc/pki/redis.crt:/certs/redis.crt:ro
      - /etc/pki/redis.key:/certs/redis.key:ro
-{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
-      - /etc/pki/ca.crt:/certs/ca.crt:ro
-{% else %}
      - /etc/pki/tls/certs/intca.crt:/certs/ca.crt:ro
-{% endif %}
{% if DOCKER.containers['so-redis'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-redis'].custom_bind_mounts %}
      - {{ BIND }}
@@ -55,16 +53,14 @@ so-redis:
{% endif %}
    - entrypoint: "redis-server /usr/local/etc/redis/redis.conf"
    - watch:
-      - file: /opt/so/conf/redis/etc
-    - require:
-      - file: redisconf
-      - file: trusttheca
-      - x509: redis_crt
-      - x509: redis_key
+      - file: /opt/so/conf/redis/etc
+    - require:
+      - file: trusttheca
+      - x509: redis_crt
+      - x509: redis_key
+{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
+      - x509: pki_public_ca_crt
+{% else %}
+      - x509: trusttheca
+{% endif %}

delete_so-redis_so-status.disabled:
  file.uncomment:

salt/redis/ssl.sls (new file)
@@ -0,0 +1,54 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}

redis_key:
  x509.private_key_managed:
    - name: /etc/pki/redis.key
    - keysize: 4096
    - backup: True
    - new: True
{% if salt['file.file_exists']('/etc/pki/redis.key') -%}
    - prereq:
      - x509: /etc/pki/redis.crt
{%- endif %}
    - retry:
        attempts: 5
        interval: 30

redis_crt:
  x509.certificate_managed:
    - name: /etc/pki/redis.crt
    - ca_server: {{ CA.server }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - signing_policy: registry
    - private_key: /etc/pki/redis.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30

rediskeyperms:
  file.managed:
    - replace: False
    - name: /etc/pki/redis.key
    - mode: 640
    - group: 939

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
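
Both new ssl.sls files use the same key-rotation pattern: `new: True` generates a fresh key on rotation, and the `prereq` on the certificate state ensures the key is only regenerated when the certificate is actually due for reissue. To spot-check which certificate the redis container is actually serving after a rotation, a hedged openssl probe works (this assumes redis listens with TLS on the manager at 6379; adjust host and port for your grid):

# Inspect the cert presented by the redis TLS listener (host/port are assumptions).
echo | openssl s_client -connect manager:6379 -showcerts 2>/dev/null \
    | openssl x509 -noout -subject -enddate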

Some files were not shown because too many files have changed in this diff.