Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 09:12:45 +01:00)

Compare commits: 2.4.4-2023 ... 2.4.5-2023 (160 commits)
Commits (SHA1):

a13b3f305a, 38089c6662, 2d863f09eb, 37b98ba188, 65d1e57ccd, 9ae32e2bd6, 6e8f31e083, 3c5cd941c7,
2ea2a4d0a7, 90102b1148, ec81cbd70d, 59c0109c91, 9af2a731ca, 9b656ebbc0, 9d3744aa25, 9fddd56c96,
89c4f58296, 0ba1e7521a, 36747cf940, 118088c35f, 63373710b4, 209da766ba, 433cde0f9e, 9fe9256a0f,
014aeffb2a, 3b86b60207, 0f52530d07, 726ec72350, 560ec9106d, a51acfc314, 78950ebfbb, d3ae2b03f0,
dd1fa51eb5, 682289ef23, 593cdbd060, 4ed0ba5040, 2472d6a727, 18e31a4490, 2caca92082, abf74e0ae4,
dc7ce5ba8f, 6b5343f582, ca6276b922, 3e4136e641, 15b8e1a753, b7197bbd16, 8966617508, 9319c3f2e1,
d4fbf7d6a6, e78fcbc6cb, 27b70cbf68, ffb54135d1, d40a8927c3, 9172e10dba, 1907ea805c, 80598d7f8d,
13c3e7f5ff, d4389d5057, cf2233bbb6, 3847863b3d, 3368789b43, 1bc7bbc76e, e108bb9bcd, 5414b0756c,
11c827927c, 3054b8dcb9, 399758cd5f, 1c8a8c460c, ab28cee7cf, 5a3c1f0373, 435da77388, da2910e36f,
eb512d9aa2, 03f5e44be7, f153c1125d, 99b61b5e1d, 8036df4b20, aab55c8cf6, f3c5d26a4e, 64776936cc,
c17b324108, 72e1cbbfb6, f102351052, ac28f90af3, f6c6204555, 9873121000, 5630b353c4, 04ed5835ae,
407cb2a537, b520c1abb7, 25b11c35fb, ef0301d364, e694019027, 22ebb2faf6, 0d5ed2e835, 8ab1769d70,
6692fffb9b, 23414599ee, 8b3a38f573, 9ec4322bf4, 7037fc52f8, 0e047cffad, 44b086a028, 4e2eb86b36,
1cbf60825d, 2d13bf1a61, 968fee3488, da51fd59a0, 3fa0a98830, e7bef745eb, 82b335ed04, f35f42c83d,
4adaddf13f, b6579d7d45, 87a5d20ac9, 2875a7a2e5, f27ebc47c1, 63b4bdcebe, ba3660d0da, 83265d9d6c,
527a6ba454, f84b0a3219, ae6997a6b7, 9d59e4250f, 48d9c14563, 29b64eadd4, 5dd5f9fc1c, 44c926ba8d,
6a55a8e5c0, 64bad0a9cf, b6dd347eb8, a89508f1ae, ed7b674fbb, 0c2a4cbaba, 57562ad5e3, 95581f505a,
599de60dc8, 77101fec12, 069d32be1a, e78e6b74ed, 16217912db, 635ddc9b21, 18d8f0d448, 1c42d70d30,
282f13a774, f867be9e04, 4939447764, 5a59975cb8, 20f3cedc01, e563d71856, 1ca78fd297, e76ee718e0,
5c90a5f27e, ecbb353d68, aa56085758, 4c8373452d, 3a22ef8e86, 54080c42fe, 12486599e0, 3c16218c5a
````diff
@@ -1,18 +1,18 @@
-### 2.4.4-20230728 ISO image built on 2023/07/28
+### 2.4.5-20230807 ISO image released on 2023/08/07

 ### Download and Verify

-2.4.4-20230728 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.4-20230728.iso
-
-MD5: F63E76245F3E745B5BDE9E6E647A7CB6
-SHA1: 6CE4E4A3399CD282D4F8592FB19D510388AB3EEA
-SHA256: BF8FEB91B1D94B67C3D4A79D209B068F4A46FEC7C15EEF65B0FCE9851D7E6C9F
+2.4.5-20230807 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.4.5-20230807.iso
+
+MD5: F83FD635025A3A65B380EAFCEB61A92E
+SHA1: 5864D4CD520617E3328A3D956CAFCC378A8D2D08
+SHA256: D333BAE0DD198DFD80DF59375456D228A4E18A24EDCDB15852CD4CA3F92B69A7

 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.4-20230728.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.5-20230807.iso.sig

 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.

 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.4-20230728.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.5-20230807.iso.sig
 ```

 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.4-20230728.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.5-20230807.iso
 ```

 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.4.4-20230728.iso.sig securityonion-2.4.4-20230728.iso
+gpg --verify securityonion-2.4.5-20230807.iso.sig securityonion-2.4.5-20230807.iso
 ```

 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Tue 11 Jul 2023 06:23:37 PM EDT using RSA key ID FE507013
+gpg: Signature made Sat 05 Aug 2023 10:12:46 AM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
````
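The download-and-verify steps above can be chained into a single pass. A minimal sketch, assuming only the tools already referenced in the release notes (wget, gpg) and the URLs shown above for the 2.4.5-20230807 image:

```bash
#!/bin/bash
# Sketch: fetch the signing key, signature, and ISO, then verify in one run.
set -e

ISO=securityonion-2.4.5-20230807.iso
SIG=$ISO.sig

# Import the Security Onion signing key
wget -q https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS -O KEYS
gpg --import KEYS

# Download the signature file and the ISO image
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/$SIG
wget https://download.securityonion.net/file/securityonion/$ISO

# gpg exits non-zero if the signature does not verify
if gpg --verify "$SIG" "$ISO"; then
    echo "Good signature for $ISO"
else
    echo "Signature verification FAILED for $ISO" >&2
    exit 1
fi
```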
```diff
@@ -1,6 +1,6 @@
-## Security Onion 2.4 Release Candidate 1 (RC1)
+## Security Onion 2.4 Release Candidate 2 (RC2)

-Security Onion 2.4 Release Candidate 1 (RC1) is here!
+Security Onion 2.4 Release Candidate 2 (RC2) is here!

 ## Screenshots
```
```diff
@@ -17,6 +17,7 @@ commonpkgs:
 - netcat-openbsd
 - sqlite3
 - libssl-dev
 - procps
 - python3-dateutil
 - python3-docker
 - python3-packaging
@@ -70,6 +71,7 @@ commonpkgs:
 - net-tools
 - nmap-ncat
 - openssl
 - procps-ng
 - python3-dnf-plugin-versionlock
 - python3-docker
 - python3-m2crypto
```
```diff
@@ -5,7 +5,16 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-ELASTIC_AGENT_TARBALL_VERSION="8.7.1"
+# Elastic agent is not managed by salt. Because of this we must store this base information in a
+# script that accompanies the soup system. Since so-common is one of those special soup files,
+# and since this same logic is required during installation, it's included in this file.
+ELASTIC_AGENT_TARBALL_VERSION="8.8.2"
+ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
+ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
+ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
+ELASTIC_AGENT_MD5="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
+ELASTIC_AGENT_EXPANSION_DIR=/nsm/elastic-fleet/artifacts/beats/elastic-agent

 DEFAULT_SALT_DIR=/opt/so/saltstack/default
 DOC_BASE_URL="https://docs.securityonion.net/en/2.4"

@@ -161,6 +170,34 @@ disable_fastestmirror() {
   sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
 }

+download_and_verify() {
+  source_url=$1
+  source_md5_url=$2
+  dest_file=$3
+  md5_file=$4
+  expand_dir=$5
+
+  if [[ -n "$expand_dir" ]]; then
+    mkdir -p "$expand_dir"
+  fi
+
+  if ! verify_md5_checksum "$dest_file" "$md5_file"; then
+    retry 15 10 "curl --fail --retry 5 --retry-delay 15 -L '$source_url' --output '$dest_file'" "" ""
+    retry 15 10 "curl --fail --retry 5 --retry-delay 15 -L '$source_md5_url' --output '$md5_file'" "" ""
+
+    if verify_md5_checksum "$dest_file" "$md5_file"; then
+      echo "Source file and checksum are good."
+    else
+      echo "Unable to download and verify the source file and checksum."
+      return 1
+    fi
+  fi
+
+  if [[ -n "$expand_dir" ]]; then
+    tar -xf "$dest_file" -C "$expand_dir"
+  fi
+}
+
 elastic_license() {

   read -r -d '' message <<- EOM
@@ -211,7 +248,7 @@ gpg_rpm_import() {
       echo "Imported $RPMKEY"
     done
   elif [[ $is_rpm ]]; then
-    info "Importing the security onion GPG key"
+    echo "Importing the security onion GPG key"
     rpm --import ../salt/repo/client/files/oracle/keys/securityonion.pub
   fi
 }
@@ -225,12 +262,15 @@ init_monitor() {

   if [[ $MONITORNIC == "bond0" ]]; then
     BIFACES=$(lookup_bond_interfaces)
+    for i in rx tx sg tso ufo gso gro lro rx-vlan-offload tx-vlan-offload generic-receive-offload generic-segmentation-offload tcp-segmentation-offload; do
+      ethtool -K "$MONITORNIC" "$i" off;
+    done
   else
     BIFACES=$MONITORNIC
   fi

   for DEVICE_IFACE in $BIFACES; do
-    for i in rx tx sg tso ufo gso gro lro; do
+    for i in rx tx sg tso ufo gso gro lro rx-vlan-offload tx-vlan-offload generic-receive-offload generic-segmentation-offload tcp-segmentation-offload; do
       ethtool -K "$DEVICE_IFACE" "$i" off;
     done
     ip link set dev "$DEVICE_IFACE" arp off multicast off allmulticast off promisc on
@@ -467,6 +507,11 @@ has_uppercase() {
     || return 1
 }

+update_elastic_agent() {
+  echo "Checking if Elastic Agent update is necessary..."
+  download_and_verify "$ELASTIC_AGENT_URL" "$ELASTIC_AGENT_MD5_URL" "$ELASTIC_AGENT_FILE" "$ELASTIC_AGENT_MD5" "$ELASTIC_AGENT_EXPANSION_DIR"
+}
+
 valid_cidr() {
   # Verify there is a backslash in the string
   echo "$1" | grep -qP "^[^/]+/[^/]+$" || return 1
@@ -620,6 +665,23 @@ valid_username() {
   echo "$user" | grep -qP '^[a-z_]([a-z0-9_-]{0,31}|[a-z0-9_-]{0,30}\$)$' && return 0 || return 1
 }

+verify_md5_checksum() {
+  data_file=$1
+  md5_file=${2:-${data_file}.md5}
+
+  if [[ ! -f "$dest_file" || ! -f "$md5_file" ]]; then
+    return 2
+  fi
+
+  SOURCEHASH=$(md5sum "$data_file" | awk '{ print $1 }')
+  HASH=$(cat "$md5_file")
+
+  if [[ "$HASH" == "$SOURCEHASH" ]]; then
+    return 0
+  fi
+  return 1
+}
+
 wait_for_web_response() {
   url=$1
   expected=$2
```
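For reference, the check performed by the new `verify_md5_checksum` helper comes down to comparing `md5sum` output against the contents of a sidecar `.md5` file. A minimal sketch of that same comparison with hypothetical file paths (not the actual artifact paths):

```bash
# Sketch: compare a file's computed MD5 against the hash stored in a sidecar file.
data_file=/tmp/example.tar.gz    # hypothetical data file
md5_file=/tmp/example.md5        # hypothetical file containing only the expected hash

computed=$(md5sum "$data_file" | awk '{ print $1 }')
expected=$(cat "$md5_file")

if [[ "$computed" == "$expected" ]]; then
    echo "checksum OK"
else
    echo "checksum mismatch: $computed != $expected" >&2
fi
```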
```diff
@@ -103,7 +103,7 @@ def output(options, console, code, data):
 def check_container_status(options, console):
   code = 0
   cli = "docker"
-  proc = subprocess.run([cli, 'ps', '--format', '{{json .}}'], stdout=subprocess.PIPE, encoding="utf-8")
+  proc = subprocess.run([cli, 'ps', '--format', 'json'], stdout=subprocess.PIPE, encoding="utf-8")
   if proc.returncode != 0:
     fail("Container system error; unable to obtain container process statuses")
```
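The change above swaps the Go-template format string `{{json .}}` for the literal `json` format keyword, which requires a reasonably recent Docker CLI. Either way, each output line is a standalone JSON object, so downstream parsing is similar; a hedged sketch of consuming that output with jq (field names are those commonly emitted by `docker ps`):

```bash
# Sketch: parse `docker ps --format json` output, one JSON object per line,
# printing container name and status as tab-separated columns.
docker ps --format json | jq -r '[.Names, .Status] | @tsv'
```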
```diff
@@ -27,6 +27,8 @@ Imports one or more evtx files into Security Onion. The evtx files will be analy
 Options:
   --json   Outputs summary in JSON format. Implies --quiet.
   --quiet  Silences progress information to stdout.
+  --shift  Adds a time shift. Accepts a single argument that is intended to be the date of the last record, and shifts the dates of the previous records accordingly.
+           Ex. sudo so-import-evtx --shift "2023-08-01 01:01:01" example.evtx
 EOF
 }
@@ -44,6 +46,10 @@ while [[ $# -gt 0 ]]; do
     --quiet)
       quiet=1
       ;;
+    --shift)
+      SHIFTDATE=$1
+      shift
+      ;;
     -*)
       echo "Encountered unexpected parameter: $param"
       usage
@@ -68,8 +74,10 @@ function status {
 function evtx2es() {
   EVTX=$1
   HASH=$2
+  SHIFTDATE=$3

   docker run --rm \
+    -e "SHIFTTS=$SHIFTDATE" \
     -v "$EVTX:/tmp/data.evtx" \
     -v "/nsm/import/$HASH/evtx/:/tmp/evtx/" \
     -v "/nsm/import/evtx-end_newest:/tmp/newest" \
@@ -113,7 +121,9 @@ echo $END_NEWEST > /nsm/import/evtx-end_newest
 for EVTX in $INPUT_FILES; do
   EVTX=$(/usr/bin/realpath "$EVTX")
   status "Processing Import: ${EVTX}"

+  if ! [ -z "$SHIFTDATE" ]; then
+    status "- timeshifting logs to end date of $SHIFTDATE"
+  fi
   # generate a unique hash to assist with dedupe checks
   HASH=$(md5sum "${EVTX}" | awk '{ print $1 }')
   HASH_DIR=/nsm/import/${HASH}
@@ -136,7 +146,7 @@ for EVTX in $INPUT_FILES; do

   # import evtx and write them to import ingest pipeline
   status "- importing logs to Elasticsearch..."
-  evtx2es "${EVTX}" $HASH
+  evtx2es "${EVTX}" $HASH "$SHIFTDATE"
   if [[ $? -ne 0 ]]; then
     INVALID_EVTXS_COUNT=$((INVALID_EVTXS_COUNT + 1))
     status "- WARNING: This evtx file may not have fully imported successfully"
@@ -222,4 +232,4 @@ if [[ $json -eq 1 ]]; then
   }'''
 fi

-exit $RESULT
+exit $RESULT
```
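The new `--shift` option passes the supplied end date into the importer container via the `SHIFTTS` environment variable, and timestamps of earlier records are shifted relative to that date. A short usage sketch mirroring the example in the help text above (file name and date are examples only):

```bash
# Sketch: import an evtx file, shifting timestamps so the newest record
# lands on the supplied end date.
sudo so-import-evtx --shift "2023-08-01 01:01:01" example.evtx

# Without --shift, the records keep their original timestamps.
sudo so-import-evtx example.evtx
```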
```diff
@@ -1,7 +1,7 @@
 #!/bin/bash

 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

@@ -9,25 +9,26 @@

 . /usr/sbin/so-common

 appliance_check() {
 {%- if salt['grains.get']('sosmodel', '') %}
   APPLIANCE=1
 {%- if grains['sosmodel'] in ['SO2AMI01', 'SO2GCI01', 'SO2AZI01'] %}
   exit 0
 {%- endif %}
   DUDEYOUGOTADELL=$(dmidecode |grep Dell)
   if [[ -n $DUDEYOUGOTADELL ]]; then
     APPTYPE=dell
   else
     APPTYPE=sm
   fi
   mkdir -p /opt/so/log/raid

 {%- else %}
   echo "This is not an appliance"
   exit 0
 {%- endif %}
 }
 {%- if salt['grains.get']('sosmodel', '') %}
 {%- set model = salt['grains.get']('sosmodel') %}
 model={{ model }}
 # Don't need cloud images to use this
 if [[ $model =~ ^(SO2AMI01|SO2AZI01|SO2GCI01)$ ]]; then
   exit 0
 fi
 {%- else %}
 echo "This is not an appliance"
 exit 0
 {%- endif %}
 if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200|SOSSNNV|SOSMN)$ ]]; then
   is_bossraid=true
 fi
 if [[ $model =~ ^(SOSSNNV|SOSMN)$ ]]; then
   is_swraid=true
 fi
 if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200)$ ]]; then
   is_hwraid=true
 fi

 check_nsm_raid() {
   PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl)
@@ -49,61 +50,44 @@ check_nsm_raid() {
 check_boss_raid() {
   MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional)

   if [[ -n $DUDEYOUGOTADELL ]]; then
     if [[ -n $MVCLI ]]; then
       BOSSRAID=0
     else
       BOSSRAID=1
     fi
   if [[ -n $MVCLI ]]; then
     BOSSRAID=0
   else
     BOSSRAID=1
   fi
 }

 check_software_raid() {
   if [[ -n $DUDEYOUGOTADELL ]]; then
     SWRC=$(grep "_" /proc/mdstat)

     if [[ -n $SWRC ]]; then
       # RAID is failed in some way
       SWRAID=1
     else
       SWRAID=0
     fi
   SWRC=$(grep "_" /proc/mdstat)
   if [[ -n $SWRC ]]; then
     # RAID is failed in some way
     SWRAID=1
   else
     SWRAID=0
   fi
 }

 # This script checks raid status if you use SO appliances
 # Set everything to 0
 SWRAID=0
 BOSSRAID=0
 HWRAID=0

 # See if this is an appliance

 appliance_check
 check_nsm_raid
 check_boss_raid
 {%- if salt['grains.get']('sosmodel', '') %}
 {%- if grains['sosmodel'] in ['SOSMN', 'SOSSNNV'] %}
 check_software_raid
 {%- endif %}
 {%- endif %}

 if [[ -n $SWRAID ]]; then
   if [[ $SWRAID == '0' && $BOSSRAID == '0' ]]; then
     RAIDSTATUS=0
   else
     RAIDSTATUS=1
   fi
 elif [[ -n $DUDEYOUGOTADELL ]]; then
   if [[ $BOSSRAID == '0' && $HWRAID == '0' ]]; then
     RAIDSTATUS=0
   else
     RAIDSTATUS=1
   fi
 elif [[ "$APPTYPE" == 'sm' ]]; then
   if [[ -n "$HWRAID" ]]; then
     RAIDSTATUS=0
   else
     RAIDSTATUS=1
   fi
 if [[ $is_hwraid ]]; then
   check_nsm_raid
 fi
 if [[ $is_bossraid ]]; then
   check_boss_raid
 fi
 if [[ $is_swraid ]]; then
   check_software_raid
 fi

 echo "nsmraid=$RAIDSTATUS" > /opt/so/log/raid/status.log
 sum=$(($SWRAID + $BOSSRAID + $HWRAID))

 if [[ $sum == "0" ]]; then
   RAIDSTATUS=0
 else
   RAIDSTATUS=1
 fi

 echo "nsmraid=$RAIDSTATUS" > /opt/so/log/raid/status.log
```
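The rewritten RAID status script replaces the per-vendor branching with three model-derived flags (`is_hwraid`, `is_bossraid`, `is_swraid`) and then sums the individual status variables, so any non-zero component marks the overall RAID status as failed. A minimal sketch of that aggregation pattern, using hypothetical values:

```bash
# Sketch: aggregate several 0/1 status flags into one overall status.
SWRAID=0
BOSSRAID=0
HWRAID=1    # hypothetical: hardware RAID reported a problem

sum=$((SWRAID + BOSSRAID + HWRAID))
if [[ $sum -eq 0 ]]; then
    RAIDSTATUS=0   # all checked subsystems are healthy
else
    RAIDSTATUS=1   # at least one subsystem is degraded or failed
fi
echo "nsmraid=$RAIDSTATUS"
```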
salt/desktop/files/session.jinja (new normal file, 7 lines)

```diff
@@ -0,0 +1,7 @@
+# This file is managed by Salt in the desktop.xwindows state
+# It will not be overwritten if it already exists
+
+[User]
+Session=gnome-classic
+Icon=/home/{{USERNAME}}/.face
+SystemAccount=false
```
```diff
@@ -181,6 +181,7 @@ desktop_packages:
 - gstreamer1-plugins-good-gtk
 - gstreamer1-plugins-ugly-free
 - gtk-update-icon-cache
 - gtk2
 - gtk3
 - gtk4
 - gtkmm30
@@ -295,6 +296,7 @@ desktop_packages:
 - mesa-vulkan-drivers
 - microcode_ctl
 - mobile-broadband-provider-info
 - mono-devel
 - mpfr
 - mpg123-libs
 - mtdev
@@ -347,6 +349,7 @@ desktop_packages:
 - snappy
 - sound-theme-freedesktop
 - soundtouch
 - securityonion-networkminer
 - speech-dispatcher
 - speech-dispatcher-espeak-ng
 - speex
```
salt/desktop/scripts/convert-gnome-classic.sh (new normal file, 4 lines)

```diff
@@ -0,0 +1,4 @@
+#!/bin/bash
+echo "Setting default session to gnome-classic"
+cp /usr/share/accountsservice/user-templates/standard /etc/accountsservice/user-templates/
+sed -i 's|Session=gnome|Session=gnome-classic|g' /etc/accountsservice/user-templates/standard
```
```diff
@@ -14,6 +14,27 @@ graphical_target:
     - require:
       - desktop_packages

+convert_gnome_classic:
+  cmd.script:
+    - name: salt://desktop/scripts/convert-gnome-classic.sh
+
+{% for username in salt['file.find'](path='/home/',mindepth=1,maxdepth=1,type='d') %}
+{% set username = username.split('/')[2] %}
+{% if username != 'zeek' %}
+{% if not salt['file.file_exists']('/var/lib/AccountsService/users/' ~ username) %}
+
+{{username}}_session:
+  file.managed:
+    - name: /var/lib/AccountsService/users/{{username}}
+    - source: salt://desktop/files/session.jinja
+    - template: jinja
+    - defaults:
+        USERNAME: {{username}}
+
+{% endif %}
+{% endif %}
+{% endfor %}
+
 {% else %}

 desktop_xwindows_os_fail:
```
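The Salt loop above iterates over the directories in `/home/`, skips the `zeek` account, and drops a file rendered from `session.jinja` into `/var/lib/AccountsService/users/<username>` only if one does not already exist. A rough bash equivalent of the same logic, shown purely for illustration (in practice Salt's `file.managed` handles the templating and idempotence):

```bash
# Sketch: per-user AccountsService session file, mirroring the Salt state's logic.
for dir in /home/*/; do
    username=$(basename "$dir")
    [ "$username" = "zeek" ] && continue
    target="/var/lib/AccountsService/users/$username"
    if [ ! -f "$target" ]; then
        cat > "$target" <<EOF
[User]
Session=gnome-classic
Icon=/home/$username/.face
SystemAccount=false
EOF
    fi
done
```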
```diff
@@ -28,6 +28,13 @@ elasticagentconfdir:
     - group: 939
     - makedirs: True

+elasticagentlogdir:
+  file.directory:
+    - name: /opt/so/log/elasticagent
+    - user: 949
+    - group: 939
+    - makedirs: True
+
 elasticagent_sbin_jinja:
   file.recurse:
     - name: /usr/sbin
```
```diff
@@ -33,20 +33,25 @@ so-elastic-agent:
 {% endif %}
     - binds:
       - /opt/so/conf/elastic-agent/elastic-agent.yml:/usr/share/elastic-agent/elastic-agent.yml:ro
       - /opt/so/log/elasticagent:/usr/share/elastic-agent/logs
       - /etc/pki/tls/certs/intca.crt:/etc/pki/tls/certs/intca.crt:ro
       - /nsm:/nsm:ro
       - /opt/so/log:/opt/so/log:ro
 {% if DOCKER.containers['so-elastic-agent'].custom_bind_mounts %}
 {% for BIND in DOCKER.containers['so-elastic-agent'].custom_bind_mounts %}
       - {{ BIND }}
 {% endfor %}
 {% endif %}
 {% endif %}
     - environment:
       - FLEET_CA=/etc/pki/tls/certs/intca.crt
       - LOGS_PATH=logs
 {% if DOCKER.containers['so-elastic-agent'].extra_env %}
 {% for XTRAENV in DOCKER.containers['so-elastic-agent'].extra_env %}
       - {{ XTRAENV }}
 {% endfor %}
 {% endif %}
     - require:
       - file: create-elastic-agent-config
     - watch:
       - file: create-elastic-agent-config
```
@@ -3,7 +3,7 @@
|
||||
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
|
||||
|
||||
id: aea1ba80-1065-11ee-a369-97538913b6a9
|
||||
revision: 2
|
||||
revision: 1
|
||||
outputs:
|
||||
default:
|
||||
type: elasticsearch
|
||||
@@ -22,56 +22,369 @@ agent:
|
||||
metrics: false
|
||||
features: {}
|
||||
inputs:
|
||||
- id: logfile-logs-80ffa884-2cfc-459a-964a-34df25714d85
|
||||
name: suricata-logs
|
||||
revision: 1
|
||||
- id: logfile-logs-fefef78c-422f-4cfa-8abf-4cd1b9428f62
|
||||
name: import-evtx-logs
|
||||
revision: 2
|
||||
type: logfile
|
||||
use_output: default
|
||||
meta:
|
||||
package:
|
||||
name: log
|
||||
version:
|
||||
version:
|
||||
data_stream:
|
||||
namespace: so
|
||||
package_policy_id: 80ffa884-2cfc-459a-964a-34df25714d85
|
||||
package_policy_id: fefef78c-422f-4cfa-8abf-4cd1b9428f62
|
||||
streams:
|
||||
- id: logfile-log.log-80ffa884-2cfc-459a-964a-34df25714d85
|
||||
- id: logfile-log.log-fefef78c-422f-4cfa-8abf-4cd1b9428f62
|
||||
data_stream:
|
||||
dataset: import
|
||||
paths:
|
||||
- /nsm/import/*/evtx/*.json
|
||||
processors:
|
||||
- dissect:
|
||||
field: log.file.path
|
||||
tokenizer: '/nsm/import/%{import.id}/evtx/%{import.file}'
|
||||
target_prefix: ''
|
||||
- decode_json_fields:
|
||||
fields:
|
||||
- message
|
||||
target: ''
|
||||
- drop_fields:
|
||||
ignore_missing: true
|
||||
fields:
|
||||
- host
|
||||
- add_fields:
|
||||
fields:
|
||||
dataset: system.security
|
||||
type: logs
|
||||
namespace: default
|
||||
target: data_stream
|
||||
- add_fields:
|
||||
fields:
|
||||
dataset: system.security
|
||||
module: system
|
||||
imported: true
|
||||
target: event
|
||||
- then:
|
||||
- add_fields:
|
||||
fields:
|
||||
dataset: windows.sysmon_operational
|
||||
target: data_stream
|
||||
- add_fields:
|
||||
fields:
|
||||
dataset: windows.sysmon_operational
|
||||
module: windows
|
||||
imported: true
|
||||
target: event
|
||||
if:
|
||||
equals:
|
||||
winlog.channel: Microsoft-Windows-Sysmon/Operational
|
||||
- then:
|
||||
- add_fields:
|
||||
fields:
|
||||
dataset: system.application
|
||||
target: data_stream
|
||||
- add_fields:
|
||||
fields:
|
||||
dataset: system.application
|
||||
target: event
|
||||
if:
|
||||
equals:
|
||||
winlog.channel: Application
|
||||
- then:
|
||||
- add_fields:
|
||||
fields:
|
||||
dataset: system.system
|
||||
target: data_stream
|
||||
- add_fields:
|
||||
fields:
|
||||
dataset: system.system
|
||||
target: event
|
||||
if:
|
||||
equals:
|
||||
winlog.channel: System
|
||||
- then:
|
||||
- add_fields:
|
||||
fields:
|
||||
dataset: windows.powershell_operational
|
||||
target: data_stream
|
||||
- add_fields:
|
||||
fields:
|
||||
dataset: windows.powershell_operational
|
||||
module: windows
|
||||
target: event
|
||||
if:
|
||||
equals:
|
||||
winlog.channel: Microsoft-Windows-PowerShell/Operational
|
||||
tags:
|
||||
- import
|
||||
- id: logfile-redis-fc98c947-7d17-4861-a318-7ad075f6d1b0
|
||||
name: redis-logs
|
||||
revision: 2
|
||||
type: logfile
|
||||
use_output: default
|
||||
meta:
|
||||
package:
|
||||
name: redis
|
||||
version:
|
||||
data_stream:
|
||||
namespace: default
|
||||
package_policy_id: fc98c947-7d17-4861-a318-7ad075f6d1b0
|
||||
streams:
|
||||
- id: logfile-redis.log-fc98c947-7d17-4861-a318-7ad075f6d1b0
|
||||
data_stream:
|
||||
dataset: redis.log
|
||||
type: logs
|
||||
exclude_files:
|
||||
- .gz$
|
||||
paths:
|
||||
- /opt/so/log/redis/redis.log
|
||||
tags:
|
||||
- redis-log
|
||||
exclude_lines:
|
||||
- '^\s+[\-`(''.|_]'
|
||||
- id: logfile-logs-3b56803d-5ade-4c93-b25e-9b37182f66b8
|
||||
name: import-suricata-logs
|
||||
revision: 2
|
||||
type: logfile
|
||||
use_output: default
|
||||
meta:
|
||||
package:
|
||||
name: log
|
||||
version:
|
||||
data_stream:
|
||||
namespace: so
|
||||
package_policy_id: 3b56803d-5ade-4c93-b25e-9b37182f66b8
|
||||
streams:
|
||||
- id: logfile-log.log-3b56803d-5ade-4c93-b25e-9b37182f66b8
|
||||
data_stream:
|
||||
dataset: import
|
||||
pipeline: suricata.common
|
||||
paths:
|
||||
- /nsm/import/*/suricata/eve*.json
|
||||
processors:
|
||||
- add_fields:
|
||||
fields:
|
||||
module: suricata
|
||||
imported: true
|
||||
category: network
|
||||
target: event
|
||||
- dissect:
|
||||
field: log.file.path
|
||||
tokenizer: '/nsm/import/%{import.id}/suricata/%{import.file}'
|
||||
target_prefix: ''
|
||||
- id: logfile-logs-c327e1a3-1ebe-449c-a8eb-f6f35032e69d
|
||||
name: soc-server-logs
|
||||
revision: 2
|
||||
type: logfile
|
||||
use_output: default
|
||||
meta:
|
||||
package:
|
||||
name: log
|
||||
version:
|
||||
data_stream:
|
||||
namespace: so
|
||||
package_policy_id: c327e1a3-1ebe-449c-a8eb-f6f35032e69d
|
||||
streams:
|
||||
- id: logfile-log.log-c327e1a3-1ebe-449c-a8eb-f6f35032e69d
|
||||
data_stream:
|
||||
dataset: soc
|
||||
pipeline: common
|
||||
paths:
|
||||
- /opt/so/log/soc/sensoroni-server.log
|
||||
processors:
|
||||
- decode_json_fields:
|
||||
add_error_key: true
|
||||
process_array: true
|
||||
max_depth: 2
|
||||
fields:
|
||||
- message
|
||||
target: soc
|
||||
- add_fields:
|
||||
fields:
|
||||
module: soc
|
||||
dataset_temp: server
|
||||
category: host
|
||||
target: event
|
||||
- rename:
|
||||
ignore_missing: true
|
||||
fields:
|
||||
- from: soc.fields.sourceIp
|
||||
to: source.ip
|
||||
- from: soc.fields.status
|
||||
to: http.response.status_code
|
||||
- from: soc.fields.method
|
||||
to: http.request.method
|
||||
- from: soc.fields.path
|
||||
to: url.path
|
||||
- from: soc.message
|
||||
to: event.action
|
||||
- from: soc.level
|
||||
to: log.level
|
||||
tags:
|
||||
- so-soc
|
||||
- id: logfile-logs-906e0d4c-9ec3-4c6a-bef6-e347ec9fd073
|
||||
name: soc-sensoroni-logs
|
||||
revision: 2
|
||||
type: logfile
|
||||
use_output: default
|
||||
meta:
|
||||
package:
|
||||
name: log
|
||||
version:
|
||||
data_stream:
|
||||
namespace: so
|
||||
package_policy_id: 906e0d4c-9ec3-4c6a-bef6-e347ec9fd073
|
||||
streams:
|
||||
- id: logfile-log.log-906e0d4c-9ec3-4c6a-bef6-e347ec9fd073
|
||||
data_stream:
|
||||
dataset: soc
|
||||
pipeline: common
|
||||
paths:
|
||||
- /opt/so/log/sensoroni/sensoroni.log
|
||||
processors:
|
||||
- decode_json_fields:
|
||||
add_error_key: true
|
||||
process_array: true
|
||||
max_depth: 2
|
||||
fields:
|
||||
- message
|
||||
target: sensoroni
|
||||
- add_fields:
|
||||
fields:
|
||||
module: soc
|
||||
dataset_temp: sensoroni
|
||||
category: host
|
||||
target: event
|
||||
- rename:
|
||||
ignore_missing: true
|
||||
fields:
|
||||
- from: sensoroni.fields.sourceIp
|
||||
to: source.ip
|
||||
- from: sensoroni.fields.status
|
||||
to: http.response.status_code
|
||||
- from: sensoroni.fields.method
|
||||
to: http.request.method
|
||||
- from: sensoroni.fields.path
|
||||
to: url.path
|
||||
- from: sensoroni.message
|
||||
to: event.action
|
||||
- from: sensoroni.level
|
||||
to: log.level
|
||||
- id: logfile-logs-df0d7f2c-221f-433b-b18b-d1cf83250515
|
||||
name: soc-salt-relay-logs
|
||||
revision: 2
|
||||
type: logfile
|
||||
use_output: default
|
||||
meta:
|
||||
package:
|
||||
name: log
|
||||
version:
|
||||
data_stream:
|
||||
namespace: so
|
||||
package_policy_id: df0d7f2c-221f-433b-b18b-d1cf83250515
|
||||
streams:
|
||||
- id: logfile-log.log-df0d7f2c-221f-433b-b18b-d1cf83250515
|
||||
data_stream:
|
||||
dataset: soc
|
||||
pipeline: common
|
||||
paths:
|
||||
- /opt/so/log/soc/salt-relay.log
|
||||
processors:
|
||||
- dissect:
|
||||
field: message
|
||||
tokenizer: '%{soc.ts} | %{event.action}'
|
||||
target_prefix: ''
|
||||
- add_fields:
|
||||
fields:
|
||||
module: soc
|
||||
dataset_temp: salt_relay
|
||||
category: host
|
||||
target: event
|
||||
tags:
|
||||
- so-soc
|
||||
- id: logfile-logs-74bd2366-fe52-493c-bddc-843a017fc4d0
|
||||
name: soc-auth-sync-logs
|
||||
revision: 2
|
||||
type: logfile
|
||||
use_output: default
|
||||
meta:
|
||||
package:
|
||||
name: log
|
||||
version:
|
||||
data_stream:
|
||||
namespace: so
|
||||
package_policy_id: 74bd2366-fe52-493c-bddc-843a017fc4d0
|
||||
streams:
|
||||
- id: logfile-log.log-74bd2366-fe52-493c-bddc-843a017fc4d0
|
||||
data_stream:
|
||||
dataset: soc
|
||||
pipeline: common
|
||||
paths:
|
||||
- /opt/so/log/soc/sync.log
|
||||
processors:
|
||||
- dissect:
|
||||
field: message
|
||||
tokenizer: '%{event.action}'
|
||||
target_prefix: ''
|
||||
- add_fields:
|
||||
fields:
|
||||
module: soc
|
||||
dataset_temp: auth_sync
|
||||
category: host
|
||||
target: event
|
||||
tags:
|
||||
- so-soc
|
||||
- id: logfile-logs-d151d9bf-ff2a-4529-9520-c99244bc0253
|
||||
name: suricata-logs
|
||||
revision: 2
|
||||
type: logfile
|
||||
use_output: default
|
||||
meta:
|
||||
package:
|
||||
name: log
|
||||
version:
|
||||
data_stream:
|
||||
namespace: so
|
||||
package_policy_id: d151d9bf-ff2a-4529-9520-c99244bc0253
|
||||
streams:
|
||||
- id: logfile-log.log-d151d9bf-ff2a-4529-9520-c99244bc0253
|
||||
data_stream:
|
||||
dataset: suricata
|
||||
pipeline: suricata.common
|
||||
paths:
|
||||
- /nsm/suricata/eve*.json
|
||||
processors:
|
||||
- add_fields:
|
||||
target: event
|
||||
fields:
|
||||
category: network
|
||||
module: suricata
|
||||
pipeline: suricata.common
|
||||
- id: logfile-logs-90103ac4-f6bd-4a4a-b596-952c332390fc
|
||||
category: network
|
||||
target: event
|
||||
- id: logfile-logs-31f94d05-ae75-40ee-b9c5-0e0356eff327
|
||||
name: strelka-logs
|
||||
revision: 1
|
||||
revision: 2
|
||||
type: logfile
|
||||
use_output: default
|
||||
meta:
|
||||
package:
|
||||
name: log
|
||||
version:
|
||||
version:
|
||||
data_stream:
|
||||
namespace: so
|
||||
package_policy_id: 90103ac4-f6bd-4a4a-b596-952c332390fc
|
||||
package_policy_id: 31f94d05-ae75-40ee-b9c5-0e0356eff327
|
||||
streams:
|
||||
- id: logfile-log.log-90103ac4-f6bd-4a4a-b596-952c332390fc
|
||||
- id: logfile-log.log-31f94d05-ae75-40ee-b9c5-0e0356eff327
|
||||
data_stream:
|
||||
dataset: strelka
|
||||
pipeline: strelka.file
|
||||
paths:
|
||||
- /nsm/strelka/log/strelka.log
|
||||
processors:
|
||||
- add_fields:
|
||||
target: event
|
||||
fields:
|
||||
category: file
|
||||
module: strelka
|
||||
pipeline: strelka.file
|
||||
category: file
|
||||
target: event
|
||||
- id: logfile-logs-6197fe84-9b58-4d9b-8464-3d517f28808d
|
||||
name: zeek-logs
|
||||
revision: 1
|
||||
|
||||
```diff
@@ -2,7 +2,7 @@ elasticfleet:
   enabled: False
   config:
     server:
-      custom_fqdn: ''
+      custom_fqdn: []
       enable_auto_configuration: True
       endpoints_enrollment: ''
       es_token: ''
@@ -32,4 +32,5 @@ elasticfleet:
     - fim
     - github
     - google_workspace
     - log
+    - 1password
```
@@ -15,6 +15,7 @@
|
||||
include:
|
||||
- elasticfleet.config
|
||||
- elasticfleet.sostatus
|
||||
- ssl
|
||||
|
||||
# If enabled, automatically update Fleet Logstash Outputs
|
||||
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
|
||||
@@ -61,11 +62,14 @@ so-elastic-fleet:
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
- binds:
|
||||
- /etc/pki:/etc/pki:ro
|
||||
- /etc/pki/elasticfleet-server.crt:/etc/pki/elasticfleet-server.crt:ro
|
||||
- /etc/pki/elasticfleet-server.key:/etc/pki/elasticfleet-server.key:ro
|
||||
- /etc/pki/tls/certs/intca.crt:/etc/pki/tls/certs/intca.crt:ro
|
||||
{% if GLOBALS.os_family == 'Debian' %}
|
||||
- /etc/ssl:/etc/ssl:ro
|
||||
- /etc/ssl/elasticfleet-server.crt:/etc/ssl/elasticfleet-server.crt:ro
|
||||
- /etc/ssl/elasticfleet-server.key:/etc/ssl/elasticfleet-server.key:ro
|
||||
- /etc/ssl/tls/certs/intca.crt:/etc/ssl/tls/certs/intca.crt:ro
|
||||
{% endif %}
|
||||
#- /opt/so/conf/elastic-fleet/state:/usr/share/elastic-agent/state:rw
|
||||
- /opt/so/log/elasticfleet:/usr/share/elastic-agent/logs
|
||||
{% if DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
|
||||
@@ -93,6 +97,9 @@ so-elastic-fleet:
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- watch:
|
||||
- x509: etc_elasticfleet_key
|
||||
- x509: etc_elasticfleet_crt
|
||||
{% endif %}
|
||||
|
||||
{% if GLOBALS.role != "so-fleet" %}
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
|
||||
@@ -5,17 +5,16 @@
|
||||
"package": {
|
||||
"name": "endpoint",
|
||||
"title": "Elastic Defend",
|
||||
"version": ""
|
||||
"version": "8.8.0"
|
||||
},
|
||||
"enabled": true,
|
||||
"policy_id": "endpoints-initial",
|
||||
"vars": {},
|
||||
"inputs": [{
|
||||
"type": "endpoint",
|
||||
"type": "ENDPOINT_INTEGRATION_CONFIG",
|
||||
"enabled": true,
|
||||
"streams": [],
|
||||
"config": {
|
||||
"integration_config": {
|
||||
"_config": {
|
||||
"value": {
|
||||
"type": "endpoint",
|
||||
"endpointConfig": {
|
||||
@@ -25,4 +24,4 @@
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
|
||||
@@ -1,106 +0,0 @@
|
||||
{
|
||||
"package": {
|
||||
"name": "elasticsearch",
|
||||
"version": ""
|
||||
},
|
||||
"name": "elasticsearch-logs",
|
||||
"namespace": "default",
|
||||
"description": "Elasticsearch Logs",
|
||||
"policy_id": "so-grid-nodes_heavy",
|
||||
"inputs": {
|
||||
"elasticsearch-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"elasticsearch.audit": {
|
||||
"enabled": false,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/var/log/elasticsearch/*_audit.json"
|
||||
]
|
||||
}
|
||||
},
|
||||
"elasticsearch.deprecation": {
|
||||
"enabled": false,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/var/log/elasticsearch/*_deprecation.json"
|
||||
]
|
||||
}
|
||||
},
|
||||
"elasticsearch.gc": {
|
||||
"enabled": false,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/var/log/elasticsearch/gc.log.[0-9]*",
|
||||
"/var/log/elasticsearch/gc.log"
|
||||
]
|
||||
}
|
||||
},
|
||||
"elasticsearch.server": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/elasticsearch/*.log"
|
||||
]
|
||||
}
|
||||
},
|
||||
"elasticsearch.slowlog": {
|
||||
"enabled": false,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/var/log/elasticsearch/*_index_search_slowlog.json",
|
||||
"/var/log/elasticsearch/*_index_indexing_slowlog.json"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"elasticsearch-elasticsearch/metrics": {
|
||||
"enabled": false,
|
||||
"vars": {
|
||||
"hosts": [
|
||||
"http://localhost:9200"
|
||||
],
|
||||
"scope": "node"
|
||||
},
|
||||
"streams": {
|
||||
"elasticsearch.stack_monitoring.ccr": {
|
||||
"enabled": false
|
||||
},
|
||||
"elasticsearch.stack_monitoring.cluster_stats": {
|
||||
"enabled": false
|
||||
},
|
||||
"elasticsearch.stack_monitoring.enrich": {
|
||||
"enabled": false
|
||||
},
|
||||
"elasticsearch.stack_monitoring.index": {
|
||||
"enabled": false
|
||||
},
|
||||
"elasticsearch.stack_monitoring.index_recovery": {
|
||||
"enabled": false,
|
||||
"vars": {
|
||||
"active.only": true
|
||||
}
|
||||
},
|
||||
"elasticsearch.stack_monitoring.index_summary": {
|
||||
"enabled": false
|
||||
},
|
||||
"elasticsearch.stack_monitoring.ml_job": {
|
||||
"enabled": false
|
||||
},
|
||||
"elasticsearch.stack_monitoring.node": {
|
||||
"enabled": false
|
||||
},
|
||||
"elasticsearch.stack_monitoring.node_stats": {
|
||||
"enabled": false
|
||||
},
|
||||
"elasticsearch.stack_monitoring.pending_tasks": {
|
||||
"enabled": false
|
||||
},
|
||||
"elasticsearch.stack_monitoring.shard": {
|
||||
"enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
{
|
||||
"package": {
|
||||
"name": "log",
|
||||
"version": ""
|
||||
},
|
||||
"name": "kratos-logs",
|
||||
"namespace": "so",
|
||||
"description": "Kratos logs",
|
||||
"policy_id": "so-grid-nodes_heavy",
|
||||
"inputs": {
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/kratos/kratos.log"
|
||||
],
|
||||
"data_stream.dataset": "kratos",
|
||||
"tags": ["so-kratos"],
|
||||
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
|
||||
"custom": "pipeline: kratos"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3,7 +3,7 @@
|
||||
"name": "osquery_manager",
|
||||
"version": ""
|
||||
},
|
||||
"name": "osquery-grid-nodes",
|
||||
"name": "osquery-grid-nodes_heavy",
|
||||
"namespace": "default",
|
||||
"policy_id": "so-grid-nodes_heavy",
|
||||
"inputs": {
|
||||
|
||||
@@ -1,76 +0,0 @@
|
||||
{
|
||||
"package": {
|
||||
"name": "redis",
|
||||
"version": ""
|
||||
},
|
||||
"name": "redis-logs",
|
||||
"namespace": "default",
|
||||
"description": "Redis logs",
|
||||
"policy_id": "so-grid-nodes_heavy",
|
||||
"inputs": {
|
||||
"redis-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"redis.log": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/redis/redis.log"
|
||||
],
|
||||
"tags": [
|
||||
"redis-log"
|
||||
],
|
||||
"preserve_original_event": false
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"redis-redis": {
|
||||
"enabled": false,
|
||||
"streams": {
|
||||
"redis.slowlog": {
|
||||
"enabled": false,
|
||||
"vars": {
|
||||
"hosts": [
|
||||
"127.0.0.1:6379"
|
||||
],
|
||||
"password": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"redis-redis/metrics": {
|
||||
"enabled": false,
|
||||
"vars": {
|
||||
"hosts": [
|
||||
"127.0.0.1:6379"
|
||||
],
|
||||
"idle_timeout": "20s",
|
||||
"maxconn": 10,
|
||||
"network": "tcp",
|
||||
"password": ""
|
||||
},
|
||||
"streams": {
|
||||
"redis.info": {
|
||||
"enabled": false,
|
||||
"vars": {
|
||||
"period": "10s"
|
||||
}
|
||||
},
|
||||
"redis.key": {
|
||||
"enabled": false,
|
||||
"vars": {
|
||||
"key.patterns": "- limit: 20\n pattern: *\n",
|
||||
"period": "10s"
|
||||
}
|
||||
},
|
||||
"redis.keyspace": {
|
||||
"enabled": false,
|
||||
"vars": {
|
||||
"period": "10s"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
{
|
||||
"package": {
|
||||
"name": "log",
|
||||
"version": ""
|
||||
},
|
||||
"name": "soc-auth-sync-logs",
|
||||
"namespace": "so",
|
||||
"description": "Security Onion - Elastic Auth Sync - Logs",
|
||||
"policy_id": "so-grid-nodes_heavy",
|
||||
"inputs": {
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/soc/sync.log"
|
||||
],
|
||||
"data_stream.dataset": "soc",
|
||||
"tags": ["so-soc"],
|
||||
"processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync",
|
||||
"custom": "pipeline: common"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
{
|
||||
"package": {
|
||||
"name": "log",
|
||||
"version": ""
|
||||
},
|
||||
"name": "soc-salt-relay-logs",
|
||||
"namespace": "so",
|
||||
"description": "Security Onion - Salt Relay - Logs",
|
||||
"policy_id": "so-grid-nodes_heavy",
|
||||
"inputs": {
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/soc/salt-relay.log"
|
||||
],
|
||||
"data_stream.dataset": "soc",
|
||||
"tags": ["so-soc"],
|
||||
"processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay",
|
||||
"custom": "pipeline: common"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
{
|
||||
"package": {
|
||||
"name": "log",
|
||||
"version": ""
|
||||
},
|
||||
"name": "soc-sensoroni-logs",
|
||||
"namespace": "so",
|
||||
"description": "Security Onion - Sensoroni - Logs",
|
||||
"policy_id": "so-grid-nodes_heavy",
|
||||
"inputs": {
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/sensoroni/sensoroni.log"
|
||||
],
|
||||
"data_stream.dataset": "soc",
|
||||
"tags": [],
|
||||
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
|
||||
"custom": "pipeline: common"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
{
|
||||
"package": {
|
||||
"name": "log",
|
||||
"version": ""
|
||||
},
|
||||
"name": "soc-server-logs",
|
||||
"namespace": "so",
|
||||
"description": "Security Onion Console Logs",
|
||||
"policy_id": "so-grid-nodes_heavy",
|
||||
"inputs": {
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.log": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/soc/sensoroni-server.log"
|
||||
],
|
||||
"data_stream.dataset": "soc",
|
||||
"tags": ["so-soc"],
|
||||
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
|
||||
"custom": "pipeline: common"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -4,7 +4,7 @@
|
||||
"name": "system",
|
||||
"version": ""
|
||||
},
|
||||
"name": "system-grid-nodes",
|
||||
"name": "system-grid-nodes_heavy",
|
||||
"namespace": "default",
|
||||
"inputs": {
|
||||
"system-logfile": {
|
||||
|
||||
```diff
@@ -12,10 +12,11 @@ elasticfleet:
   config:
     server:
       custom_fqdn:
-        description: Custom FQDN for Agents to connect to.
+        description: Custom FQDN for Agents to connect to. One per line.
         global: True
         helpLink: elastic-fleet.html
         advanced: True
+        forcedType: "[]string"
       enable_auto_configuration:
         description: Enable auto-configuration of Logstash Outputs & Fleet Host URLs.
         global: True
```
```diff
@@ -15,10 +15,8 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
     printf "\n\nInitial Endpoints Policy - Loading $INTEGRATION\n"
     elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
     if [ -n "$INTEGRATION_ID" ]; then
-      if [ "$NAME" != "elastic-defend-endpoints" ]; then
-        printf "\n\nIntegration $NAME exists - Updating integration\n"
-        elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
-      fi
+      printf "\n\nIntegration $NAME exists - Updating integration\n"
+      elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
     else
       printf "\n\nIntegration does not exist - Creating integration\n"
       elastic_fleet_integration_create "@$INTEGRATION"
```
salt/elasticfleet/tools/sbin/so-elastic-fleet-package-list (new executable file, 15 lines)

```diff
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+. /usr/sbin/so-elastic-fleet-common
+
+# Let's snag a cookie from Kibana
+SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
+
+# List configured package policies
+curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages" -H 'kbn-xsrf: true' | jq
+
+echo
```
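The new script simply authenticates to Kibana with the grid's curl config and dumps the Fleet packages endpoint through jq. A hedged usage sketch; the jq filter assumes the API response wraps packages in an `items` array, which may differ between Elastic versions:

```bash
# Sketch: list just the package names and versions from the full JSON dump.
# The ".items[]" path is an assumption about the Fleet API response shape.
sudo so-elastic-fleet-package-list | jq -r '.items[] | "\(.name) \(.version)"'
```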
```diff
@@ -11,6 +11,12 @@
 . /usr/sbin/so-common
 . /usr/sbin/so-elastic-fleet-common

+LOG="/opt/so/log/elasticfleet/so-elastic-agent-gen-installers.log"
+
+# Check to see if we are already running
+NUM_RUNNING=$(pgrep -cf "/bin/bash /sbin/so-elastic-agent-gen-installers")
+[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING gen installers script processes running...exiting." >>$LOG && exit 0
+
 for i in {1..30}
 do
   ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
```
```diff
@@ -2,7 +2,7 @@
 # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
 # this file except in compliance with the Elastic License 2.0.
 {% from 'vars/globals.map.jinja' import GLOBALS %}
-{% set CUSTOMFQDN = salt['pillar.get']('elasticfleet:config:server:custom_fqdn') %}
+{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}

 . /usr/sbin/so-common

@@ -41,9 +41,14 @@ else
   NEW_LIST=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
 fi

-{% if CUSTOMFQDN != "" %}
-# Add Custom Hostname to list
-NEW_LIST+=("{{ CUSTOMFQDN }}:5055")
+# Query for FQDN entries & add them to the list
+{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
+CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
+readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
+for CUSTOMNAME in "${CUSTOMFQDN[@]}"
+do
+  NEW_LIST+=("$CUSTOMNAME:5055")
+done
 {% endif %}

 # Query for the current Grid Nodes that are running Logstash
```
```diff
@@ -2,7 +2,7 @@
 # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
 # this file except in compliance with the Elastic License 2.0.
 {% from 'vars/globals.map.jinja' import GLOBALS %}
-{% set CUSTOMFQDN = salt['pillar.get']('elasticfleet:config:server:custom_fqdn') %}
+{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}

 . /usr/sbin/so-common

@@ -41,9 +41,14 @@ else
   NEW_LIST=("https://{{ GLOBALS.url_base }}:8220" "https://{{ GLOBALS.hostname }}:8220")
 fi

-{% if CUSTOMFQDN != "" %}
-# Add Custom Hostname to list
-NEW_LIST+=("https://{{ CUSTOMFQDN }}:8220")
+# Query for FQDN entries & add them to the list
+{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
+CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
+readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
+for CUSTOMNAME in "${CUSTOMFQDN[@]}"
+do
+  NEW_LIST+=("https://$CUSTOMNAME:8220")
+done
 {% endif %}

 # Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes)
@@ -62,7 +67,7 @@ fi
 NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
 NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')

-# Compare the current & new list of URLs - if different, update the Fleet Server URLs
+# Compare the current & new list of URLs - if different, update the Fleet Server URLs & regenerate the agent installer
 if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
   printf "\nHashes match - no update needed.\n"
   printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
@@ -71,4 +76,5 @@ else
   printf "\nHashes don't match - update needed.\n"
   printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
   update_fleet_urls
+  /sbin/so-elastic-agent-gen-installers >> /opt/so/log/elasticfleet/so-elastic-agent-gen-installers.log &
 fi
```
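The update check above serializes the URL list to compact JSON with jq and compares SHA1 hashes of the old and new lists, so the Fleet settings are only rewritten (and agent installers regenerated) when something actually changed. A minimal sketch of that pattern with hypothetical values:

```bash
# Sketch: detect whether a list of URLs changed by hashing its JSON form.
NEW_LIST=("https://manager.example.com:8220" "https://fleet.example.com:8220")   # hypothetical hosts

# Build a compact JSON array from the bash array, then hash it.
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')

CURRENT_HASH="0000000000000000000000000000000000000000"   # hypothetical previously stored hash

if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
    echo "Hashes match - no update needed."
else
    echo "Hashes don't match - update needed."
fi
```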
@@ -78,6 +78,7 @@
|
||||
{ "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
|
||||
{ "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
|
||||
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
|
||||
{"community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
|
||||
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
|
||||
],
|
||||
"on_failure": [
|
||||
|
||||
@@ -49,11 +49,10 @@
|
||||
"on_failure" : [ {"set" : {"field" : "error.message","value" : "{{ _ingest.on_failure_message }}"}}]
|
||||
}
|
||||
},
|
||||
{ "set": { "field": "_index", "value": "so-firewall", "override": true } },
|
||||
{ "set": { "if": "ctx.network?.transport_id == '0'", "field": "network.transport", "value": "icmp", "override": true } },
|
||||
{ "community_id": {} },
|
||||
{ "set": { "field": "module", "value": "pfsense", "override": true } },
|
||||
{ "set": { "field": "dataset", "value": "firewall", "override": true } },
|
||||
{ "set": { "field": "event.module", "value": "pfsense", "override": true } },
|
||||
{ "set": { "field": "event.dataset", "value": "firewall", "override": true } },
|
||||
{ "set": { "field": "category", "value": "network", "override": true } },
|
||||
{ "remove": { "field": ["real_message", "ip_sub_msg", "firewall.sub_message"], "ignore_failure": true } }
|
||||
]
|
||||
|
||||
@@ -26,6 +26,7 @@ firewall:
|
||||
standalone: []
|
||||
strelka_frontend: []
|
||||
syslog: []
|
||||
workstation: []
|
||||
customhostgroup0: []
|
||||
customhostgroup1: []
|
||||
customhostgroup2: []
|
||||
@@ -198,9 +199,6 @@ firewall:
|
||||
portgroups:
|
||||
- redis
|
||||
- elasticsearch_node
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
beats_endpoint:
|
||||
portgroups:
|
||||
- beats_5044
|
||||
@@ -218,9 +216,6 @@ firewall:
|
||||
strelka_frontend:
|
||||
portgroups:
|
||||
- strelka_frontend
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
analyst:
|
||||
portgroups:
|
||||
- nginx
|
||||
@@ -255,6 +250,12 @@ firewall:
|
||||
localhost:
|
||||
portgroups:
|
||||
- all
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -370,6 +371,7 @@ firewall:
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- localrules
|
||||
- sensoroni
|
||||
fleet:
|
||||
portgroups:
|
||||
- elasticsearch_rest
|
||||
@@ -383,6 +385,17 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
idh:
|
||||
portgroups:
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
- yum
|
||||
- beats_5044
|
||||
- beats_5644
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
sensor:
|
||||
portgroups:
|
||||
- beats_5044
|
||||
@@ -393,6 +406,7 @@ firewall:
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
searchnode:
|
||||
portgroups:
|
||||
- redis
|
||||
@@ -405,6 +419,7 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
heavynode:
|
||||
portgroups:
|
||||
- redis
|
||||
@@ -417,6 +432,7 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
receiver:
|
||||
portgroups:
|
||||
- yum
|
||||
@@ -425,12 +441,10 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
self:
|
||||
- sensoroni
|
||||
analyst:
|
||||
portgroups:
|
||||
- syslog
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
- nginx
|
||||
beats_endpoint:
|
||||
portgroups:
|
||||
- beats_5044
|
||||
@@ -448,9 +462,9 @@ firewall:
|
||||
endgame:
|
||||
portgroups:
|
||||
- endgame
|
||||
analyst:
|
||||
workstation:
|
||||
portgroups:
|
||||
- nginx
|
||||
- yum
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -482,6 +496,9 @@ firewall:
|
||||
fleet:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
idh:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
localhost:
|
||||
portgroups:
|
||||
- all
|
||||
@@ -497,6 +514,15 @@ firewall:
|
||||
receiver:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
workstation:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -535,6 +561,7 @@ firewall:
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- localrules
|
||||
- sensoroni
|
||||
fleet:
|
||||
portgroups:
|
||||
- elasticsearch_rest
|
||||
@@ -548,6 +575,17 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
idh:
|
||||
portgroups:
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
- yum
|
||||
- beats_5044
|
||||
- beats_5644
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
sensor:
|
||||
portgroups:
|
||||
- beats_5044
|
||||
@@ -558,6 +596,7 @@ firewall:
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
searchnode:
|
||||
portgroups:
|
||||
- redis
|
||||
@@ -569,6 +608,7 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
heavynode:
|
||||
portgroups:
|
||||
- redis
|
||||
@@ -580,6 +620,7 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
receiver:
|
||||
portgroups:
|
||||
- yum
|
||||
@@ -588,9 +629,10 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
self:
|
||||
- sensoroni
|
||||
analyst:
|
||||
portgroups:
|
||||
- syslog
|
||||
- nginx
|
||||
beats_endpoint:
|
||||
portgroups:
|
||||
- beats_5044
|
||||
@@ -608,12 +650,9 @@ firewall:
|
||||
endgame:
|
||||
portgroups:
|
||||
- endgame
|
||||
syslog:
|
||||
workstation:
|
||||
portgroups:
|
||||
- syslog
|
||||
analyst:
|
||||
portgroups:
|
||||
- nginx
|
||||
- yum
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -645,6 +684,9 @@ firewall:
|
||||
fleet:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
idh:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
localhost:
|
||||
portgroups:
|
||||
- all
|
||||
@@ -660,6 +702,15 @@ firewall:
|
||||
receiver:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
workstation:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -723,6 +774,17 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
idh:
|
||||
portgroups:
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
- yum
|
||||
- beats_5044
|
||||
- beats_5644
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
sensor:
|
||||
portgroups:
|
||||
- docker_registry
|
||||
@@ -760,9 +822,10 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
self:
|
||||
- sensoroni
|
||||
analyst:
|
||||
portgroups:
|
||||
- syslog
|
||||
- nginx
|
||||
beats_endpoint:
|
||||
portgroups:
|
||||
- beats_5044
|
||||
@@ -783,12 +846,9 @@ firewall:
|
||||
strelka_frontend:
|
||||
portgroups:
|
||||
- strelka_frontend
|
||||
syslog:
|
||||
workstation:
|
||||
portgroups:
|
||||
- syslog
|
||||
analyst:
|
||||
portgroups:
|
||||
- nginx
|
||||
- yum
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -819,7 +879,10 @@ firewall:
|
||||
- all
|
||||
fleet:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
- salt_manager
|
||||
idh:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
localhost:
|
||||
portgroups:
|
||||
- all
|
||||
@@ -838,6 +901,15 @@ firewall:
|
||||
receiver:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
workstation:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -866,6 +938,14 @@ firewall:
|
||||
portgroups:
|
||||
- elasticsearch_node
|
||||
- elasticsearch_rest
|
||||
managersearch:
|
||||
portgroups:
|
||||
- elasticsearch_node
|
||||
- elasticsearch_rest
|
||||
standalone:
|
||||
portgroups:
|
||||
- elasticsearch_node
|
||||
- elasticsearch_rest
|
||||
dockernet:
|
||||
portgroups:
|
||||
- elasticsearch_node
|
||||
@@ -876,9 +956,6 @@ firewall:
|
||||
searchnode:
|
||||
portgroups:
|
||||
- elasticsearch_node
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -910,6 +987,12 @@ firewall:
|
||||
localhost:
|
||||
portgroups:
|
||||
- all
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -934,9 +1017,6 @@ firewall:
|
||||
chain:
|
||||
DOCKER-USER:
|
||||
hostgroups:
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
strelka_frontend:
|
||||
portgroups:
|
||||
- strelka_frontend
|
||||
@@ -971,6 +1051,12 @@ firewall:
|
||||
localhost:
|
||||
portgroups:
|
||||
- all
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -1022,6 +1108,9 @@ firewall:
|
||||
strelka_frontend:
|
||||
portgroups:
|
||||
- strelka_frontend
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -1111,6 +1200,9 @@ firewall:
|
||||
analyst:
|
||||
portgroups:
|
||||
- nginx
|
||||
workstation:
|
||||
portgroups:
|
||||
- yum
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -1181,11 +1273,7 @@ firewall:
|
||||
self:
|
||||
portgroups:
|
||||
- redis
|
||||
- syslog
|
||||
- beats_5644
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
beats_endpoint:
|
||||
portgroups:
|
||||
- beats_5044
|
||||
@@ -1226,6 +1314,12 @@ firewall:
|
||||
localhost:
|
||||
portgroups:
|
||||
- all
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||

@@ -1,15 +1,29 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'firewall/ipt.map.jinja' import iptmap %}

install_iptables:
pkg.installed:
- name: {{ iptmap.iptpkg }}

iptables_persist:
pkg.installed:
- name: {{ iptmap.persistpkg }}

iptables_service:
service.running:
- name: {{ iptmap.service }}
- enable: True

create_sysconfig_iptables:
file.touch:
- name: /etc/sysconfig/iptables
- name: {{ iptmap.configfile }}
- makedirs: True
- unless: 'ls /etc/sysconfig/iptables'
- unless: 'ls {{ iptmap.configfile }}'

iptables_config:
file.managed:
- name: /etc/sysconfig/iptables
- name: {{ iptmap.configfile }}
- source: salt://firewall/iptables.jinja
- template: jinja

@@ -24,11 +38,11 @@ disable_firewalld:

iptables_restore:
cmd.run:
- name: iptables-restore < /etc/sysconfig/iptables
- name: iptables-restore < {{ iptmap.configfile }}
- require:
- file: iptables_config
- onlyif:
- iptables-restore --test /etc/sysconfig/iptables
- iptables-restore --test {{ iptmap.configfile }}

{% if grains.os_family == 'RedHat' %}
enable_firewalld:

14
salt/firewall/ipt.map.jinja
Normal file
@@ -0,0 +1,14 @@
{% set iptmap = salt['grains.filter_by']({
'Debian': {
'service': 'netfilter-persistent',
'iptpkg': 'iptables',
'persistpkg': 'iptables-persistent',
'configfile': '/etc/iptables/rules.v4'
},
'RedHat': {
'service': 'iptables',
'iptpkg': 'iptables-nft',
'persistpkg': 'iptables-nft-services',
'configfile': '/etc/sysconfig/iptables'
},
}) %}
@@ -45,6 +45,7 @@ firewall:
|
||||
standalone: *hostgroupsettings
|
||||
strelka_frontend: *hostgroupsettings
|
||||
syslog: *hostgroupsettings
|
||||
workstation: *hostgroupsettings
|
||||
customhostgroup0: &customhostgroupsettings
|
||||
description: List of IP or CIDR blocks to allow to this hostgroup.
|
||||
forcedType: "[]string"
|
||||
@@ -191,6 +192,7 @@ firewall:
|
||||
description: Portgroups to add access to the docker containers for this role.
|
||||
advanced: True
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
helpLink: firewall.html
|
||||
sensor:
|
||||
portgroups: *portgroupsdocker
|
||||
@@ -214,6 +216,8 @@ firewall:
|
||||
portgroups: *portgroupsdocker
|
||||
analyst:
|
||||
portgroups: *portgroupsdocker
|
||||
workstation:
|
||||
portgroups: *portgroupsdocker
|
||||
customhostgroup0:
|
||||
portgroups: *portgroupsdocker
|
||||
customhostgroup1:
|
||||
@@ -241,6 +245,7 @@ firewall:
|
||||
description: Portgroups to add access to the host.
|
||||
advanced: True
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
helpLink: firewall.html
|
||||
dockernet:
|
||||
portgroups: *portgroupshost
|
||||
@@ -336,7 +341,9 @@ firewall:
|
||||
DOCKER-USER:
|
||||
hostgroups:
|
||||
manager:
|
||||
portgroups: *portgroupsdocker
|
||||
portgroups: *portgroupsdocker
|
||||
idh:
|
||||
portgroups: *portgroupsdocker
|
||||
sensor:
|
||||
portgroups: *portgroupsdocker
|
||||
searchnode:
|
||||
@@ -359,6 +366,8 @@ firewall:
|
||||
portgroups: *portgroupsdocker
|
||||
analyst:
|
||||
portgroups: *portgroupsdocker
|
||||
workstation:
|
||||
portgroups: *portgroupsdocker
|
||||
customhostgroup0:
|
||||
portgroups: *portgroupsdocker
|
||||
customhostgroup1:
|
||||
@@ -387,12 +396,16 @@ firewall:
|
||||
portgroups: *portgroupshost
|
||||
localhost:
|
||||
portgroups: *portgroupshost
|
||||
idh:
|
||||
portgroups: *portgroupshost
|
||||
sensor:
|
||||
portgroups: *portgroupshost
|
||||
searchnode:
|
||||
portgroups: *portgroupshost
|
||||
heavynode:
|
||||
portgroups: *portgroupshost
|
||||
workstation:
|
||||
portgroups: *portgroupshost
|
||||
customhostgroup0:
|
||||
portgroups: *portgroupshost
|
||||
customhostgroup1:
|
||||
@@ -420,6 +433,8 @@ firewall:
|
||||
hostgroups:
|
||||
managersearch:
|
||||
portgroups: *portgroupsdocker
|
||||
idh:
|
||||
portgroups: *portgroupsdocker
|
||||
sensor:
|
||||
portgroups: *portgroupsdocker
|
||||
searchnode:
|
||||
@@ -442,6 +457,8 @@ firewall:
|
||||
portgroups: *portgroupsdocker
|
||||
analyst:
|
||||
portgroups: *portgroupsdocker
|
||||
workstation:
|
||||
portgroups: *portgroupsdocker
|
||||
customhostgroup0:
|
||||
portgroups: *portgroupsdocker
|
||||
customhostgroup1:
|
||||
@@ -470,12 +487,16 @@ firewall:
|
||||
portgroups: *portgroupshost
|
||||
localhost:
|
||||
portgroups: *portgroupshost
|
||||
idh:
|
||||
portgroups: *portgroupshost
|
||||
sensor:
|
||||
portgroups: *portgroupshost
|
||||
searchnode:
|
||||
portgroups: *portgroupshost
|
||||
heavynode:
|
||||
portgroups: *portgroupshost
|
||||
workstation:
|
||||
portgroups: *portgroupshost
|
||||
customhostgroup0:
|
||||
portgroups: *portgroupshost
|
||||
customhostgroup1:
|
||||
@@ -507,6 +528,8 @@ firewall:
|
||||
portgroups: *portgroupsdocker
|
||||
fleet:
|
||||
portgroups: *portgroupsdocker
|
||||
idh:
|
||||
portgroups: *portgroupsdocker
|
||||
sensor:
|
||||
portgroups: *portgroupsdocker
|
||||
searchnode:
|
||||
@@ -531,6 +554,8 @@ firewall:
|
||||
portgroups: *portgroupsdocker
|
||||
analyst:
|
||||
portgroups: *portgroupsdocker
|
||||
workstation:
|
||||
portgroups: *portgroupsdocker
|
||||
customhostgroup0:
|
||||
portgroups: *portgroupsdocker
|
||||
customhostgroup1:
|
||||
@@ -563,12 +588,16 @@ firewall:
|
||||
portgroups: *portgroupshost
|
||||
standalone:
|
||||
portgroups: *portgroupshost
|
||||
idh:
|
||||
portgroups: *portgroupshost
|
||||
sensor:
|
||||
portgroups: *portgroupshost
|
||||
searchnode:
|
||||
portgroups: *portgroupshost
|
||||
heavynode:
|
||||
portgroups: *portgroupshost
|
||||
workstation:
|
||||
portgroups: *portgroupshost
|
||||
customhostgroup0:
|
||||
portgroups: *portgroupshost
|
||||
customhostgroup1:
|
||||
@@ -793,6 +822,8 @@ firewall:
|
||||
portgroups: *portgroupsdocker
|
||||
analyst:
|
||||
portgroups: *portgroupsdocker
|
||||
workstation:
|
||||
portgroups: *portgroupsdocker
|
||||
customhostgroup0:
|
||||
portgroups: *portgroupsdocker
|
||||
customhostgroup1:
|
||||

@@ -6,6 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}

include:
- idh.config

@@ -3,17 +3,21 @@

{%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'idstools/map.jinja' import IDSTOOLSMERGED %}
{%- set proxy = salt['pillar.get']('manager:proxy') %}

{%- set proxy = salt['pillar.get']('manager:proxy') %}
{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %}

# Download the rules from the internet
{%- if proxy %}
export http_proxy={{ proxy }}
export https_proxy={{ proxy }}
export no_proxy="{{ noproxy }}"
{%- endif %}

mkdir -p /nsm/rules/suricata
chown -R socore:socore /nsm/rules/suricata
# Download the rules from the internet
{%- if GLOBALS.airgap != 'True' %}
{%- if proxy %}
export http_proxy={{ proxy }}
export https_proxy={{ proxy }}
export no_proxy= salt['pillar.get']('manager:no_proxy')
{%- endif %}
{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %}
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force
{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %}

@@ -1 +1 @@
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.7.1","id": "8.7.1","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.8.2","id": "8.8.2","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}

@@ -63,7 +63,7 @@ update() {

IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
for i in "${LINES[@]}"; do
RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.7.1" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.8.2" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
done

@@ -90,6 +90,26 @@ logrotate:
- extension .log
- dateext
- dateyesterday
/opt/so/log/elasticagent/*_x_log:
- daily
- rotate 14
- missingok
- copytruncate
- compress
- create
- extension .log
- dateext
- dateyesterday
/opt/so/log/elasticagent/*_x_ndjson:
- daily
- rotate 14
- missingok
- copytruncate
- compress
- create
- extension .ndjson
- dateext
- dateyesterday
/opt/so/log/elasticfleet/*_x_log:
- daily
- rotate 14

@@ -9,6 +9,11 @@
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
{% from 'logstash/map.jinja' import REDIS_NODES %}
{# we append the manager here so that it is added to extra_hosts so the heavynode can resolve it #}
{# we cannont append in the logstash/map.jinja because then it would be added to the 0900_input_redis.conf #}
{% if GLOBALS.role == 'so-heavynode' %}
{% do REDIS_NODES.append({GLOBALS.manager:GLOBALS.manager_ip}) %}
{% endif %}
{% set lsheap = LOGSTASH_MERGED.settings.lsheap %}

include:
@@ -17,6 +22,7 @@ include:
{% endif %}
- logstash.config
- logstash.sostatus
- ssl

so-logstash:
docker_container.running:
@@ -85,6 +91,10 @@ so-logstash:
{% endfor %}
{% endif %}
- watch:
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-fleet', 'so-receiver'] %}
- x509: etc_elasticfleet_logstash_key
- x509: etc_elasticfleet_logstash_crt
{% endif %}
- file: lsetcsync
{% for assigned_pipeline in LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %}
- file: ls_pipeline_{{assigned_pipeline}}

@@ -74,9 +74,12 @@ fi
so-firewall includehost heavynode "$IP" --apply
;;
'IDH')
so-firewall includehost sensor "$IP" --apply
so-firewall includehost idh "$IP" --apply
;;
'RECEIVER')
so-firewall includehost receiver "$IP" --apply
;;
esac
'WORKSTATION')
so-firewall includehost workstation "$IP" --apply
;;
esac

@@ -179,12 +179,12 @@ update_registry() {

check_airgap() {
# See if this is an airgap install
AIRGAP=$(cat /opt/so/saltstack/local/pillar/global/soc_global.sls | grep airgap: | awk '{print $2}')
if [[ "$AIRGAP" == "True" ]]; then
AIRGAP=$(cat /opt/so/saltstack/local/pillar/global/soc_global.sls | grep airgap: | awk '{print $2}' | tr '[:upper:]' '[:lower:]')
if [[ "$AIRGAP" == "true" ]]; then
is_airgap=0
UPDATE_DIR=/tmp/soagupdate/SecurityOnion
AGDOCKER=/tmp/soagupdate/docker
AGREPO=/tmp/soagupdate/Packages
AGREPO=/tmp/soagupdate/minimal/Packages
else
is_airgap=1
fi
@@ -346,7 +346,7 @@ clone_to_tmp() {
# Make a temp location for the files
mkdir -p /tmp/sogh
cd /tmp/sogh
SOUP_BRANCH=""
SOUP_BRANCH="-b 2.4/main"
if [ -n "$BRANCH" ]; then
SOUP_BRANCH="-b $BRANCH"
fi
@@ -391,6 +391,8 @@ preupgrade_changes() {
echo "Checking to see if changes are needed."

[[ "$INSTALLEDVERSION" == 2.4.2 ]] && up_to_2.4.3
[[ "$INSTALLEDVERSION" == 2.4.3 ]] && up_to_2.4.4
[[ "$INSTALLEDVERSION" == 2.4.4 ]] && up_to_2.4.5
true
}

@@ -399,8 +401,8 @@ postupgrade_changes() {
echo "Running post upgrade processes."

[[ "$POSTVERSION" == 2.4.2 ]] && post_to_2.4.3

[[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4
[[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5
true
}

@@ -409,6 +411,16 @@ post_to_2.4.3() {
POSTVERSION=2.4.3
}

post_to_2.4.4() {
echo "Nothing to apply"
POSTVERSION=2.4.4
}

post_to_2.4.5() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
POSTVERSION=2.4.5
}

stop_salt_master() {
# kill all salt jobs across the grid because the hang indefinitely if they are queued and salt-master restarts
@@ -423,7 +435,7 @@ stop_salt_master() {

echo ""
echo "Storing salt-master pid."
MASTERPID=$(pgrep salt-master | head -1)
MASTERPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-master MainProcess')
echo "Found salt-master PID $MASTERPID"
systemctl_func "stop" "salt-master"
timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option."
@@ -442,7 +454,7 @@ stop_salt_minion() {
set -e

echo "Storing salt-minion pid."
MINIONPID=$(pgrep salt-minion | head -1)
MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1)
echo "Found salt-minion PID $MINIONPID"
systemctl_func "stop" "salt-minion"

@@ -453,9 +465,33 @@ stop_salt_minion() {

up_to_2.4.3() {
echo "Nothing to do for 2.4.3"
##
INSTALLEDVERSION=2.3.140
echo "Nothing to do for 2.4.3"

INSTALLEDVERSION=2.4.3
}

up_to_2.4.4() {
echo "Nothing to do for 2.4.4"

INSTALLEDVERSION=2.4.4
}

up_to_2.4.5() {
determine_elastic_agent_upgrade

INSTALLEDVERSION=2.4.5
}

determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap
else
update_elastic_agent
fi
}

update_elastic_agent_airgap() {
rsync -av /tmp/soagupdate/fleet/* /nsm/elastic-fleet/artifacts/
}

verify_upgradespace() {
@@ -495,6 +531,7 @@ update_centos_repo() {
echo "Syncing new updates to /nsm/repo"
rsync -av $AGREPO/* /nsm/repo/
echo "Creating repo"
dnf -y install yum-utils createrepo
createrepo /nsm/repo
}

@@ -834,7 +871,7 @@ main() {
set +e

echo "Checking the number of minions."
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l)
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | grep -v adv_ | wc -l)
if [[ $UPGRADESALT -eq 1 ]] && [[ $NUM_MINIONS -gt 1 ]]; then
if [[ $is_airgap -eq 0 ]]; then
echo ""
@@ -850,9 +887,6 @@ main() {
echo "Checking sudoers file."
check_sudoers

echo "Checking for necessary user migrations."
so-user migrate

systemctl_func "start" "$cron_service_name"

if [[ -n $lsl_msg ]]; then
@@ -938,6 +972,11 @@ while getopts ":b:f:y" opt; do
done
shift $((OPTIND - 1))

if [ -f $SOUP_LOG ]; then
CURRENT_TIME=$(date +%Y%m%d.%H%M%S)
mv $SOUP_LOG $SOUP_LOG.$INSTALLEDVERSION.$CURRENT_TIME
fi

if [[ -z $UNATTENDED ]]; then
cat << EOF

@@ -3,12 +3,13 @@ NOROOT=1
. /usr/sbin/so-common

{%- set proxy = salt['pillar.get']('manager:proxy') %}
{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %}

# Download the rules from the internet
{%- if proxy %}
export http_proxy={{ proxy }}
export https_proxy={{ proxy }}
export no_proxy= salt['pillar.get']('manager:no_proxy')
export no_proxy="{{ noproxy }}"
{%- endif %}

repos="/opt/so/conf/strelka/repos.txt"

@@ -296,7 +296,9 @@ http {
error_page 429 = @error429;

location @error401 {
add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400";
if ($request_uri ~* ^/(?!(^/api/.*))) {
add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400";
}
return 302 /auth/self-service/login/browser;
}

14
salt/sensor/files/99-so-checksum-offload-disable
Executable file
@@ -0,0 +1,14 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

{% set MNIC = salt['pillar.get']('sensor:interface') %}

init_monitor {{ MNIC }}
12
salt/sensor/init.sls
Normal file
@@ -0,0 +1,12 @@
offload_script:
file.managed:
- name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable
- source: salt://sensor/files/99-so-checksum-offload-disable
- mode: 755
- template: jinja

execute_checksum:
cmd.run:
- name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable
- onchanges:
- file: offload_script
@@ -1,7 +1,9 @@
|
||||
sensor:
|
||||
interface:
|
||||
description: Main sensor monitoring interface.
|
||||
helpLink: sensor.html
|
||||
helpLink: network.html
|
||||
readonly: True
|
||||
mtu:
|
||||
description: Main IP address of the grid host.
|
||||
helpLink: host.html
|
||||
description: Maximum Transmission Unit (MTU) of the sensor monitoring interface.
|
||||
helpLink: network.html
|
||||
readonly: True
|
||||
|
||||
@@ -10,6 +10,14 @@ soc:
|
||||
target:
|
||||
links:
|
||||
- '/#/hunt?q="{value|escape}" | groupby event.module* event.dataset'
|
||||
- name: actionAddToCase
|
||||
description: actionAddToCaseHelp
|
||||
icon: fa-briefcase
|
||||
jsCall: openAddToCaseDialog
|
||||
categories:
|
||||
- hunt
|
||||
- alerts
|
||||
- dashboards
|
||||
- name: actionCorrelate
|
||||
description: actionCorrelateHelp
|
||||
icon: fab fa-searchengin
|
||||
@@ -1132,7 +1140,7 @@ soc:
|
||||
showSubtitle: true
|
||||
- name: SOC - Auth
|
||||
description: Users authenticated to SOC grouped by IP address and identity
|
||||
query: 'event.dataset:kratos.audit AND msg:authenticated | groupby http_request.headers.x-real-ip identity_id'
|
||||
query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http_request.headers.x-real-ip identity_id'
|
||||
showSubtitle: true
|
||||
- name: SOC - App
|
||||
description: Logs generated by the Security Onion Console (SOC) server and modules
|
||||
@@ -1397,7 +1405,7 @@ soc:
|
||||
query: '* | groupby -sankey event.dataset event.category* | groupby -pie event.category | groupby -bar event.module* | groupby event.dataset | groupby event.module* | groupby event.category | groupby observer.name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name'
|
||||
- name: SOC Auth
|
||||
description: SOC (Security Onion Console) authentication logs
|
||||
query: 'event.dataset:kratos.audit AND msg:authenticated | groupby -sankey http_request.headers.x-real-ip identity_id | groupby http_request.headers.x-real-ip | groupby identity_id | groupby http_request.headers.user-agent'
|
||||
query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby -sankey http_request.headers.x-real-ip identity_id | groupby http_request.headers.x-real-ip | groupby identity_id | groupby http_request.headers.user-agent'
|
||||
- name: Elastalerts
|
||||
description: Elastalert logs
|
||||
query: '_index: "*:elastalert*" | groupby rule_name | groupby alert_info.type'
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
{% if sls in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
|
||||
{% set CUSTOMFQDN = salt['pillar.get']('elasticfleet:config:server:custom_fqdn') %}
|
||||
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
|
||||
|
||||
{% set global_ca_text = [] %}
|
||||
{% set global_ca_server = [] %}
|
||||
@@ -154,7 +154,7 @@ etc_elasticfleet_crt:
|
||||
- signing_policy: elasticfleet
|
||||
- private_key: /etc/pki/elasticfleet-server.key
|
||||
- CN: {{ GLOBALS.url_base }}
|
||||
- subjectAltName: DNS:{{ GLOBALS.hostname }},IP:{{ GLOBALS.node_ip }} {% if CUSTOMFQDN != "" %},DNS:{{ CUSTOMFQDN }}{% endif %}
|
||||
- subjectAltName: DNS:{{ GLOBALS.hostname }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
|
||||
- days_remaining: 0
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
@@ -211,7 +211,7 @@ etc_elasticfleet_logstash_crt:
|
||||
- signing_policy: elasticfleet
|
||||
- private_key: /etc/pki/elasticfleet-logstash.key
|
||||
- CN: {{ GLOBALS.url_base }}
|
||||
- subjectAltName: DNS:{{ GLOBALS.hostname }},IP:{{ GLOBALS.node_ip }} {% if CUSTOMFQDN != "" %},DNS:{{ CUSTOMFQDN }}{% endif %}
|
||||
- subjectAltName: DNS:{{ GLOBALS.hostname }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
|
||||
- days_remaining: 0
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
|
||||
@@ -8,7 +8,7 @@ NOROOT=1
|
||||
{%- if proxy %}
|
||||
export http_proxy={{ proxy }}
|
||||
export https_proxy={{ proxy }}
|
||||
export no_proxy= salt['pillar.get']('manager:no_proxy')
|
||||
export no_proxy=salt['pillar.get']('manager:no_proxy')
|
||||
{%- endif %}
|
||||
|
||||
mkdir -p /tmp/yara
|
||||
|
||||
@@ -36,6 +36,7 @@ base:
|
||||
|
||||
'*_sensor and G@saltversion:{{saltversion}}':
|
||||
- match: compound
|
||||
- sensor
|
||||
- ssl
|
||||
- sensoroni
|
||||
- telegraf
|
||||
@@ -52,6 +53,7 @@ base:
|
||||
'*_eval and G@saltversion:{{saltversion}}':
|
||||
- match: compound
|
||||
- salt.master
|
||||
- sensor
|
||||
- ca
|
||||
- ssl
|
||||
- registry
|
||||
@@ -118,6 +120,7 @@ base:
|
||||
'*_standalone and G@saltversion:{{saltversion}}':
|
||||
- match: compound
|
||||
- salt.master
|
||||
- sensor
|
||||
- ca
|
||||
- ssl
|
||||
- registry
|
||||
@@ -196,6 +199,7 @@ base:
|
||||
|
||||
'*_heavynode and G@saltversion:{{saltversion}}':
|
||||
- match: compound
|
||||
- sensor
|
||||
- ssl
|
||||
- sensoroni
|
||||
- nginx
|
||||
@@ -216,6 +220,7 @@ base:
|
||||
'*_import and G@saltversion:{{saltversion}}':
|
||||
- match: compound
|
||||
- salt.master
|
||||
- sensor
|
||||
- ca
|
||||
- ssl
|
||||
- registry
|
||||
|
||||
@@ -85,12 +85,13 @@ analyze_system() {
|
||||
|
||||
desktop_salt_local() {
|
||||
|
||||
SALTVERSION=$(egrep 'version: [0-9]{4}' ../salt/salt/master.defaults.yaml | sed 's/^.*version: //')
|
||||
# Install everything using local salt
|
||||
# Set the repo
|
||||
securityonion_repo
|
||||
gpg_rpm_import
|
||||
# Install salt
|
||||
logCmd "yum -y install salt-minion-3004.1 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
|
||||
logCmd "yum -y install salt-minion-$SALTVERSION httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
|
||||
logCmd "yum -y update --exclude=salt*"
|
||||
|
||||
logCmd "salt-call state.apply desktop --local --file-root=../salt/ -l info"
|
||||
@@ -1014,25 +1015,9 @@ detect_os() {
|
||||
}
|
||||
|
||||
download_elastic_agent_artifacts() {
|
||||
agentArchive=/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz
|
||||
agentMd5=/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5
|
||||
beatsDir=/nsm/elastic-fleet/artifacts/beats/elastic-agent
|
||||
logCmd "mkdir -p $beatsDir"
|
||||
if [[ ! -f "$agentArchive" ]]; then
|
||||
retry 15 10 "curl --fail --retry 5 --retry-delay 15 -L https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz --output $agentArchive" "" ""
|
||||
retry 15 10 "curl --fail --retry 5 --retry-delay 15 -L https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5 --output $agentMd5" "" ""
|
||||
|
||||
SOURCEHASH=$(md5sum $agentArchive | awk '{ print $1 }')
|
||||
HASH=$(cat $agentMd5)
|
||||
|
||||
if [[ "$HASH" == "$SOURCEHASH" ]]; then
|
||||
info "Elastic Agent source hash is good."
|
||||
else
|
||||
info "Unable to download the Elastic Agent source files."
|
||||
fail_setup
|
||||
fi
|
||||
if ! update_elastic_agent 2>&1 | tee -a "$setup_log"; then
|
||||
fail_setup
|
||||
fi
|
||||
logCmd "tar -xf $agentArchive -C $beatsDir"
|
||||
}
|
||||
|
||||
installer_progress_loop() {
|
||||
@@ -1897,7 +1882,9 @@ securityonion_repo() {
|
||||
if [[ $is_oracle ]]; then
|
||||
logCmd "dnf -v clean all"
|
||||
logCmd "mkdir -vp /root/oldrepos"
|
||||
logCmd "mv -v /etc/yum.repos.d/* /root/oldrepos/"
|
||||
if [ -n "$(ls -A /etc/yum.repos.d/ 2>/dev/null)" ]; then
|
||||
logCmd "mv -v /etc/yum.repos.d/* /root/oldrepos/"
|
||||
fi
|
||||
if [[ $is_desktop_iso ]]; then
|
||||
gpg_rpm_import
|
||||
if [[ ! $is_airgap ]]; then
|
||||
|
||||
@@ -661,6 +661,7 @@ if ! [[ -f $install_opt_file ]]; then
|
||||
logCmd "salt-call state.show_top"
|
||||
sleep 2 # Debug RSA Key format errors
|
||||
logCmd "salt-key -ya $MINION_ID"
|
||||
logCmd "salt-call saltutil.sync_all"
|
||||
|
||||
logCmd "salt-call state.apply common.packages"
|
||||
logCmd "salt-call state.apply common"
|
||||
@@ -694,9 +695,11 @@ if ! [[ -f $install_opt_file ]]; then
|
||||
logCmd "so-rule-update"
|
||||
title "Downloading YARA rules"
|
||||
logCmd "su socore -c '/usr/sbin/so-yara-download'"
|
||||
if [[ $monints ]]; then
|
||||
if [[ $monints || $is_import ]]; then
|
||||
title "Restarting Suricata to pick up the new rules"
|
||||
logCmd "so-suricata-restart"
|
||||
fi
|
||||
if [[ $monints ]]; then
|
||||
title "Restarting Strelka to use new rules"
|
||||
logCmd "so-strelka-restart"
|
||||
fi
|
||||
|
||||
@@ -51,6 +51,7 @@ log_has_errors() {
grep -vE "/nsm/rules/sigma*" | \
grep -vE "/nsm/rules/yara*" | \
grep -vE "Failed to restart snapd" | \
grep -vE "Login Failed Details" | \
grep -vE "Running scope as unit" &> "$error_log"

if [[ $? -eq 0 ]]; then

@@ -1012,9 +1012,9 @@ whiptail_manager_unreachable() {

local msg
read -r -d '' msg <<- EOM
Setup is unable to access the manager at this time.
Setup is unable to access the manager. This most likely means that you need to allow this machine to connect through the manager's firewall.

Run the following on the manager:
You can either go to SOC --> Administration --> Configuration and choose the correct firewall option from the list OR you can run the following command on the manager:

sudo so-firewall-minion --role=$install_type --ip=$MAINIP

BIN
sigs/securityonion-2.4.5-20230807.iso.sig
Normal file
Binary file not shown.