fix nginx merge conflicts

Commit by m0duspwnens on 2023-05-15 11:40:12 -04:00
46 changed files with 234 additions and 174 deletions

View File

@@ -238,7 +238,7 @@ gpg_rpm_import() {
local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/rocky/keys"
fi
RPMKEYS=('RPM-GPG-KEY-EPEL-9' 'SALT-PROJECT-GPG-PUBKEY-2023.pub' 'docker.pub' 'securityonion.pub')
RPMKEYS=('RPM-GPG-KEY-rockyofficial' 'RPM-GPG-KEY-EPEL-9' 'SALT-PROJECT-GPG-PUBKEY-2023.pub' 'docker.pub' 'securityonion.pub')
for RPMKEY in "${RPMKEYS[@]}"; do
rpm --import $RPMKEYSLOC/$RPMKEY

View File

@@ -8,30 +8,44 @@ docker:
final_octet: 20
port_bindings:
- 0.0.0.0:5000:5000
custom_bind_mounts: []
extra_hosts: []
'so-elastic-fleet':
final_octet: 21
port_bindings:
- 0.0.0.0:8220:8220/tcp
custom_bind_mounts: []
extra_hosts: []
'so-elasticsearch':
final_octet: 22
port_bindings:
- 0.0.0.0:9200:9200/tcp
- 0.0.0.0:9300:9300/tcp
custom_bind_mounts: []
extra_hosts: []
'so-idstools':
final_octet: 25
custom_bind_mounts: []
extra_hosts: []
'so-influxdb':
final_octet: 26
port_bindings:
- 0.0.0.0:8086:8086
custom_bind_mounts: []
extra_hosts: []
'so-kibana':
final_octet: 27
port_bindings:
- 0.0.0.0:5601:5601
custom_bind_mounts: []
extra_hosts: []
'so-kratos':
final_octet: 28
port_bindings:
- 0.0.0.0:4433:4433
- 0.0.0.0:4434:4434
custom_bind_mounts: []
extra_hosts: []
'so-logstash':
final_octet: 29
port_bindings:
@@ -45,58 +59,92 @@ docker:
- 0.0.0.0:6052:6052
- 0.0.0.0:6053:6053
- 0.0.0.0:9600:9600
custom_bind_mounts: []
extra_hosts: []
'so-mysql':
final_octet: 30
port_bindings:
- 0.0.0.0:3306:3306
custom_bind_mounts: []
extra_hosts: []
'so-nginx':
final_octet: 31
port_bindings:
- 80:80
- 443:443
- 8443:8443
custom_bind_mounts: []
extra_hosts: []
'so-playbook':
final_octet: 32
port_bindings:
- 0.0.0.0:3000:3000
custom_bind_mounts: []
extra_hosts: []
'so-redis':
final_octet: 33
port_bindings:
- 0.0.0.0:6379:6379
- 0.0.0.0:9696:9696
custom_bind_mounts: []
extra_hosts: []
'so-soc':
final_octet: 34
port_bindings:
- 0.0.0.0:9822:9822
custom_bind_mounts: []
extra_hosts: []
'so-soctopus':
final_octet: 35
port_bindings:
- 0.0.0.0:7000:7000
custom_bind_mounts: []
extra_hosts: []
'so-strelka-backend':
final_octet: 36
custom_bind_mounts: []
extra_hosts: []
'so-strelka-filestream':
final_octet: 37
custom_bind_mounts: []
extra_hosts: []
'so-strelka-frontend':
final_octet: 38
port_bindings:
- 0.0.0.0:57314:57314
custom_bind_mounts: []
extra_hosts: []
'so-strelka-manager':
final_octet: 39
custom_bind_mounts: []
extra_hosts: []
'so-strelka-gatekeeper':
final_octet: 40
port_bindings:
- 0.0.0.0:6381:6379
custom_bind_mounts: []
extra_hosts: []
'so-strelka-coordinator':
final_octet: 41
port_bindings:
- 0.0.0.0:6380:6379
custom_bind_mounts: []
extra_hosts: []
'so-elastalert':
final_octet: 42
custom_bind_mounts: []
extra_hosts: []
'so-curator':
final_octet: 43
custom_bind_mounts: []
extra_hosts: []
'so-elastic-fleet-package-registry':
final_octet: 44
port_bindings:
- 0.0.0.0:8080:8080/tcp
custom_bind_mounts: []
extra_hosts: []
'so-idh':
final_octet: 45
custom_bind_mounts: []
extra_hosts: []

View File

@@ -28,6 +28,18 @@ docker:
helpLink: docker.html
advanced: True
multiline: True
custom_bind_mounts:
description: List of custom local volume bindings.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
extra_hosts:
description: List of additional host entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
so-dockerregistry: *dockerOptions
so-elastalert: *dockerOptions
so-elastic-fleet-package-registry: *dockerOptions
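The two new options above are per-container lists rendered from the docker pillar. A minimal sketch of a local pillar override, assuming the same container-keyed nesting shown in the defaults file earlier in this commit (the mount path and host entry are illustrative, not values shipped by the commit):
  'so-nginx':
    custom_bind_mounts:
      - /opt/so/custom/html:/opt/socore/html:ro   # hostpath:containerpath[:options], one string per entry
    extra_hosts:
      - repo.internal.example.com:10.0.0.5        # hostname:IP added to the container's /etc/hosts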

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "osquery_manager",
"version": "1.6.0"
"version": ""
},
"name": "osquery-endpoints",
"namespace": "default",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "system",
"version": "1.25.2"
"version": ""
},
"name": "system-endpoints",
"namespace": "default",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "windows",
"version": "1.19.1"
"version": ""
},
"name": "windows-endpoints",
"namespace": "default",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "elasticsearch",
"version": "1.0.0"
"version": ""
},
"name": "elasticsearch-logs",
"namespace": "default",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "log",
"version": "1.1.1"
"version": ""
},
"name": "idh-logs",
"namespace": "so",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "log",
"version": "1.1.0"
"version": ""
},
"name": "import-evtx-logs",
"namespace": "so",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "log",
"version": "1.1.0"
"version": ""
},
"name": "import-suricata-logs",
"namespace": "so",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "log",
"version": "1.1.0"
"version": ""
},
"name": "import-zeek-logs",
"namespace": "so",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "log",
"version": "1.1.0"
"version": ""
},
"name": "kratos-logs",
"namespace": "so",
@@ -18,7 +18,7 @@
"/opt/so/log/kratos/kratos.log"
],
"data_stream.dataset": "kratos",
"tags": [],
"tags": ["so-kratos"],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
"custom": "pipeline: kratos"
}
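For readability, the escaped processors string above corresponds to the following agent processor definition (unescaped and re-indented as YAML purely for illustration):
- decode_json_fields:
    fields: ["message"]
    target: ""
    add_error_key: true
- add_fields:
    target: event
    fields:
      category: iam
      module: kratos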

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "osquery_manager",
"version": "1.6.0"
"version": ""
},
"name": "osquery-grid-nodes",
"namespace": "default",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "redis",
"version": "1.4.0"
"version": ""
},
"name": "redis-logs",
"namespace": "default",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "log",
"version": "1.1.2"
"version": ""
},
"name": "soc-auth-sync-logs",
"namespace": "so",
@@ -18,7 +18,7 @@
"/opt/so/log/soc/sync.log"
],
"data_stream.dataset": "soc",
"tags": [],
"tags": ["so-soc"],
"processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync",
"custom": "pipeline: common"
}

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "log",
"version": "1.1.2"
"version": ""
},
"name": "soc-salt-relay-logs",
"namespace": "so",
@@ -18,7 +18,7 @@
"/opt/so/log/soc/salt-relay.log"
],
"data_stream.dataset": "soc",
"tags": [],
"tags": ["so-soc"],
"processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay",
"custom": "pipeline: common"
}

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "log",
"version": "1.1.2"
"version": ""
},
"name": "soc-sensoroni-logs",
"namespace": "so",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "log",
"version": "1.1.2"
"version": ""
},
"name": "soc-server-logs",
"namespace": "so",
@@ -18,7 +18,7 @@
"/opt/so/log/soc/sensoroni-server.log"
],
"data_stream.dataset": "soc",
"tags": [],
"tags": ["so-soc"],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"custom": "pipeline: common"
}

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "log",
"version": "1.1.0"
"version": ""
},
"name": "strelka-logs",
"namespace": "so",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "log",
"version": "1.1.0"
"version": ""
},
"name": "suricata-logs",
"namespace": "so",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "tcp",
"version": "1.5.0"
"version": ""
},
"name": "syslog-tcp-514",
"namespace": "so",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "udp",
"version": "1.5.0"
"version": ""
},
"name": "syslog-udp-514",
"namespace": "so",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "system",
"version": "1.25.2"
"version": ""
},
"name": "system-grid-nodes",
"namespace": "default",

View File

@@ -1,7 +1,7 @@
{
"package": {
"name": "log",
"version": "1.1.0"
"version": ""
},
"name": "zeek-logs",
"namespace": "so",

View File

@@ -10,6 +10,7 @@
run_installer:
cmd.script:
- name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux
- cwd: /opt/so
- args: -token={{ GRIDNODETOKEN }}
{% endif %}

View File

@@ -19,37 +19,40 @@ FLEETHOST="{{ GLOBALS.manager_ip }}"
#ENROLLMENTOKEN=$2
TARGETOS=( "linux" "darwin" "windows" )
printf "\n### Get rid of any previous runs\n"
rm -rf /tmp/elastic-agent-workspace
mkdir -p /tmp/elastic-agent-workspace
printf "\n### Creating a temp directory at /nsm/elastic-agent-workspace\n"
rm -rf /nsm/elastic-agent-workspace
mkdir -p /nsm/elastic-agent-workspace
printf "\n### Extract outer tarball and then each individual tarball/zip\n"
tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-{{ GLOBALS.so_version }}.tar.gz -C /tmp/elastic-agent-workspace/
unzip /tmp/elastic-agent-workspace/elastic-agent-*.zip -d /tmp/elastic-agent-workspace/
for archive in /tmp/elastic-agent-workspace/*.tar.gz
printf "\n### Extracting outer tarball and then each individual tarball/zip\n"
tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-{{ GLOBALS.so_version }}.tar.gz -C /nsm/elastic-agent-workspace/
unzip /nsm/elastic-agent-workspace/elastic-agent-*.zip -d /nsm/elastic-agent-workspace/
for archive in /nsm/elastic-agent-workspace/*.tar.gz
do
tar xf "$archive" -C /tmp/elastic-agent-workspace/
tar xf "$archive" -C /nsm/elastic-agent-workspace/
done
printf "\n### Strip out unused components"
find /tmp/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -regex '.*fleet.*\|.*packet.*\|.*apm*.*\|.*audit.*\|.*heart.*\|.*cloud.*' -delete
printf "\n### Stripping out unused components"
find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -regex '.*fleet.*\|.*packet.*\|.*apm*.*\|.*audit.*\|.*heart.*\|.*cloud.*' -delete
printf "\n### Tar everything up again"
printf "\n### Tarring everything up again"
for OS in "${TARGETOS[@]}"
do
rm -rf /tmp/elastic-agent-workspace/elastic-agent
mv /tmp/elastic-agent-workspace/elastic-agent-*-$OS-x86_64 /tmp/elastic-agent-workspace/elastic-agent
tar -czvf /tmp/elastic-agent-workspace/$OS.tar.gz -C /tmp/elastic-agent-workspace elastic-agent
rm -rf /nsm/elastic-agent-workspace/elastic-agent
mv /nsm/elastic-agent-workspace/elastic-agent-*-$OS-x86_64 /nsm/elastic-agent-workspace/elastic-agent
tar -czvf /nsm/elastic-agent-workspace/$OS.tar.gz -C /nsm/elastic-agent-workspace elastic-agent
done
printf "\n### Generate OS packages using the cleaned up tarballs"
printf "\n### Generating OS packages using the cleaned up tarballs"
for OS in "${TARGETOS[@]}"
do
printf "\n\n### Generating $OS Installer...\n"
docker run -e CGO_ENABLED=0 -e GOOS=$OS \
--mount type=bind,source=/etc/ssl/certs/,target=/workspace/files/cert/ \
--mount type=bind,source=/tmp/elastic-agent-workspace/,target=/workspace/files/elastic-agent/ \
--mount type=bind,source=/nsm/elastic-agent-workspace/,target=/workspace/files/elastic-agent/ \
--mount type=bind,source=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/,target=/output/ \
{{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent-builder:{{ GLOBALS.so_version }} go build -ldflags "-X main.fleetHost=$FLEETHOST -X main.enrollmentToken=$ENROLLMENTOKEN" -o /output/so-elastic-agent_$OS
printf "\n### $OS Installer Generated...\n"
done
printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace"
rm -rf /nsm/elastic-agent-workspace

View File

@@ -12,7 +12,7 @@ printf "\n### Create ES Token ###\n"
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
### Create Outputs & Fleet URLs ###
printf "\nAdd Manager Elasticsearch Ouput...\n"
printf "\nAdd Manager Elasticsearch Output...\n"
ESCACRT=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
JSON_STRING=$( jq -n \
--arg ESCACRT "$ESCACRT" \

View File

@@ -3,8 +3,8 @@ idstools:
description: You can enable or disable IDSTools.
config:
oinkcode:
description: Enter your registration/oink code for paid NIDS rulesets.
title: registraion code
description: Enter your registration code or oinkcode for paid NIDS rulesets.
title: Registration Code
global: True
helpLink: rules.html
ruleset:
@@ -18,14 +18,14 @@ idstools:
helpLink: rules.html
sids:
disabled:
description: Contains the list of NIDS rules manually disabled across the grid. To disable a rule, add its signature ID (SID) to the Current Grid Value box, one entry per line. To disable multiple rules, you can use regular expressions.
description: Contains the list of NIDS rules manually disabled across the grid. To disable a rule, add its Signature ID (SID) to the Current Grid Value box, one entry per line. To disable multiple rules, you can use regular expressions.
global: True
multiline: True
forcedType: "[]string"
regex: \d*|re:.*
helpLink: managing-alerts.html
enabled:
description: Contains the list of NIDS rules manually enabled across the grid. To enable a rule, add its signature ID (SID) to the Current Grid Value box, one entry per line. To enable multiple rules, you can use regular expressions.
description: Contains the list of NIDS rules manually enabled across the grid. To enable a rule, add its Signature ID (SID) to the Current Grid Value box, one entry per line. To enable multiple rules, you can use regular expressions.
global: True
multiline: True
forcedType: "[]string"
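As the descriptions above state, each entry is either a numeric signature ID or a regular expression, which is what the \d*|re:.* constraint enforces. A minimal sketch of a local pillar override (the SID values and pattern are placeholders, not values from this commit):
idstools:
  sids:
    disabled:
      - "2019401"            # disable a single rule by SID
      - "re:.*ET INFO.*"     # disable multiple rules with a regular expression
    enabled:
      - "2100498"            # re-enable a rule across the grid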

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -40,18 +40,6 @@ logstash:
custom2: []
custom3: []
custom4: []
docker_options:
port_bindings:
- 0.0.0.0:3765:3765
- 0.0.0.0:5044:5044
- 0.0.0.0:5055:5055
- 0.0.0.0:5056:5056
- 0.0.0.0:5644:5644
- 0.0.0.0:6050:6050
- 0.0.0.0:6051:6051
- 0.0.0.0:6052:6052
- 0.0.0.0:6053:6053
- 0.0.0.0:9600:9600
settings:
lsheap: 500m
config:

View File

@@ -60,12 +60,6 @@ logstash:
helpLink: logstash.html
readonly: True
advanced: True
docker_options:
port_bindings:
description: List of ports to open to the logstash docker container. Firewall ports will still need to be added to the firewall configuration.
helpLink: logstash.html
advanced: True
multiline: True
dmz_nodes:
description: "List of receiver nodes in DMZs. Prevents sensors from sending to these receivers. Primarily used for external Elastic agents."
helpLink: logstash.html

View File

@@ -202,7 +202,7 @@ check_local_mods() {
default_file="${DEFAULT_SALT_DIR}${stripped_path}"
if [[ -f $default_file ]]; then
file_diff=$(diff "$default_file" "$local_file" )
if [[ $(echo "$file_diff" | grep -c "^<") -gt 0 ]]; then
if [[ $(echo "$file_diff" | grep -Ec "^[<>]") -gt 0 ]]; then
local_mod_arr+=( "$local_file" )
fi
fi

View File

@@ -12,18 +12,11 @@ include:
# Drop the correct nginx config based on role
nginxconfdir:
file.directory:
- name: /opt/so/conf/nginx/html
- name: /opt/so/conf/nginx
- user: 939
- group: 939
- makedirs: True
nginxhtml:
file.recurse:
- name: /opt/so/conf/nginx/html
- source: salt://nginx/html/
- user: 939
- group: 939
nginxconf:
file.managed:
- name: /opt/so/conf/nginx/nginx.conf

View File

@@ -2,3 +2,5 @@ nginx:
enabled: False
config:
replace_cert: False
throttle_login_burst: 6
throttle_login_rate: 10

View File

@@ -1,5 +1,6 @@
{%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'docker/docker.map.jinja' import DOCKER %}
{%- from 'nginx/map.jinja' import NGINXMERGED %}
{%- set role = grains.id.split('_') | last %}
{%- set influxpass = salt['pillar.get']('secrets:influx_pass') %}
{%- set influxauth = ('so:' + influxpass) | base64_encode %}
@@ -33,6 +34,8 @@ http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
limit_req_zone $binary_remote_addr zone=auth_throttle:10m rate={{ NGINXMERGED.config.throttle_login_rate }}r/m;
include /etc/nginx/conf.d/*.conf;
{%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %}
@@ -143,7 +146,21 @@ http {
proxy_set_header X-Forwarded-Proto $scheme;
}
location ~ ^/auth/.*?(whoami|login|logout|settings) {
location ~ ^/auth/.*?(login) {
rewrite /auth/(.*) /$1 break;
limit_req zone=auth_throttle burst={{ NGINXMERGED.config.throttle_login_burst }} nodelay;
limit_req_status 429;
proxy_pass http://{{ GLOBALS.manager }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location ~ ^/auth/.*?(whoami|logout|settings) {
rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ GLOBALS.manager }}:4433;
proxy_read_timeout 90;
@@ -276,6 +293,7 @@ http {
error_page 401 = @error401;
error_page 403 = @error403;
error_page 429 = @error429;
location @error401 {
add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400";
@@ -287,6 +305,10 @@ http {
return 302 /auth/self-service/login/browser;
}
location @error429 {
return 302 /login/?thr={{ (120 / NGINXMERGED.config.throttle_login_rate) | round | int }};
}
error_page 500 502 503 504 /50x.html;
location = /usr/share/nginx/html/50x.html {
}

Binary files not shown (three images removed: 948 B, 3.0 KiB, and 5.3 KiB).

View File

@@ -1,13 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>Security Onion - Hybrid Hunter</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="icon" type="image/png" href="favicon-32x32.png" sizes="32x32" />
<link rel="icon" type="image/png" href="favicon-16x16.png" sizes="16x16" />
</head>
<body>
Security Onion
</body>
</html>

View File

@@ -24,3 +24,11 @@ nginx:
advanced: True
global: True
helpLink: nginx.html
throttle_login_burst:
description: Number of login requests that can burst without triggering request throttling. Higher values allow more repeated login attempts. Values greater than zero are required in order to provide a usable login flow.
global: True
helpLink: nginx.html
throttle_login_rate:
description: Number of login API requests per minute that can be processed without triggering a rate limit. Higher values allow more repeated login attempts. Requests are counted by unique client IP and averaged over time. Note that a single login flow will perform multiple requests to the login API, so this value will need to be adjusted accordingly.
global: True
helpLink: nginx.html
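Together with the nginx.conf template changes earlier in this commit, these two values drive the limit_req_zone rate, the limit_req burst on the login location, and the wait hint returned by the @error429 redirect. A minimal sketch of a local pillar override using the defaults this commit ships (6 and 10):
nginx:
  config:
    throttle_login_burst: 6    # rendered as: limit_req zone=auth_throttle burst=6 nodelay
    throttle_login_rate: 10    # rendered as: limit_req_zone ... rate=10r/m
    # With rate=10, the 429 redirect becomes /login/?thr=12, since round(120 / 10) = 12.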

View File

@@ -1,29 +1,31 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: resf.keykeeper.v1
Comment: Keykeeper
mQINBGAofzYBEAC6yS1azw6f3wmaVd//3aSy6O2c9+jeetulRQvg2LvhRRS1eNqp
/x9tbBhfohu/tlDkGpYHV7diePgMml9SZDy1sKlI3tDhx6GZ3xwF0fd1vWBZpmNk
D9gRkUmYBeLotmcXQZ8ZpWLicosFtDpJEYpLUhuIgTKwt4gxJrHvkWsGQiBkJxKD
u3/RlL4IYA3Ot9iuCBflc91EyAw1Yj0gKcDzbOqjvlGtS3ASXgxPqSfU0uLC9USF
uKDnP2tcnlKKGfj0u6VkqISliSuRAzjlKho9Meond+mMIFOTT6qp4xyu+9Dj3IjZ
IC6rBXRU3xi8z0qYptoFZ6hx70NV5u+0XUzDMXdjQ5S859RYJKijiwmfMC7gZQAf
OkdOcicNzen/TwD/slhiCDssHBNEe86Wwu5kmDoCri7GJlYOlWU42Xi0o1JkVltN
D8ZId+EBDIms7ugSwGOVSxyZs43q2IAfFYCRtyKHFlgHBRe9/KTWPUrnsfKxGJgC
Do3Yb63/IYTvfTJptVfhQtL1AhEAeF1I+buVoJRmBEyYKD9BdU4xQN39VrZKziO3
hDIGng/eK6PaPhUdq6XqvmnsZ2h+KVbyoj4cTo2gKCB2XA7O2HLQsuGduHzYKNjf
QR9j0djjwTrsvGvzfEzchP19723vYf7GdcLvqtPqzpxSX2FNARpCGXBw9wARAQAB
tDNSZWxlYXNlIEVuZ2luZWVyaW5nIDxpbmZyYXN0cnVjdHVyZUByb2NreWxpbnV4
Lm9yZz6JAk4EEwEIADgWIQRwUcRwqSn0VM6+N7cVr12sbXRaYAUCYCh/NgIbDwUL
CQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRAVr12sbXRaYLFmEACSMvoO1FDdyAbu
1m6xEzDhs7FgnZeQNzLZECv2j+ggFSJXezlNVOZ5I1I8umBan2ywfKQD8M+IjmrW
k9/7h9i54t8RS/RN7KNo7ECGnKXqXDPzBBTs1Gwo1WzltAoaDKUfXqQ4oJ4aCP/q
/XPVWEzgpJO1XEezvCq8VXisutyDiXEjjMIeBczxb1hbamQX+jLTIQ1MDJ4Zo1YP
zlUqrHW434XC2b1/WbSaylq8Wk9cksca5J+g3FqTlgiWozyy0uxygIRjb6iTzKXk
V7SYxeXp3hNTuoUgiFkjh5/0yKWCwx7aQqlHar9GjpxmBDAO0kzOlgtTw//EqTwR
KnYZLig9FW0PhwvZJUigr0cvs/XXTTb77z/i/dfHkrjVTTYenNyXogPtTtSyxqca
61fbPf0B/S3N43PW8URXBRS0sykpX4SxKu+PwKCqf+OJ7hMEVAapqzTt1q9T7zyB
QwvCVx8s7WWvXbs2d6ZUrArklgjHoHQcdxJKdhuRmD34AuXWCLW+gH8rJWZpuNl3
+WsPZX4PvjKDgMw6YMcV7zhWX6c0SevKtzt7WP3XoKDuPhK1PMGJQqQ7spegGB+5
DZvsJS48Ip0S45Qfmj82ibXaCBJHTNZE8Zs+rdTjQ9DS5qvzRA1sRA1dBb/7OLYE
JmeWf4VZyebm+gc50szsg6Ut2yT8hw==
=AiP8
xsFNBGJ5RksBEADF/Lzssm7uryV6+VHAgL36klyCVcHwvx9Bk853LBOuHVEZWsme
kbJF3fQG7i7gfCKGuV5XW15xINToe4fBThZteGJziboSZRpkEQ2z3lYcbg34X7+d
co833lkBNgz1v6QO7PmAdY/x76Q6Hx0J9yiJWd+4j+vRi4hbWuh64vUtTd7rPwk8
0y3g4oK1YT0NR0Xm/QUO9vWmkSTVflQ6y82HhHIUrG+1vQnSOrWaC0O1lqUI3Nuo
b6jTARCmbaPsi+XVQnBbsnPPq6Tblwc+NYJSqj5d9nT0uEXT7Zovj4Je5oWVFXp9
P1OWkbo2z5XkKjoeobM/zKDESJR78h+YQAN9IOKFjL/u/Gzrk1oEgByCABXOX+H5
hfucrq5U3bbcKy4e5tYgnnZxqpELv3fN/2l8iZknHEh5aYNT5WXVHpD/8u2rMmwm
I9YTEMueEtmVy0ZV3opUzOlC+3ZUwjmvAJtdfJyeVW/VMy3Hw3Ih0Fij91rO613V
7n72ggVlJiX25jYyT4AXlaGfAOMndJNVgBps0RArOBYsJRPnvfHlLi5cfjVd7vYx
QhGX9ODYuvyJ/rW70dMVikeSjlBDKS08tvdqOgtiYy4yhtY4ijQC9BmCE9H9gOxU
FN297iLimAxr0EVsED96fP96TbDGILWsfJuxAvoqmpkElv8J+P1/F7to2QARAQAB
zU9Sb2NreSBFbnRlcnByaXNlIFNvZnR3YXJlIEZvdW5kYXRpb24gLSBSZWxlYXNl
IGtleSAyMDIyIDxyZWxlbmdAcm9ja3lsaW51eC5vcmc+wsGKBBMBCAA0BQJieUZL
FiEEIcslauFvxUxuZSlJcC1CbTUNJ10CGwMCHgECGQEDCwkHAhUIAxYAAgIiAQAK
CRBwLUJtNQ0nXWQ5D/9472seOyRO6//bQ2ns3w9lE+aTLlJ5CY0GSTb4xNuyv+AD
IXpgvLSMtTR0fp9GV3vMw6QIWsehDqt7O5xKWi+3tYdaXRpb1cvnh8r/oCcvI4uL
k8kImNgsx+Cj+drKeQo03vFxBTDi1BTQFkfEt32fA2Aw5gYcGElM717sNMAMQFEH
P+OW5hYDH4kcLbtUypPXFbcXUbaf6jUjfiEp5lLjqquzAyDPLlkzMr5RVa9n3/rI
R6OQp5loPVzCRZMgDLALBU2TcFXLVP+6hAW8qM77c+q/rOysP+Yd+N7GAd0fvEvA
mfeA4Y6dP0mMRu96EEAJ1qSKFWUul6K6nuqy+JTxktpw8F/IBAz44na17Tf02MJH
GCUWyM0n5vuO5kK+Ykkkwd+v43ZlqDnwG7akDkLwgj6O0QNx2TGkdgt3+C6aHN5S
MiF0pi0qYbiN9LO0e05Ai2r3zTFC/pCaBWlG1ph2jx1pDy4yUVPfswWFNfe5I+4i
CMHPRFsZNYxQnIA2Prtgt2YMwz3VIGI6DT/Z56Joqw4eOfaJTTQSXCANts/gD7qW
D3SZXPc7wQD63TpDEjJdqhmepaTECbxN7x/p+GwIZYWJN+AYhvrfGXfjud3eDu8/
i+YIbPKH1TAOMwiyxC106mIL705p+ORf5zATZMyB8Y0OvRIz5aKkBDFZM2QN6A==
=PzIf
-----END PGP PUBLIC KEY BLOCK-----

View File

@@ -562,6 +562,8 @@ strelka:
- gen_susp_xor.yar
- gen_webshells_ext_vars.yar
- configured_vulns_ext_vars.yar
- expl_outlook_cve_2023_23397.yar
- gen_mal_3cx_compromise_mar23.yar
filecheck:
historypath: '/nsm/strelka/history/'
strelkapath: '/nsm/strelka/unprocessed/'

View File

@@ -61,7 +61,7 @@ add_mngr_ip_to_hosts() {
add_socore_user_manager() {
info "Adding socore user"
logCmd "so_add_user 'socore' '939' '939' '/opt/so'"
logCmd "so_add_user socore 939 939 /opt/so"
}
add_web_user() {
@@ -967,15 +967,15 @@ detect_os() {
}
download_elastic_agent_artifacts() {
#TODO - ISO
logCmd "mkdir -p /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
logCmd "curl --retry 5 --retry-delay 60 https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$SOVERSION.tar.gz --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz"
logCmd "tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz -C /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
}
if [[ $is_iso ]]; then
logCmd "tar -xf /nsm/elastic-fleet/artifacts/beats/elastic-agent_SO-$SOVERSION.tar.gz -C /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
else
logCmd "mkdir -p /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
logCmd "curl --retry 5 --retry-delay 60 https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$SOVERSION.tar.gz --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz"
logCmd "tar -xf /nsm/elastic-fleet/artifacts/beats/elastic-agent_SO-$SOVERSION.tar.gz -C /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
fi
}
installer_progress_loop() {
local i=0
@@ -1948,9 +1948,11 @@ securityonion_repo() {
logCmd "dnf repolist all"
fi
if [[ $waitforstate ]]; then
# Build the repo locally so we can use it
echo "Syncing Repo"
repo_sync_local
if [[ ! $is_airgap ]]; then
# Build the repo locally so we can use it
echo "Syncing Repo"
repo_sync_local
fi
fi
fi
}
@@ -2196,12 +2198,12 @@ setup_salt_master_dirs() {
logCmd "rsync -avh --exclude 'TRANS.TBL' /home/$INSTALLUSERNAME/SecurityOnion/pillar/* $default_salt_dir/pillar/"
logCmd "rsync -avh --exclude 'TRANS.TBL' /home/$INSTALLUSERNAME/SecurityOnion/salt/* $default_salt_dir/salt/"
logCmd "mkdir -p $local_salt_dir/salt/zeek/policy/intel"
logCmd "cp -Rv /home/$INSTALLUSERNAME/SecurityOnion/files/intel.dat $local_salt_dir/salt/zeek/policy/intel/"
logCmd "touch $local_salt_dir/salt/zeek/policy/intel/intel.dat"
else
logCmd "cp -Rv ../pillar/* $default_salt_dir/pillar/"
logCmd "cp -Rv ../salt/* $default_salt_dir/salt/"
logCmd "mkdir -p $local_salt_dir/salt/zeek/policy/intel"
logCmd "cp -Rv files/intel.dat $local_salt_dir/salt/zeek/policy/intel/"
logCmd "touch $local_salt_dir/salt/zeek/policy/intel/intel.dat"
fi
info "Chown the salt dirs on the manager for socore"
@@ -2295,8 +2297,8 @@ set_initial_firewall_access() {
set_management_interface() {
title "Setting up the main interface"
if [ "$address_type" = 'DHCP' ]; then
logCmd "nmcli con mod '$MNIC' connection.autoconnect yes"
logCmd "nmcli con up '$MNIC'"
logCmd "nmcli con mod $MNIC connection.autoconnect yes"
logCmd "nmcli con up $MNIC"
else
# Set Static IP
nmcli con mod "$MNIC" ipv4.addresses "$MIP"/"$MMASK"\
@@ -2331,8 +2333,8 @@ so_add_user() {
if [ "$5" ]; then local pass=$5; fi
info "Add $username user"
logCmd "groupadd --gid '$gid' '$username'"
logCmd "useradd -m --uid '$uid' --gid '$gid' --home-dir '$home_dir' '$username'"
logCmd "groupadd --gid $gid $username"
logCmd "useradd -m --uid $uid --gid $gid --home-dir $home_dir $username"
# If a password has been passed in, set the password
if [ "$pass" ]; then

View File

@@ -335,45 +335,53 @@ process_installtype
# If this is not an automated install prompt
if ! [[ -f $install_opt_file ]]; then
# If you are a manager ask ALL the manager things here. I know there is code re-use but this makes it easier to add new roles.
# If you are a manager ask ALL the manager things here. I know there is code re-use but this makes it easier to add new roles
if [[ $is_eval ]]; then
# waitforstate means we will run the full salt state at the end. This is for only nodes running the salt-master service
waitforstate=true
#ubuntu_check
# Does this role have monitoring interfaces?
monints=true
# Prompt the user to accept the elastic license
check_elastic_license
# If it is an install from ISO is this airgap?
[[ $is_iso ]] && whiptail_airgap
# Make sure minimum requirements are met
check_requirements "manager"
# Do networking things
networking_needful
collect_net_method
# Do we need a proxy?
[[ ! $is_airgap ]] && collect_net_method
# Do we need to change the dockernet subnet?
collect_dockernet
if [[ $is_iso ]]; then
whiptail_airgap
fi
detect_cloud
# Are we in the clouds?
[[ ! $is_airgap ]] && detect_cloud
# Sets some minion info
set_minion_info
set_default_log_size >> $setup_log 2>&1
info "Verifying all network devices are managed by Network Manager that should be"
check_network_manager_conf
set_network_dev_status_list
# What NIC for watching network traffic?
whiptail_sensor_nics
# How many cores do we have?
calculate_useable_cores
# What is the web user?
collect_webuser_inputs
# How are we accessing the UI?
get_redirect
# Does the user want to allow access to the UI?
collect_so_allow
whiptail_end_settings
elif [[ $is_standalone ]]; then
waitforstate=true
#ubuntu_check
monints=true
check_elastic_license
[[ $is_iso ]] && whiptail_airgap
check_requirements "manager"
networking_needful
collect_net_method
[[ ! $is_airgap ]] && collect_net_method
collect_dockernet
if [[ $is_iso ]]; then
whiptail_airgap
fi
detect_cloud
[[ ! $is_airgap ]] && detect_cloud
set_minion_info
set_default_log_size >> $setup_log 2>&1
info "Verifying all network devices are managed by Network Manager that should be"
@@ -389,14 +397,12 @@ if ! [[ -f $install_opt_file ]]; then
check_elastic_license
waitforstate=true
#ubuntu_check
[[ $is_iso ]] && whiptail_airgap
check_requirements "manager"
networking_needful
collect_net_method
[[ ! $is_airgap ]] && collect_net_method
collect_dockernet
if [[ $is_iso ]]; then
whiptail_airgap
fi
detect_cloud
[[ ! $is_airgap ]] && detect_cloud
set_minion_info
set_default_log_size >> $setup_log 2>&1
info "Verifying all network devices are managed by Network Manager that should be"
@@ -410,15 +416,12 @@ if ! [[ -f $install_opt_file ]]; then
elif [[ $is_managersearch ]]; then
check_elastic_license
waitforstate=true
#ubuntu_check
[[ $is_iso ]] && whiptail_airgap
check_requirements "manager"
networking_needful
collect_net_method
[[ ! $is_airgap ]] && collect_net_method
collect_dockernet
if [[ $is_iso ]]; then
whiptail_airgap
fi
detect_cloud
[[ ! $is_airgap ]] && detect_cloud
set_minion_info
set_default_log_size >> $setup_log 2>&1
info "Verifying all network devices are managed by Network Manager that should be"
@@ -430,7 +433,6 @@ if ! [[ -f $install_opt_file ]]; then
collect_so_allow
whiptail_end_settings
elif [[ $is_sensor ]]; then
#ubuntu_check
installer_prereq_packages
monints=true
check_requirements "sensor"
@@ -459,7 +461,6 @@ if ! [[ -f $install_opt_file ]]; then
whiptail_end_settings
elif [[ $is_searchnode ]]; then
#ubuntu_check
installer_prereq_packages
check_requirements "elasticsearch"
networking_needful
@@ -473,7 +474,6 @@ if ! [[ -f $install_opt_file ]]; then
whiptail_end_settings
elif [[ $is_heavynode ]]; then
#ubuntu_check
installer_prereq_packages
monints=true
check_requirements "heavynode"
@@ -486,29 +486,26 @@ if ! [[ -f $install_opt_file ]]; then
whiptail_end_settings
elif [[ $is_idh ]]; then
#ubuntu_check
installer_prereq_packages
check_requirements "idh"
networking_needful
collect_mngr_hostname
add_mngr_ip_to_hosts
check_manager_connection
#collect_idh_services (this may be added back sometime in the future)
collect_idh_preferences
set_minion_info
whiptail_end_settings
elif [[ $is_import ]]; then
#ubuntu_check
waitforstate=true
monints=true
[[ $is_iso ]] && whiptail_airgap
check_elastic_license
check_requirements "import"
networking_needful
if [[ $is_iso ]]; then
whiptail_airgap
fi
detect_cloud
[[ ! $is_airgap ]] && detect_cloud
collect_dockernet
[[ ! $is_airgap ]] && collect_net_method
set_minion_info
set_default_log_size >> $setup_log 2>&1
info "Verifying all network devices are managed by Network Manager that should be"
@@ -521,7 +518,6 @@ if ! [[ -f $install_opt_file ]]; then
whiptail_end_settings
elif [[ $is_receiver ]]; then
#ubuntu_check
installer_prereq_packages
check_requirements "receiver"
networking_needful

View File

@@ -31,7 +31,7 @@ log_has_errors() {
# Ignore Failed: 0 since that is the salt state output, and we detect state failures
# via Result: False already.
grep -E "FAILED|Failed|failed|ERROR|Result: False" "$setup_log" | \
grep -vE "The Salt Master has cached the public key for this node" | \
grep -vE "Minion failed to authenticate with the master" | \