Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Synced 2025-12-19 15:33:06 +01:00

Compare commits: c4a70b540e...reyesj2/el (26 commits)

Commits in this comparison: 4780371ebe, 6c879cbd13, 089b5aaf44, b61885add5, 5cb1e284af, e3a4f0873e, 7977a020ac, d518f75468, 04d6cca204, 5ab6bda639, f433de7e12, 8ef6c2f91d, 7575218697, dc945dad00, ddcd74ffd2, e105bd12e6, f5688175b6, 72a4ba405f, ba49765312, 72c8c2371e, 80411ab6cf, 0ff8fa57e7, 411f28a049, 0f42233092, 2dd49f6d9b, 271f545f4f
@@ -60,7 +60,7 @@ so-elastalert:
 - watch:
 - file: elastaconf
 - onlyif:
-- "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}
+- "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 9" {# only run this state if elasticsearch is version 9 #}

 delete_so-elastalert_so-status.disabled:
 file.uncomment:
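The onlyif guard above can be exercised by hand to see which branch a grid would take; this is only an illustrative check that reuses the same command the state runs, assuming so-elasticsearch-query is available on the manager:

```bash
# Illustrative check, not part of the change: print the major version the guard matches on.
major=$(so-elasticsearch-query / | jq -r '.version.number[0:1]')
echo "Elasticsearch major version: ${major}"
# The updated state now only applies when this prints 9.
```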
@@ -5,7 +5,7 @@
 "package": {
 "name": "endpoint",
 "title": "Elastic Defend",
-"version": "8.18.1",
+"version": "9.0.2",
 "requires_root": true
 },
 "enabled": true,
@@ -21,6 +21,7 @@
 'azure_application_insights.app_state': 'azure.app_state',
 'azure_billing.billing': 'azure.billing',
 'azure_functions.metrics': 'azure.function',
+'azure_ai_foundry.metrics': 'azure.ai_foundry',
 'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset',
 'azure_metrics.compute_vm': 'azure.compute_vm',
 'azure_metrics.container_instance': 'azure.container_instance',
@@ -86,7 +86,7 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
 latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list)
 echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST
 rm -f $INSTALLED_PACKAGE_LIST
-echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
+echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .installationInfo.version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST

 while read -r package; do
 # get package details
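The jq change above reads the installed version from installationInfo.version instead of the saved object attributes. A standalone sketch with a mocked package item (field values invented for illustration) shows what the new path extracts:

```bash
# Illustrative only: mocked Fleet package item demonstrating the new field path.
cat <<'EOF' | jq '{name: .name, latest_version: .version, installed_version: .installationInfo.version}'
{"name": "endpoint", "version": "9.0.2", "installationInfo": {"version": "8.18.1"}}
EOF
# installed_version resolves to "8.18.1" via .installationInfo.version
```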
@@ -47,7 +47,7 @@ if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
 --arg KAFKACA "$KAFKACA" \
 --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
 --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
-'{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
+'{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
 )
 if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
 echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
@@ -67,7 +67,7 @@ elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
 --arg ENABLED_DISABLED "$ENABLED_DISABLED"\
 --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
 --argjson HOSTS "$HOSTS" \
-'{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
+'{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
 )
 if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
 echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"
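Both payloads above drop the legacy topics array in favor of a single topic string. To sanity-check a generated payload before it is sent to the Fleet API, a quick jq test (illustrative, not part of the upstream script) could look like:

```bash
# Illustrative only: confirm the payload built into JSON_STRING uses the single-topic schema.
echo "$JSON_STRING" | jq -e 'has("topic") and (.topic | type == "string") and (has("topics") | not)' \
  > /dev/null && echo "payload uses the new single-topic field"
```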
@@ -1,6 +1,6 @@
 elasticsearch:
 enabled: false
-version: 8.18.8
+version: 9.0.8
 index_clean: true
 config:
 action:
@@ -15,7 +15,7 @@ set -e
 if [ ! -f /opt/so/saltstack/local/salt/elasticsearch/cacerts ]; then
 docker run -v /etc/pki/ca.crt:/etc/ssl/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:$ELASTIC_AGENT_TARBALL_VERSION -keystore /usr/share/elasticsearch/jdk/lib/security/cacerts -alias SOSCA -import -file /etc/ssl/ca.crt -storepass changeit -noprompt
 docker cp so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts
-docker cp so-elasticsearchca:/etc/ssl/certs/ca-certificates.crt /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
+docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
 docker rm so-elasticsearchca
 echo "" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
 echo "sosca" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
@@ -63,7 +63,7 @@ logstash:
 settings:
 lsheap: 500m
 config:
-http_x_host: 0.0.0.0
+api_x_http_x_host: 0.0.0.0
 path_x_logs: /var/log/logstash
 pipeline_x_workers: 1
 pipeline_x_batch_x_size: 125
@@ -5,10 +5,10 @@ input {
 codec => es_bulk
 request_headers_target_field => client_headers
 remote_host_target_field => client_host
-ssl => true
+ssl_enabled => true
 ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
 ssl_certificate => "/usr/share/logstash/filebeat.crt"
 ssl_key => "/usr/share/logstash/filebeat.key"
-ssl_verify_mode => "peer"
+ssl_client_authentication => "required"
 }
 }
@@ -2,11 +2,11 @@ input {
 elastic_agent {
 port => 5055
 tags => [ "elastic-agent", "input-{{ GLOBALS.hostname }}" ]
-ssl => true
+ssl_enabled => true
 ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
 ssl_certificate => "/usr/share/logstash/elasticfleet-logstash.crt"
 ssl_key => "/usr/share/logstash/elasticfleet-logstash.key"
-ssl_verify_mode => "force_peer"
+ssl_client_authentication => "required"
 ecs_compatibility => v8
 }
 }
@@ -2,7 +2,7 @@ input {
 elastic_agent {
 port => 5056
 tags => [ "elastic-agent", "fleet-lumberjack-input" ]
-ssl => true
+ssl_enabled => true
 ssl_certificate => "/usr/share/logstash/elasticfleet-lumberjack.crt"
 ssl_key => "/usr/share/logstash/elasticfleet-lumberjack.key"
 ecs_compatibility => v8
@@ -8,8 +8,8 @@ output {
 document_id => "%{[metadata][_id]}"
 index => "so-ip-mappings"
 silence_errors_in_log => ["version_conflict_engine_exception"]
-ssl => true
-ssl_certificate_verification => false
+ssl_enabled => true
+ssl_verification_mode => "none"
 }
 }
 else {
@@ -25,8 +25,8 @@ output {
 document_id => "%{[metadata][_id]}"
 pipeline => "%{[metadata][pipeline]}"
 silence_errors_in_log => ["version_conflict_engine_exception"]
-ssl => true
-ssl_certificate_verification => false
+ssl_enabled => true
+ssl_verification_mode => "none"
 }
 }
 else {
@@ -37,8 +37,8 @@ output {
 user => "{{ ES_USER }}"
 password => "{{ ES_PASS }}"
 pipeline => "%{[metadata][pipeline]}"
-ssl => true
-ssl_certificate_verification => false
+ssl_enabled => true
+ssl_verification_mode => "none"
 }
 }
 }
@@ -49,8 +49,8 @@ output {
 data_stream => true
 user => "{{ ES_USER }}"
 password => "{{ ES_PASS }}"
-ssl => true
-ssl_certificate_verification => false
+ssl_enabled => true
+ssl_verification_mode=> "none"
 }
 }
 }
@@ -13,8 +13,8 @@ output {
 user => "{{ ES_USER }}"
 password => "{{ ES_PASS }}"
 index => "endgame-%{+YYYY.MM.dd}"
-ssl => true
-ssl_certificate_verification => false
+ssl_enabled => true
+ssl_verification_mode => "none"
 }
 }
 }
@@ -56,7 +56,7 @@ logstash:
 helpLink: logstash.html
 global: False
 config:
-http_x_host:
+api_x_http_x_host:
 description: Host interface to listen to connections.
 helpLink: logstash.html
 readonly: True
@@ -214,7 +214,7 @@ git_config_set_safe_dirs:

 surinsmrulesdir:
 file.directory:
-- name: /nsm/rules/suricata
+- name: /nsm/rules/suricata/etopen
 - user: 939
 - group: 939
 - makedirs: True
@@ -427,6 +427,7 @@ preupgrade_changes() {
 [[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180
 [[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190
 [[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200
+[[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.210
 true
 }

@@ -459,6 +460,7 @@ postupgrade_changes() {
 [[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180
 [[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190
 [[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200
+[[ "$POSTVERSION" == 2.4.200 ]] && post_to_2.4.210
 true
 }

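Both functions rely on the same chaining trick: each guard fires only after the previous step has bumped the version variable, so one soup run walks every intermediate migration in order. A minimal standalone illustration of that pattern (not the real soup code):

```bash
#!/bin/bash
# Illustrative only: how the chained version guards advance one step at a time.
INSTALLEDVERSION=2.4.190
up_to_2.4.200() { echo "running 2.4.200 migration"; INSTALLEDVERSION=2.4.200; }
up_to_2.4.210() { echo "running 2.4.210 migration"; INSTALLEDVERSION=2.4.210; }

[[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200
[[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.210
echo "now at $INSTALLEDVERSION"   # prints: now at 2.4.210
```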
@@ -615,9 +617,6 @@ post_to_2.4.180() {
 }

 post_to_2.4.190() {
-echo "Regenerating Elastic Agent Installers"
-/sbin/so-elastic-agent-gen-installers
-
 # Only need to update import / eval nodes
 if [[ "$MINION_ROLE" == "import" ]] || [[ "$MINION_ROLE" == "eval" ]]; then
 update_import_fleet_output
@@ -645,6 +644,13 @@ post_to_2.4.200() {
 POSTVERSION=2.4.200
 }

+post_to_2.4.210() {
+echo "Regenerating Elastic Agent Installers"
+/sbin/so-elastic-agent-gen-installers
+
+POSTVERSION=2.4.210
+}
+
 repo_sync() {
 echo "Sync the local repo."
 su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -906,9 +912,7 @@ up_to_2.4.180() {
 }

 up_to_2.4.190() {
-# Elastic Update for this release, so download Elastic Agent files
-determine_elastic_agent_upgrade
-
+echo "Nothing to do for 2.4.190"
 INSTALLEDVERSION=2.4.190
 }

@@ -921,6 +925,13 @@ up_to_2.4.200() {
 INSTALLEDVERSION=2.4.200
 }

+up_to_2.4.210() {
+# Elastic Update for this release, so download Elastic Agent files
+determine_elastic_agent_upgrade
+
+INSTALLEDVERSION=2.4.210
+}
+
 add_hydra_pillars() {
 mkdir -p /opt/so/saltstack/local/pillar/hydra
 touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
@@ -1113,9 +1124,13 @@ suricata_idstools_removal_pre() {
 install -d -o 939 -g 939 -m 755 /opt/so/conf/soc/fingerprints
 install -o 939 -g 939 -m 644 /dev/null /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
 cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF
-Suricata ruleset sync is blocked until this file is removed. Make sure that you have manually added any custom Suricata rulesets via SOC config - review the documentation for more details: securityonion.net/docs
+Suricata ruleset sync is blocked until this file is removed. **CRITICAL** Make sure that you have manually added any custom Suricata rulesets via SOC config before removing this file - review the documentation for more details: https://docs.securityonion.net/en/2.4/nids.html#sync-block
 EOF

+# Remove possible symlink & create salt local rules dir
+[ -L /opt/so/saltstack/local/salt/suricata/rules ] && rm -f /opt/so/saltstack/local/salt/suricata/rules
+install -d -o 939 -g 939 /opt/so/saltstack/local/salt/suricata/rules/ || echo "Failed to create Suricata local rules directory"
+
 # Backup custom rules & overrides
 mkdir -p /nsm/backup/detections-migration/2-4-200
 cp /usr/sbin/so-rule-update /nsm/backup/detections-migration/2-4-200
@@ -1127,6 +1142,7 @@ if [[ -f /opt/so/conf/soc/so-detections-backup.py ]]; then
 # Verify backup by comparing counts
 echo "Verifying detection overrides backup..."
 es_override_count=$(/sbin/so-elasticsearch-query 'so-detection/_count' \
+--retry 5 --retry-delay 10 --retry-all-errors \
 -d '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}}}' | jq -r '.count') || {
 echo " Error: Failed to query Elasticsearch for override count"
 exit 1
@@ -1297,7 +1313,6 @@ if [ -n "$(docker ps -q -f name=^so-idstools$)" ]; then
 fi

 echo "Removing idstools symlink and scripts..."
-rm /opt/so/saltstack/local/salt/suricata/rules
 rm -rf /usr/sbin/so-idstools*
 sed -i '/^#\?so-idstools$/d' /opt/so/conf/so-status/so-status.conf

@@ -1355,7 +1370,7 @@ unmount_update() {

 update_airgap_rules() {
 # Copy the rules over to update them for airgap.
-rsync -a $UPDATE_DIR/agrules/suricata/* /nsm/rules/suricata/
+rsync -a --delete $UPDATE_DIR/agrules/suricata/ /nsm/rules/suricata/etopen/
 rsync -a $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/
 rsync -a $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/
 # Copy the securityonion-resorces repo over for SOC Detection Summaries and checkout the published summaries branch
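The rsync change is more than a path rename: switching from `suricata/*` to a trailing-slash source with --delete means files that disappeared from the update media are also pruned from /nsm/rules/suricata/etopen/. A throwaway illustration of that behavior (temporary paths invented for the example):

```bash
# Illustrative only: --delete removes destination files that no longer exist in the source.
mkdir -p /tmp/agdemo/src /tmp/agdemo/dst
touch /tmp/agdemo/src/emerging-current.rules /tmp/agdemo/dst/emerging-stale.rules
rsync -a --delete /tmp/agdemo/src/ /tmp/agdemo/dst/
ls /tmp/agdemo/dst   # only emerging-current.rules remains; the stale file was deleted
```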
@@ -1604,6 +1619,60 @@ verify_latest_update_script() {
 fi

 }

+verify_es_version_compatibility() {
+
+# Define supported upgrade paths for SO ES versions
+declare -A es_upgrade_map=(
+["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
+["8.17.3"]="8.18.4 8.18.6 8.18.8"
+["8.18.4"]="8.18.6 8.18.8 9.0.8"
+["8.18.6"]="8.18.8 9.0.8"
+["8.18.8"]="9.0.8"
+)
+
+# ES version stepping stones. Elasticsearch MUST upgrade through these versions
+declare -A es_to_so_version=(
+["8.18.8"]="2.4.190-20251024"
+["9.0.8"]="2.4.210"
+)
+
+# Get current Elasticsearch version
+if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then
+es_version=$(echo "$es_version_raw" | jq -r '.version.number' )
+else
+echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version."
+exit 1
+fi
+
+# Get the target ES version from the updatedir
+# DOUBLE TAP ON THIS SOMEONE FROM AN OLD VERSION UPGRADING TO ANOTHER VERSION OLDER THAN 2.4.110 WOULD HAVE AN ERROR HERE. Prior to this version there was no version defined in defaults.yaml
+target_es_version=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version | sed -n '1p')
+
+if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " ]]; then
+# supported upgrade
+return 0
+else
+compatible_versions=${es_upgrade_map[$es_version]}
+next_step_so_version=${es_to_so_version[${compatible_versions##* }]}
+echo ""
+echo "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION)."
+echo ""
+
+if [[ $is_airgap -eq 0 ]]; then
+echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso"
+else
+echo "You can use the following soup command to upgrade to $next_step_so_version;"
+echo " sudo BRANCH=$next_step_so_version soup"
+echo ""
+fi
+echo "*** Once you have updated to $next_step_so_version, you can then run soup again to update to $(cat $UPDATE_DIR/VERSION). ***"
+echo ""
+exit 1
+fi
+
+}
+
 # Keeping this block in case we need to do a hotfix that requires salt update
 apply_hotfix() {
 if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
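The new check hinges on two bash associative arrays: es_upgrade_map lists which target Elasticsearch versions each running version may move to, and `${compatible_versions##* }` keeps only the last (newest) allowed version so the right stepping-stone Security Onion release can be suggested. A trimmed, standalone sketch of that lookup, with inputs hard-coded for illustration and values copied from the diff:

```bash
#!/bin/bash
# Illustrative only: the lookup logic behind verify_es_version_compatibility.
declare -A es_upgrade_map=(
  ["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
  ["8.18.8"]="9.0.8"
)
declare -A es_to_so_version=( ["8.18.8"]="2.4.190-20251024" ["9.0.8"]="2.4.210" )

es_version="8.14.3"        # would normally come from so-elasticsearch-query /
target_es_version="9.0.8"  # would normally come from the new defaults.yaml

if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " ]]; then
  echo "supported direct upgrade"
else
  compatible_versions=${es_upgrade_map[$es_version]}
  # ${var##* } strips everything up to the last space, keeping the newest allowed version
  echo "update to Security Onion ${es_to_so_version[${compatible_versions##* }]} first"
fi
# prints: update to Security Onion 2.4.190-20251024 first
```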
@@ -1700,6 +1769,8 @@ main() {
 echo "Verifying we have the latest soup script."
 verify_latest_update_script

+verify_es_version_compatibility
+
 echo "Let's see if we need to update Security Onion."
 upgrade_check
 upgrade_space
@@ -1622,12 +1622,11 @@ soc:
 sourceType: directory
 airgap:
 - name: Emerging-Threats
-description: "Emerging Threats ruleset - To enable ET Pro, enter your license key below. Leave empty for ET Open (free) rules."
+description: "Emerging Threats ruleset - To enable ET Pro on Airgap, review the documentation at https://docs.securityonion.net/suricata"
 licenseKey: ""
 enabled: true
-sourceType: url
-sourcePath: 'https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz'
-urlHash: "https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz.md5"
+sourceType: directory
+sourcePath: /nsm/rules/suricata/etopen/
 license: "BSD"
 excludeFiles:
 - "*deleted*"
@@ -2653,12 +2652,6 @@ soc:
 thresholdColorRatioMed: 0.75
 thresholdColorRatioMax: 1
 availableModels:
-- id: sonnet-4
-displayName: Claude Sonnet 4
-contextLimitSmall: 200000
-contextLimitLarge: 1000000
-lowBalanceColorAlert: 500000
-enabled: true
 - id: sonnet-4.5
 displayName: Claude Sonnet 4.5
 contextLimitSmall: 200000
@@ -70,7 +70,7 @@

 {# Define the Detections custom ruleset that should always be present #}
 {% set CUSTOM_RULESET = {
-'name': 'custom',
+'name': '__custom__',
 'description': 'User-created custom rules created via the Detections module in the SOC UI',
 'sourceType': 'elasticsearch',
 'sourcePath': 'so_detection.ruleset:__custom__',
@@ -83,7 +83,7 @@
 {# Always append the custom ruleset to suricataengine.rulesetSources if not already present #}
 {% if SOCMERGED.config.server.modules.suricataengine is defined and SOCMERGED.config.server.modules.suricataengine.rulesetSources is defined %}
 {% if SOCMERGED.config.server.modules.suricataengine.rulesetSources is not mapping %}
-{% set custom_names = SOCMERGED.config.server.modules.suricataengine.rulesetSources | selectattr('name', 'equalto', 'custom') | list %}
+{% set custom_names = SOCMERGED.config.server.modules.suricataengine.rulesetSources | selectattr('name', 'equalto', '__custom__') | list %}
 {% if custom_names | length == 0 %}
 {% do SOCMERGED.config.server.modules.suricataengine.rulesetSources.append(CUSTOM_RULESET) %}
 {% endif %}
@@ -108,21 +108,39 @@
 {% if ruleset.name == 'Emerging-Threats' %}
 {% if ruleset.licenseKey and ruleset.licenseKey != '' %}
 {# License key is defined - transform to ETPRO #}
-{# Engine Version is hardcoded in the URL - this does not change often: https://community.emergingthreats.net/t/supported-engines/71 #}
-{% do ruleset.update({
-'name': 'ETPRO',
-'sourcePath': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz',
-'urlHash': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz.md5',
-'license': 'Commercial'
-}) %}
+{% if ruleset.sourceType == 'directory' %}
+{# Airgap mode - update directory path #}
+{% do ruleset.update({
+'name': 'ETPRO',
+'sourcePath': '/nsm/rules/custom-local-repos/local-etpro-suricata/etpro.rules.tar.gz',
+'license': 'Commercial'
+}) %}
+{% else %}
+{# Engine Version is hardcoded in the URL - this does not change often: https://community.emergingthreats.net/t/supported-engines/71 #}
+{% do ruleset.update({
+'name': 'ETPRO',
+'sourcePath': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz',
+'urlHash': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz.md5',
+'license': 'Commercial'
+}) %}
+{% endif %}
 {% else %}
 {# No license key - explicitly set to ETOPEN #}
-{% do ruleset.update({
-'name': 'ETOPEN',
-'sourcePath': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz',
-'urlHash': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz.md5',
-'license': 'BSD'
-}) %}
+{% if ruleset.sourceType == 'directory' %}
+{# Airgap mode - update directory path #}
+{% do ruleset.update({
+'name': 'ETOPEN',
+'sourcePath': '/nsm/rules/suricata/etopen/',
+'license': 'BSD'
+}) %}
+{% else %}
+{% do ruleset.update({
+'name': 'ETOPEN',
+'sourcePath': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz',
+'urlHash': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz.md5',
+'license': 'BSD'
+}) %}
+{% endif %}
 {% endif %}
 {% endif %}
 {% endfor %}
@@ -608,6 +608,18 @@ soc:
 label: Delete Unreferenced (Deletes rules that are no longer referenced by ruleset source)
 forcedType: bool
 required: False
+- field: proxyURL
+label: HTTP/HTTPS proxy URL for downloading the ruleset.
+required: False
+- field: proxyUsername
+label: Proxy authentication username.
+required: False
+- field: proxyPassword
+label: Proxy authentication password.
+required: False
+- field: proxyCACert
+label: Path to CA certificate file for MITM proxy verification.
+required: False
 airgap: *serulesetSources
 navigator:
 intervalMinutes:
@@ -17,14 +17,23 @@ query() {

 STATS=$(query "ruleset-stats")
 RELOAD=$(query "ruleset-reload-time")
+[ -z "$RELOAD" ] && RELOAD='{}'

-if echo "$STATS" | jq -e '.return == "OK"' > /dev/null 2>&1; then
-LOADED=$(echo "$STATS" | jq -r '.message[0].rules_loaded')
-FAILED=$(echo "$STATS" | jq -r '.message[0].rules_failed')
-LAST_RELOAD=$(echo "$RELOAD" | jq -r '.message[0].last_reload')
-
-jq -n --argjson loaded "$LOADED" --argjson failed "$FAILED" --arg reload "$LAST_RELOAD" \
-'{rules_loaded: $loaded, rules_failed: $failed, last_reload: $reload, return: "OK"}' > "$OUTFILE"
+# Outputs valid JSON on success, empty on failure
+OUTPUT=$(jq -n \
+--argjson stats "$STATS" \
+--argjson reload "$RELOAD" \
+'if $stats.return == "OK" and ($stats.message[0].rules_loaded | type) == "number" and ($stats.message[0].rules_failed | type) == "number" then
+{
+rules_loaded: $stats.message[0].rules_loaded,
+rules_failed: $stats.message[0].rules_failed,
+last_reload: ($reload.message[0].last_reload // ""),
+return: "OK"
+}
+else empty end' 2>/dev/null)
+
+if [ -n "$OUTPUT" ]; then
+echo "$OUTPUT" > "$OUTFILE"
 else
 echo '{"return":"FAIL"}' > "$OUTFILE"
 fi
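The rewritten collector builds the whole status document with one jq program that emits nothing unless both counters are numeric, which is what the later `[ -n "$OUTPUT" ]` test keys on. Running the same filter against a mocked suricatasc response (sample JSON invented for illustration) shows the success path; malformed input simply produces no output:

```bash
# Illustrative only: the jq filter yields a document on good input and nothing otherwise.
STATS='{"return":"OK","message":[{"rules_loaded":48210,"rules_failed":0}]}'
RELOAD='{"return":"OK","message":[{"last_reload":"2025-10-01T00:00:00.000000+0000"}]}'
jq -n --argjson stats "$STATS" --argjson reload "$RELOAD" \
  'if $stats.return == "OK" and ($stats.message[0].rules_loaded | type) == "number"
      and ($stats.message[0].rules_failed | type) == "number"
   then {rules_loaded: $stats.message[0].rules_loaded,
         rules_failed: $stats.message[0].rules_failed,
         last_reload: ($reload.message[0].last_reload // ""),
         return: "OK"}
   else empty end'
```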
@@ -18,11 +18,15 @@ if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
 if [ -f "$STATSFILE" ] && [ $(($(date +%s) - $(stat -c %Y "$STATSFILE"))) -lt 90 ] && jq -e '.return == "OK" and .rules_loaded != null and .rules_failed != null' "$STATSFILE" > /dev/null 2>&1; then
 LOADED=$(jq -r '.rules_loaded' "$STATSFILE")
 FAILED=$(jq -r '.rules_failed' "$STATSFILE")
-RELOAD_TIME=$(jq -r '.last_reload // ""' "$STATSFILE")
+RELOAD_TIME=$(jq -r 'if .last_reload then .last_reload else "" end' "$STATSFILE")

-echo "surirules loaded=${LOADED}i,failed=${FAILED}i,reload_time=\"${RELOAD_TIME}\",status=\"ok\""
+if [ -n "$RELOAD_TIME" ]; then
+echo "surirules loaded=${LOADED}i,failed=${FAILED}i,reload_time=\"${RELOAD_TIME}\",status=\"ok\""
+else
+echo "surirules loaded=${LOADED}i,failed=${FAILED}i,status=\"ok\""
+fi
 else
-echo "surirules loaded=0i,failed=0i,reload_time=\"\",status=\"unknown\""
+echo "surirules loaded=0i,failed=0i,status=\"unknown\""
 fi

 fi