mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-07 09:42:46 +01:00)

Merge pull request #14612 from Security-Onion-Solutions/2.4/dev

2.4.150
.github/DISCUSSION_TEMPLATE/2-4.yml (vendored, 2 changes)

@@ -26,6 +26,8 @@ body:
         - 2.4.120
         - 2.4.130
         - 2.4.140
+        - 2.4.141
+        - 2.4.150
         - Other (please provide detail below)
    validations:
      required: true
.github/workflows/pythontest.yml (vendored, 8 changes)

@@ -1,10 +1,6 @@
 name: python-test

 on:
-  push:
-    paths:
-      - "salt/sensoroni/files/analyzers/**"
-      - "salt/manager/tools/sbin"
   pull_request:
     paths:
       - "salt/sensoroni/files/analyzers/**"
@@ -17,7 +13,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.10"]
+        python-version: ["3.13"]
         python-code-path: ["salt/sensoroni/files/analyzers", "salt/manager/tools/sbin"]

     steps:
@@ -36,4 +32,4 @@ jobs:
         flake8 ${{ matrix.python-code-path }} --show-source --max-complexity=12 --doctests --max-line-length=200 --statistics
     - name: Test with pytest
       run: |
-        pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini
+        PYTHONPATH=${{ matrix.python-code-path }} pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini
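Side note, not part of the diff: prefixing PYTHONPATH makes sibling modules under the tested path importable while pytest collects doctests with --doctest-modules. A minimal sketch of the difference, with a hypothetical analyzer module that imports a sibling helper:

```
# hypothetical: a doctest in an analyzer imports a sibling helper module
pytest salt/sensoroni/files/analyzers --doctest-modules
# may fail with ModuleNotFoundError during collection

PYTHONPATH=salt/sensoroni/files/analyzers \
  pytest salt/sensoroni/files/analyzers --doctest-modules
# sibling imports now resolve against the tested path
```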
@@ -1,17 +1,17 @@
-### 2.4.141-20250331 ISO image released on 2025/03/31
+### 2.4.150-20250512 ISO image released on 2025/05/12


 ### Download and Verify

-2.4.141-20250331 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.141-20250331.iso
+2.4.150-20250512 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.4.150-20250512.iso

-MD5: CAE347BC0437A93DC8F4089973ED0EA7
-SHA1: 3A6F0C2F3B6E3625E06F67EB251372D7E592CB0E
-SHA256: D0426D8E55E01A0FBA15AFE0BB7887CCB724C07FE82DA706CD1592E6001CD12B
+MD5: 7A7469A7A38EA9A2DB770C36AE36A0CA
+SHA1: 7E768D515353F339DC536DED6207B786DAFF7D27
+SHA256: F8B2EB6B332F2367F0C097D211577565C8FB5CC7809E97D63687C634035B3699

 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.141-20250331.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.150-20250512.iso.sig

 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.

 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.141-20250331.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.150-20250512.iso.sig
 ```

 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.141-20250331.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.150-20250512.iso
 ```

 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.4.141-20250331.iso.sig securityonion-2.4.141-20250331.iso
+gpg --verify securityonion-2.4.150-20250512.iso.sig securityonion-2.4.150-20250512.iso
 ```

 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Fri 28 Mar 2025 06:28:11 PM EDT using RSA key ID FE507013
+gpg: Signature made Fri 09 May 2025 06:27:29 PM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
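For completeness (not part of the diff): verification assumes the signing key has already been imported. A minimal sketch, using the KEYS URL already shown in the hunk above:

```
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
gpg --import KEYS   # import the Security Onion signing key before running gpg --verify
```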
@@ -24,6 +24,7 @@
 {% endif %}
 {% endfor %}

+{% if node_types %}
 node_data:
 {% for node_type, host_values in node_types.items() %}
 {% for hostname, details in host_values.items() %}
@@ -33,3 +34,6 @@ node_data:
       role: {{node_type}}
 {% endfor %}
 {% endfor %}
+{% else %}
+node_data: False
+{% endif %}

@@ -24,10 +24,10 @@ base:
     - firewall.adv_firewall
     - nginx.soc_nginx
     - nginx.adv_nginx
+    - node_data.ips

   '*_manager or *_managersearch':
     - match: compound
-    - node_data.ips
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
@@ -90,6 +90,7 @@ base:
     - soc.license

   '*_eval':
+    - node_data.ips
     - secrets
     - healthcheck.eval
     - elasticsearch.index_templates
@@ -138,6 +139,7 @@ base:
     - minions.adv_{{ grains.id }}

   '*_standalone':
+    - node_data.ips
     - logstash.nodes
     - logstash.soc_logstash
     - logstash.adv_logstash
@@ -260,6 +262,7 @@ base:
     - soc.license

   '*_import':
+    - node_data.ips
     - secrets
     - elasticsearch.index_templates
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
@@ -305,6 +308,7 @@ base:
     - minions.adv_{{ grains.id }}

   '*_fleet':
+    - node_data.ips
     - backup.soc_backup
     - backup.adv_backup
     - logstash.nodes
@@ -129,6 +129,10 @@ common_sbin:
     - group: 939
     - file_mode: 755
     - show_changes: False
+{% if GLOBALS.role == 'so-heavynode' %}
+    - exclude_pat:
+      - so-pcap-import
+{% endif %}

 common_sbin_jinja:
   file.recurse:
@@ -139,6 +143,20 @@ common_sbin_jinja:
     - file_mode: 755
     - template: jinja
     - show_changes: False
+{% if GLOBALS.role == 'so-heavynode' %}
+    - exclude_pat:
+      - so-import-pcap
+{% endif %}
+
+{% if GLOBALS.role == 'so-heavynode' %}
+remove_so-pcap-import_heavynode:
+  file.absent:
+    - name: /usr/sbin/so-pcap-import
+
+remove_so-import-pcap_heavynode:
+  file.absent:
+    - name: /usr/sbin/so-import-pcap
+{% endif %}

 {% if not GLOBALS.is_manager%}
 # prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and distributed to non managers
@@ -128,6 +128,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|No shard available" # Typical error when making a query before ES has finished loading all indices
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
 fi

 if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
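An aside, not from the diff: the pipe-separated EXCLUDED_ERRORS string is an extended-regex alternation. A minimal sketch of how such a string filters lines (the LOGFILE variable here is hypothetical):

```
# hypothetical demo of filtering with a pipe-separated exclusion pattern
EXCLUDED_ERRORS="No shard available|responded with status-code 503|not configured for GeoIP"
grep -ivE "$EXCLUDED_ERRORS" "$LOGFILE"   # keep only lines matching none of the excluded patterns
```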
@@ -200,6 +200,7 @@ docker:
       final_octet: 88
       port_bindings:
         - 0.0.0.0:9092:9092
+        - 0.0.0.0:29092:29092
         - 0.0.0.0:9093:9093
         - 0.0.0.0:8778:8778
       custom_bind_mounts: []
@@ -11,6 +11,7 @@ elasticfleet:
     defend_filters:
       enable_auto_configuration: False
     subscription_integrations: False
+    auto_upgrade_integrations: False
    logging:
      zeek:
        excluded:
@@ -0,0 +1,46 @@
{%- set identities = salt['sqlite3.fetch']('/nsm/kratos/db/db.sqlite', 'SELECT id, json_extract(traits, "$.email") as email FROM identities;') -%}
{%- set valid_identities = false -%}
{%- if identities -%}
  {%- set valid_identities = true -%}
  {%- for id, email in identities -%}
    {%- if not id or not email -%}
      {%- set valid_identities = false -%}
      {%- break -%}
    {%- endif -%}
  {%- endfor -%}
{%- endif -%}
{
  "package": {
    "name": "log",
    "version": ""
  },
  "name": "kratos-logs",
  "namespace": "so",
  "description": "Kratos logs",
  "policy_id": "so-grid-nodes_general",
  "inputs": {
    "logs-logfile": {
      "enabled": true,
      "streams": {
        "log.logs": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/kratos/kratos.log"
            ],
            "data_stream.dataset": "kratos",
            "tags": ["so-kratos"],
{%- if valid_identities -%}
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}",
{%- else -%}
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
{%- endif -%}
            "custom": "pipeline: kratos"
          }
        }
      }
    }
  },
  "force": true
}
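For orientation, not part of the diff: the identity lookup this template performs through salt['sqlite3.fetch'] can be reproduced from a shell. A minimal sketch, assuming the sqlite3 CLI is available on the manager:

```
# hypothetical spot-check of the same query the template runs against the Kratos db
sqlite3 /nsm/kratos/db/db.sqlite \
  'SELECT id, json_extract(traits, "$.email") AS email FROM identities;'
```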
@@ -1,30 +0,0 @@
{
  "package": {
    "name": "log",
    "version": ""
  },
  "name": "kratos-logs",
  "namespace": "so",
  "description": "Kratos logs",
  "policy_id": "so-grid-nodes_general",
  "inputs": {
    "logs-logfile": {
      "enabled": true,
      "streams": {
        "log.logs": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/kratos/kratos.log"
            ],
            "data_stream.dataset": "kratos",
            "tags": ["so-kratos"],
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
            "custom": "pipeline: kratos"
          }
        }
      }
    }
  },
  "force": true
}
@@ -31,7 +31,8 @@
     ],
     "tags": [
       "so-grid-node"
-    ]
+    ],
+    "processors": "- if:\n contains:\n message: \"salt-minion\"\n then: \n - dissect:\n tokenizer: \"%{} %{} %{} %{} %{} %{}: [%{log.level}] %{*}\"\n field: \"message\"\n trim_values: \"all\"\n target_prefix: \"\"\n - drop_event:\n when:\n equals:\n log.level: \"INFO\""
   }
  }
 }
@@ -45,6 +45,11 @@ elasticfleet:
       global: True
       forcedType: bool
       helpLink: elastic-fleet.html
+    auto_upgrade_integrations:
+      description: Enables or disables automatically upgrading Elastic Agent integrations.
+      global: True
+      forcedType: bool
+      helpLink: elastic-fleet.html
   server:
     custom_fqdn:
       description: Custom FQDN for Agents to connect to. One per line.
@@ -1,62 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-elastic-fleet-common

curl_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/)
if [ $? -ne 0 ]; then
    echo "Error: Failed to connect to Kibana."
    exit 1
fi

IFS=$'\n'
agent_policies=$(elastic_fleet_agent_policy_ids)
if [ $? -ne 0 ]; then
    echo "Error: Failed to retrieve agent policies."
    exit 1
fi

for AGENT_POLICY in $agent_policies; do
    integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
    for INTEGRATION in $integrations; do
        if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
            # Get package name so we know what package to look for when checking the current and latest available version
            PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")

            # Get currently installed version of package
            PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION")

            # Get latest available version of package
            AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME")

            # Get integration ID
            INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")

            if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
                # Dry run of the upgrade
                echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
                echo "Upgrading $INTEGRATION..."
                echo "Starting dry run..."
                DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID")
                DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)

                # If no errors with dry run, proceed with actual upgrade
                if [[ "$DRYRUN_ERRORS" == "false" ]]; then
                    echo "No errors detected. Proceeding with upgrade..."
                    elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
                    if [ $? -ne 0 ]; then
                        echo "Error: Upgrade failed for integration ID '$INTEGRATION_ID'."
                        exit 1
                    fi
                else
                    echo "Errors detected during dry run. Stopping upgrade..."
                    exit 1
                fi
            fi
        fi
    done
done
echo
@@ -83,5 +83,10 @@ docker run \
   {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent-builder:{{ GLOBALS.so_version }} wixl -o so-elastic-agent_windows_amd64_msi --arch x64 /workspace/so-elastic-agent.wxs
 printf "\n### MSI Generated...\n"

-printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace\n"
+printf "\n### Cleaning up temp files \n"
 rm -rf /nsm/elastic-agent-workspace
+rm -rf /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/so-elastic-agent_windows_amd64.exe
+
+printf "\n### Copying so_agent-installers to /nsm/elastic-fleet/ for nginx.\n"
+\cp -vr /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/ /nsm/elastic-fleet/
+chmod 644 /nsm/elastic-fleet/so_agent-installers/*
@@ -14,7 +14,7 @@ if ! is_manager_node; then
 fi

 # Get current list of Grid Node Agents that need to be upgraded
-RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=policy_id%20%3A%20so-grid-nodes_%2A&showInactive=false&showUpgradeable=true&getStatusSummary=true")
+RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true")

 # Check to make sure that the server responded with good data - else, bail from script
 CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
@@ -0,0 +1,73 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
{%- set AUTO_UPGRADE_INTEGRATIONS = salt['pillar.get']('elasticfleet:config:auto_upgrade_integrations', default=false) %}

. /usr/sbin/so-elastic-fleet-common

curl_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/)
if [ $? -ne 0 ]; then
    echo "Error: Failed to connect to Kibana."
    exit 1
fi

IFS=$'\n'
agent_policies=$(elastic_fleet_agent_policy_ids)
if [ $? -ne 0 ]; then
    echo "Error: Failed to retrieve agent policies."
    exit 1
fi

default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})

for AGENT_POLICY in $agent_policies; do
    integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
    for INTEGRATION in $integrations; do
        if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
            # Get package name so we know what package to look for when checking the current and latest available version
            PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
            if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
{%- endif %}
            # Get currently installed version of package
            PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION")

            # Get latest available version of package
            AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME")

            # Get integration ID
            INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")

            if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
                # Dry run of the upgrade
                echo ""
                echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
                echo "Upgrading $INTEGRATION..."
                echo "Starting dry run..."
                DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID")
                DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)

                # If no errors with dry run, proceed with actual upgrade
                if [[ "$DRYRUN_ERRORS" == "false" ]]; then
                    echo "No errors detected. Proceeding with upgrade..."
                    elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
                    if [ $? -ne 0 ]; then
                        echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
                        exit 1
                    fi
                else
                    echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
                    exit 1
                fi
            fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
            fi
{%- endif %}
        fi
    done
done
echo
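An aside on the membership test used above, not part of the diff: the whitespace-padded pattern only works for elements that contain no embedded spaces. A minimal, self-contained sketch of the idiom:

```
# demo of the padded-string array-membership idiom used by the script
default_packages=("endpoint" "system" "apache")
pkg="system"
if [[ " ${default_packages[@]} " =~ " $pkg " ]]; then
  echo "$pkg is a default package"   # matches because " system " appears in " endpoint system apache "
fi
```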
@@ -3,7 +3,10 @@
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
 # this file except in compliance with the Elastic License 2.0.
+{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
 {% set SUB = salt['pillar.get']('elasticfleet:config:subscription_integrations', default=false) %}
+{% set AUTO_UPGRADE_INTEGRATIONS = salt['pillar.get']('elasticfleet:config:auto_upgrade_integrations', default=false) %}
+{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}

 . /usr/sbin/so-common
 . /usr/sbin/so-elastic-fleet-common
@@ -46,6 +49,28 @@ compare_versions() {
   fi
 }

+IFS=$'\n'
+agent_policies=$(elastic_fleet_agent_policy_ids)
+if [ $? -ne 0 ]; then
+    echo "Error: Failed to retrieve agent policies."
+    exit 1
+fi
+
+default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})
+
+in_use_integrations=()
+
+for AGENT_POLICY in $agent_policies; do
+    integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
+    for INTEGRATION in $integrations; do
+        PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
+        # non-default integrations that are in-use in any policy
+        if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
+            in_use_integrations+=("$PACKAGE_NAME")
+        fi
+    done
+done
+
 if [[ -f $STATE_FILE_SUCCESS ]]; then
   if retry 3 1 "curl -s -K /opt/so/conf/elasticsearch/curl.config --output /dev/null --silent --head --fail localhost:5601/api/fleet/epm/packages"; then
     # Package_list contains all integrations beta / non-beta.
@@ -77,10 +102,19 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
         else
           results=$(compare_versions "$latest_version" "$installed_version")
           if [ $results == "greater" ]; then
-            echo "$package_name is at version $installed_version latest version is $latest_version... Adding to next update."
-            jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
-
-            PENDING_UPDATE=true
+{#- When auto_upgrade_integrations is false, skip upgrading in_use_integrations #}
+{%- if not AUTO_UPGRADE_INTEGRATIONS %}
+            if ! [[ " ${in_use_integrations[@]} " =~ " $package_name " ]]; then
+{%- endif %}
+            echo "$package_name is at version $installed_version latest version is $latest_version... Adding to next update."
+            jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
+
+            PENDING_UPDATE=true
+{%- if not AUTO_UPGRADE_INTEGRATIONS %}
+            else
+              echo "skipping available upgrade for in use integration - $package_name."
+            fi
+{%- endif %}
           fi
         fi
       fi
@@ -92,9 +126,18 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
         else
           results=$(compare_versions "$latest_version" "$installed_version")
           if [ $results == "greater" ]; then
-            echo "$package_name is at version $installed_version latest version is $latest_version... Adding to next update."
-            jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
-            PENDING_UPDATE=true
+{#- When auto_upgrade_integrations is false, skip upgrading in_use_integrations #}
+{%- if not AUTO_UPGRADE_INTEGRATIONS %}
+            if ! [[ " ${in_use_integrations[@]} " =~ " $package_name " ]]; then
+{%- endif %}
+            echo "$package_name is at version $installed_version latest version is $latest_version... Adding to next update."
+            jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
+            PENDING_UPDATE=true
+{%- if not AUTO_UPGRADE_INTEGRATIONS %}
+            else
+              echo "skipping available upgrade for in use integration - $package_name."
+            fi
+{%- endif %}
           fi
         fi
 {% endif %}
@@ -32,7 +32,7 @@ if ! echo "$output" | grep -q "so-manager_kafka"; then
     --arg KAFKACA "$KAFKACA" \
     --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
     --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
-    '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics":[{"topic":"%{[event.module]}-securityonion","when":{"type":"regexp","condition":"event.module:.+"}},{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
+    '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 10 }, "topics":[{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
   )
   curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null
   refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
@@ -15,7 +15,7 @@
 elastic_auth_pillar:
   file.managed:
     - name: /opt/so/saltstack/local/pillar/elasticsearch/auth.sls
-    - mode: 600
+    - mode: 640
     - reload_pillar: True
     - contents: |
         elasticsearch:
@@ -162,6 +162,7 @@ elasticsearch:
         - http-mappings
        - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -274,7 +275,7 @@ elasticsearch:
       number_of_replicas: 0
       auto_expand_replicas: 0-2
       number_of_shards: 1
-      refresh_interval: 30s
+      refresh_interval: 1s
       sort:
         field: '@timestamp'
         order: desc
@@ -316,6 +317,7 @@ elasticsearch:
         - http-mappings
         - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -427,6 +429,7 @@ elasticsearch:
         - http-mappings
         - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -534,6 +537,7 @@ elasticsearch:
         - http-mappings
         - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -697,6 +701,7 @@ elasticsearch:
         - client-mappings
         - device-mappings
         - network-mappings
+        - so-fleet_integrations.ip_mappings-1
         - so-fleet_globals-1
         - so-fleet_agent_id_verification-1
       data_stream:
@@ -768,6 +773,7 @@ elasticsearch:
         - http-mappings
         - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -878,6 +884,7 @@ elasticsearch:
         - http-mappings
         - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -998,6 +1005,7 @@ elasticsearch:
     index_template:
       composed_of:
         - so-data-streams-mappings
+        - so-fleet_integrations.ip_mappings-1
         - so-fleet_globals-1
         - so-fleet_agent_id_verification-1
         - so-logs-mappings
@@ -2832,6 +2840,7 @@ elasticsearch:
         - http-mappings
         - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -3062,6 +3071,7 @@ elasticsearch:
         - event-mappings
         - logs-system.syslog@package
         - logs-system.syslog@custom
+        - so-fleet_integrations.ip_mappings-1
         - so-fleet_globals-1
         - so-fleet_agent_id_verification-1
         - so-system-mappings
@@ -3421,6 +3431,7 @@ elasticsearch:
         - dtc-http-mappings
         - log-mappings
         - logstash-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -3505,6 +3516,7 @@ elasticsearch:
       composed_of:
         - metrics-endpoint.metadata@package
         - metrics-endpoint.metadata@custom
+        - so-fleet_integrations.ip_mappings-1
         - so-fleet_globals-1
         - so-fleet_agent_id_verification-1
       data_stream:
@@ -3551,6 +3563,7 @@ elasticsearch:
       composed_of:
         - metrics-endpoint.metrics@package
         - metrics-endpoint.metrics@custom
+        - so-fleet_integrations.ip_mappings-1
         - so-fleet_globals-1
         - so-fleet_agent_id_verification-1
       data_stream:
@@ -3597,6 +3610,7 @@ elasticsearch:
       composed_of:
         - metrics-endpoint.policy@package
         - metrics-endpoint.policy@custom
+        - so-fleet_integrations.ip_mappings-1
         - so-fleet_globals-1
         - so-fleet_agent_id_verification-1
       data_stream:
@@ -3645,6 +3659,7 @@ elasticsearch:
         - metrics-fleet_server.agent_status@package
         - metrics-fleet_server.agent_status@custom
         - ecs@mappings
+        - so-fleet_integrations.ip_mappings-1
         - so-fleet_globals-1
         - so-fleet_agent_id_verification-1
       data_stream:
@@ -3668,6 +3683,7 @@ elasticsearch:
         - metrics-fleet_server.agent_versions@package
         - metrics-fleet_server.agent_versions@custom
         - ecs@mappings
+        - so-fleet_integrations.ip_mappings-1
         - so-fleet_globals-1
         - so-fleet_agent_id_verification-1
       data_stream:
@@ -3715,6 +3731,7 @@ elasticsearch:
         - http-mappings
         - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -3827,6 +3844,7 @@ elasticsearch:
         - http-mappings
         - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -3939,6 +3957,7 @@ elasticsearch:
         - http-mappings
         - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -4051,6 +4070,7 @@ elasticsearch:
         - http-mappings
         - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -4163,6 +4183,7 @@ elasticsearch:
         - http-mappings
         - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -4276,6 +4297,7 @@ elasticsearch:
         - http-mappings
         - dtc-http-mappings
         - log-mappings
+        - metadata-mappings
         - network-mappings
         - dtc-network-mappings
         - observer-mappings
@@ -204,12 +204,17 @@ so-elasticsearch-roles-load:
       - docker_container: so-elasticsearch
       - file: elasticsearch_sbin_jinja

-{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
+{% if grains.role in ['so-managersearch', 'so-manager'] %}
+  {% set ap = "absent" %}
+{% endif %}
+{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
   {% if ELASTICSEARCHMERGED.index_clean %}
     {% set ap = "present" %}
   {% else %}
     {% set ap = "absent" %}
   {% endif %}
+{% endif %}
+{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
 so-elasticsearch-indices-delete:
   cron.{{ap}}:
     - name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
@@ -8,7 +8,7 @@
   "processors": [
     { "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
     { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
-    { "split": { "if": "ctx.data_stream?.dataset.contains('.')", "field":"data_stream.dataset", "separator":"\\.", "target_field":"datastream_dataset_temp", "ignore_missing":true } },
+    { "split": { "if": "ctx.data_stream?.dataset != null && ctx.data_stream?.dataset.contains('.')", "field":"data_stream.dataset", "separator":"\\.", "target_field":"datastream_dataset_temp", "ignore_missing":true } },
     { "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
     { "set": { "if": "ctx.datastream_dataset_temp != null && ctx.datastream_dataset_temp[0] == 'network_traffic'", "field":"event.module", "value":"{{ datastream_dataset_temp.0 }}", "ignore_failure":true, "ignore_empty_value":true, "description":"Fix EA network packet capture" } },
     { "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
@@ -29,7 +29,7 @@
       "file": {
         "properties": {
           "line": {
-            "type": "integer"
+            "type": "long"
           },
           "name": {
             "ignore_above": 1024,
salt/elasticsearch/templates/component/ecs/metadata.json (new file, 26 lines)

@@ -0,0 +1,26 @@
{
  "template": {
    "mappings": {
      "dynamic_templates": [],
      "properties": {
        "metadata": {
          "properties": {
            "kafka": {
              "properties": {
                "timestamp": {
                  "type": "date"
                }
              }
            }
          }
        }
      }
    }
  },
  "_meta": {
    "_meta": {
      "documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-log.html",
      "ecs_version": "1.12.2"
    }
  }
}
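Not from the diff: the composed_of lists amended earlier in this changeset reference this component as metadata-mappings, so a hedged way to confirm it loaded, reusing the curl.config auth seen throughout these scripts (the component name is an assumption inferred from those lists):

```
# assumption: the new ECS metadata component registers as "metadata-mappings"
curl -s -K /opt/so/conf/elasticsearch/curl.config \
  localhost:9200/_component_template/metadata-mappings | jq .
```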
@@ -5,6 +5,7 @@
     "managed_by": "security_onion",
     "managed": true
   },
+  "date_detection": false,
   "dynamic_templates": [
     {
       "strings_as_keyword": {
@@ -16,7 +17,19 @@
       }
     }
   ],
-  "date_detection": false
+  "properties": {
+    "metadata": {
+      "properties": {
+        "kafka": {
+          "properties": {
+            "timestamp": {
+              "type": "date"
+            }
+          }
+        }
+      }
+    }
+  }
  }
 },
 "_meta": {
@@ -1,37 +1,59 @@
 {
-  "template": {
-    "mappings": {
-      "properties": {
-        "host": {
-          "properties":{
-            "ip": {
-              "type": "ip"
-            }
-          }
-        },
-        "related": {
-          "properties":{
-            "ip": {
-              "type": "ip"
-            }
-          }
-        },
-        "destination": {
-          "properties":{
-            "ip": {
-              "type": "ip"
-            }
-          }
-        },
-        "source": {
-          "properties":{
-            "ip": {
-              "type": "ip"
-            }
-          }
-        }
-      }
-    }
-  },
+  "template": {
+    "mappings": {
+      "properties": {
+        "host": {
+          "properties": {
+            "ip": {
+              "type": "ip"
+            }
+          }
+        },
+        "related": {
+          "properties": {
+            "ip": {
+              "type": "ip"
+            }
+          }
+        },
+        "destination": {
+          "properties": {
+            "ip": {
+              "type": "ip"
+            }
+          }
+        },
+        "source": {
+          "properties": {
+            "ip": {
+              "type": "ip"
+            }
+          }
+        },
+        "metadata": {
+          "properties": {
+            "input": {
+              "properties": {
+                "beats": {
+                  "properties": {
+                    "host": {
+                      "properties": {
+                        "ip": {
+                          "type": "ip"
+                        }
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  },
   "_meta": {
     "managed_by": "security_onion",
     "managed": true
   }

 }
@@ -11,6 +11,7 @@ firewall:
     endgame: []
     eval: []
     external_suricata: []
+    external_kafka: []
     fleet: []
     heavynode: []
     idh: []
@@ -103,6 +104,10 @@ firewall:
       tcp:
         - 9092
       udp: []
+    kafka_external_access:
+      tcp:
+        - 29092
+      udp: []
     kibana:
       tcp:
         - 5601
@@ -473,6 +478,8 @@ firewall:
         external_suricata:
           portgroups:
            - external_suricata
+        external_kafka:
+          portgroups: []
         desktop:
           portgroups:
             - docker_registry
@@ -668,6 +675,8 @@ firewall:
         external_suricata:
           portgroups:
             - external_suricata
+        external_kafka:
+          portgroups: []
         desktop:
           portgroups:
             - docker_registry
@@ -867,6 +876,8 @@ firewall:
         external_suricata:
          portgroups:
             - external_suricata
+        external_kafka:
+          portgroups: []
         strelka_frontend:
           portgroups:
             - strelka_frontend
@@ -1337,6 +1348,8 @@ firewall:
         endgame:
           portgroups:
             - endgame
+        external_kafka:
+          portgroups: []
         receiver:
           portgroups: []
         customhostgroup0:
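A usage sketch, not part of the diff: the new external_kafka hostgroup ships empty, so a remote consumer presumably has to be granted access explicitly. Assuming the documented so-firewall syntax applies to this hostgroup (the IP is a placeholder):

```
# hypothetical: allow a remote Kafka consumer through the external_kafka hostgroup
sudo so-firewall includehost external_kafka 203.0.113.50
sudo so-firewall apply
```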
@@ -21,25 +21,38 @@
 {# Only add Kafka firewall items when Kafka enabled #}
 {% set role = GLOBALS.role.split('-')[1] %}

-{% if GLOBALS.pipeline == 'KAFKA' and role in ['manager', 'managersearch', 'standalone'] %}
-{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
-{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
-{% endif %}
-
-{% if GLOBALS.pipeline == 'KAFKA' and role == 'receiver' %}
-{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.self.portgroups.append('kafka_controller') %}
-{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.standalone.portgroups.append('kafka_controller') %}
-{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.manager.portgroups.append('kafka_controller') %}
-{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.managersearch.portgroups.append('kafka_controller') %}
-{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
-{% endif %}
-
-{% if GLOBALS.pipeline == 'KAFKA' and role in ['manager', 'managersearch', 'standalone', 'receiver'] %}
-{% for r in ['manager', 'managersearch', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
-{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
-{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
-{% endif %}
-{% endfor %}
-{% endif %}
+{% if GLOBALS.pipeline == 'KAFKA' %}
+{% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
+{% set kafka_node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %}
+
+{% if role == 'receiver' %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.self.portgroups.append('kafka_controller') %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.standalone.portgroups.append('kafka_controller') %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.manager.portgroups.append('kafka_controller') %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.managersearch.portgroups.append('kafka_controller') %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
+{% endif %}
+
+{% if role in ['manager', 'managersearch', 'standalone'] %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
+{% endif %}
+
+{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] %}
+{% for r in ['manager', 'managersearch', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
+{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
+{% endif %}
+{% endfor %}
+{% endif %}
+
+{% if KAFKA_EXTERNAL_ACCESS %}
+{# Kafka external access only applies for Kafka nodes with the broker role. #}
+{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] and 'broker' in kafka_node_type %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.external_kafka.portgroups.append('kafka_external_access') %}
+{% endif %}
+{% endif %}
+
+{% endif %}

 {% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}
@@ -33,6 +33,7 @@ firewall:
       endgame: *hostgroupsettingsadv
       eval: *hostgroupsettings
       external_suricata: *hostgroupsettings
+      external_kafka: *hostgroupsettings
       fleet: *hostgroupsettings
       heavynode: *hostgroupsettings
       idh: *hostgroupsettings
@@ -130,6 +131,9 @@ firewall:
       kafka_data:
         tcp: *tcpsettings
         udp: *udpsettings
+      kafka_external_access:
+        tcp: *tcpsettings
+        udp: *udpsettings
       kibana:
         tcp: *tcpsettings
         udp: *udpsettings
@@ -24,13 +24,23 @@ idstools_sbin:
     - group: 939
     - file_mode: 755

-idstools_sbin_jinja:
-  file.recurse:
-    - name: /usr/sbin
-    - source: salt://idstools/tools/sbin_jinja
+# If this is used, exclude so-rule-update
+#idstools_sbin_jinja:
+#  file.recurse:
+#    - name: /usr/sbin
+#    - source: salt://idstools/tools/sbin_jinja
+#    - user: 934
+#    - group: 939
+#    - file_mode: 755
+#    - template: jinja
+
+idstools_so-rule-update:
+  file.managed:
+    - name: /usr/sbin/so-rule-update
+    - source: salt://idstools/tools/sbin_jinja/so-rule-update
     - user: 934
     - group: 939
-    - file_mode: 755
+    - mode: 755
     - template: jinja

 suricatacustomdirsfile:
@@ -55,6 +55,7 @@ so-idstools:
 {% endif %}
     - watch:
       - file: idstoolsetcsync
+      - file: idstools_so-rule-update

 delete_so-idstools_so-status.disabled:
   file.uncomment:
@@ -76,6 +77,7 @@ run_so-rule-update:
     - require:
       - docker_container: so-idstools
     - onchanges:
+      - file: idstools_so-rule-update
       - file: idstoolsetcsync
       - file: synclocalnidsrules
     - order: last
@@ -5,10 +5,10 @@
     "name": "alarm-nsm-disk"
   },
   "spec": {
-    "description": "Percent used space on the root partition of at least one node has exceeded the alarm threshold.",
+    "description": "Percent used space on the nsm partition of at least one node has exceeded the alarm threshold.",
     "every": "1m0s",
     "name": "NSM Disk High Usage",
-    "query": "from(bucket: \"telegraf/so_short_term\")\n  |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n  |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n  |> filter(fn: (r) => r[\"path\"] == \"/\")\n  |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n  |> aggregateWindow(every: 1m, fn: max, createEmpty: false)\n  |> yield(name: \"max\")",
+    "query": "from(bucket: \"telegraf/so_short_term\")\n  |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n  |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n  |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n  |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n  |> aggregateWindow(every: 1m, fn: max, createEmpty: false)\n  |> yield(name: \"max\")",
     "status": "active",
     "statusMessageTemplate": "Check: ${ r._check_name } is: ${ r._level }",
     "thresholds": [
@@ -8,6 +8,7 @@
 {% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %}
 {% set KAFKA_PASSWORD = salt['pillar.get']('kafka:config:password') %}
 {% set KAFKA_TRUSTPASS = salt['pillar.get']('kafka:config:trustpass') %}
+{% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}

 {# Create list of KRaft controllers #}
 {% set controllers = [] %}
@@ -15,7 +16,7 @@
 {# Check for Kafka nodes with controller in process_x_roles #}
 {% for node in KAFKA_NODES_PILLAR %}
 {% if 'controller' in KAFKA_NODES_PILLAR[node].role %}
-{% do controllers.append(KAFKA_NODES_PILLAR[node].nodeid ~ "@" ~ node ~ ":9093") %}
+{% do controllers.append(KAFKA_NODES_PILLAR[node].nodeid ~ "@" ~ KAFKA_NODES_PILLAR[node].ip ~ ":9093") %}
 {% endif %}
 {% endfor %}

@@ -28,7 +29,15 @@
 {# Generate server.properties for 'broker' , 'controller', 'broker,controller' node types
    anything above this line is a configuration needed for ALL Kafka nodes #}
 {% if node_type == 'broker' %}
-{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %}
+{% if KAFKA_EXTERNAL_ACCESS %}
+{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' + ',' + 'EXTERNAL_ACCESS://' + GLOBALS.node_ip + ':29092' }) %}
+{% do KAFKAMERGED.config.broker.update({'listeners': KAFKAMERGED.config.broker.listeners + ',' + KAFKAMERGED.config.external_access.listeners }) %}
+{% do KAFKAMERGED.config.broker.update({'listener_x_security_x_protocol_x_map': KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map + ',' + KAFKAMERGED.config.external_access.listener_x_security_x_protocol_x_map }) %}
+{% do KAFKAMERGED.config.broker.update({'sasl_x_enabled_x_mechanisms': KAFKAMERGED.config.external_access.sasl_x_enabled_x_mechanisms }) %}
+{% do KAFKAMERGED.config.broker.update({'sasl_x_mechanism_x_inter_x_broker_x_protocol': KAFKAMERGED.config.external_access.sasl_x_mechanism_x_inter_x_broker_x_protocol }) %}
+{% else %}
+{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %}
+{% endif %}
 {% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %}
 {% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %}
 {% do KAFKAMERGED.config.broker.update({'ssl_x_keystore_x_password': KAFKA_PASSWORD }) %}
@@ -42,6 +51,7 @@
 {% endif %}

 {% if node_type == 'controller' %}
+{% do KAFKAMERGED.config.controller.update({'advertised_x_listeners': 'CONTROLLER://' + GLOBALS.node_ip + ':9093'}) %}
 {% do KAFKAMERGED.config.controller.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %}
 {% do KAFKAMERGED.config.controller.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %}
 {% do KAFKAMERGED.config.controller.update({'ssl_x_keystore_x_password': KAFKA_PASSWORD }) %}
@@ -50,7 +60,15 @@

 {# Kafka nodes of this type are not recommended for use outside of development / testing. #}
 {% if node_type == 'broker,controller' %}
-{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %}
+{% if KAFKA_EXTERNAL_ACCESS %}
+{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' + ',' + 'CONTROLLER://'+ GLOBALS.node_ip +':9093' + ',' + 'EXTERNAL_ACCESS://' + GLOBALS.node_ip + ':29092' }) %}
+{% do KAFKAMERGED.config.broker.update({'listeners': KAFKAMERGED.config.broker.listeners + ',' + KAFKAMERGED.config.external_access.listeners }) %}
+{% do KAFKAMERGED.config.broker.update({'listener_x_security_x_protocol_x_map': KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map + ',' + KAFKAMERGED.config.external_access.listener_x_security_x_protocol_x_map }) %}
+{% do KAFKAMERGED.config.broker.update({'sasl_x_enabled_x_mechanisms': KAFKAMERGED.config.external_access.sasl_x_enabled_x_mechanisms }) %}
+{% do KAFKAMERGED.config.broker.update({'sasl_x_mechanism_x_inter_x_broker_x_protocol': KAFKAMERGED.config.external_access.sasl_x_mechanism_x_inter_x_broker_x_protocol }) %}
+{% else %}
+{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' + ',' + 'CONTROLLER://'+ GLOBALS.node_ip +':9093' }) %}
+{% endif %}
 {% do KAFKAMERGED.config.broker.update({'controller_x_listener_x_names': KAFKAMERGED.config.controller.controller_x_listener_x_names }) %}
 {% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %}
 {% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %}
@@ -6,6 +6,8 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls.split('.')[0] in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
+{% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
+{% set KAFKA_EXTERNAL_USERS = salt['pillar.get']('kafka:config:external_access:remote_users', default=None) %}

 kafka_group:
   group.present:
@@ -69,6 +71,29 @@ kafka_kraft_{{sc}}_properties:
     - show_changes: False
 {% endfor %}

+{% if KAFKA_EXTERNAL_ACCESS and KAFKA_EXTERNAL_USERS != None %}
+kafka_server_jaas_properties:
+  file.managed:
+    - source: salt://kafka/etc/jaas.conf.jinja
+    - name: /opt/so/conf/kafka/kafka_server_jaas.conf
+    - template: jinja
+    - user: 960
+    - group: 960
+    - show_changes: False
+{% else %}
+remove_kafka_server_jaas_properties:
+  file.absent:
+    - name: /opt/so/conf/kafka/kafka_server_jaas.conf
+{% endif %}
+
+kafka_log4j_properties:
+  file.managed:
+    - source: salt://kafka/etc/log4j.properties
+    - name: /opt/so/conf/kafka/log4j.properties
+    - user: 960
+    - group: 960
+    - show_changes: False
+
 reset_quorum_on_changes:
   cmd.run:
     - name: rm -f /nsm/kafka/data/__cluster_metadata-0/quorum-state
@@ -81,4 +106,4 @@ reset_quorum_on_changes:
 test.fail_without_changes:
   - name: {{sls}}_state_not_allowed

 {% endif %}
 {% endif %}
|
||||
log_x_segment_x_bytes: 1073741824
|
||||
node_x_id:
|
||||
num_x_io_x_threads: 8
|
||||
num_x_network_x_threads: 3
|
||||
num_x_network_x_threads: 5
|
||||
num_x_partitions: 3
|
||||
num_x_recovery_x_threads_x_per_x_data_x_dir: 1
|
||||
offsets_x_topic_x_replication_x_factor: 1
|
||||
@@ -46,6 +46,7 @@ kafka:
|
||||
ssl_x_keystore_x_type: PKCS12
|
||||
ssl_x_keystore_x_password:
|
||||
controller:
|
||||
advertsied_x_listeners:
|
||||
controller_x_listener_x_names: CONTROLLER
|
||||
controller_x_quorum_x_voters:
|
||||
listeners: CONTROLLER://0.0.0.0:9093
|
||||
@@ -61,4 +62,10 @@ kafka:
|
||||
ssl_x_keystore_x_password:
|
||||
ssl_x_truststore_x_location: /etc/pki/kafka-truststore.jks
|
||||
ssl_x_truststore_x_type: JKS
|
||||
ssl_x_truststore_x_password:
|
||||
ssl_x_truststore_x_password:
|
||||
external_access:
|
||||
enabled: False
|
||||
listeners: EXTERNAL_ACCESS://0.0.0.0:29092
|
||||
listener_x_security_x_protocol_x_map: EXTERNAL_ACCESS:SASL_SSL
|
||||
sasl_x_enabled_x_mechanisms: PLAIN
|
||||
sasl_x_mechanism_x_inter_x_broker_x_protocol: SSL
|
||||
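A client-side sketch, not part of the diff: the EXTERNAL_ACCESS listener speaks SASL_SSL with the PLAIN mechanism on 29092, so an external consumer presumably needs a config like the following. The username, password, truststore path, and broker IP are all placeholders; the topic name comes from the grid-kafka output defined earlier in this changeset:

```
# hypothetical client config for an external consumer of the EXTERNAL_ACCESS listener
cat > client-external.properties <<'EOF'
security.protocol=SASL_SSL
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
  username="analyst" \
  password="changeme";
ssl.truststore.location=/path/to/kafka-truststore.jks
EOF

# kafka-console-consumer.sh --bootstrap-server <broker_ip>:29092 \
#   --consumer.config client-external.properties --topic default-securityonion
```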
@@ -14,6 +14,7 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from 'docker/docker.map.jinja' import DOCKER %}
 {% set KAFKANODES = salt['pillar.get']('kafka:nodes') %}
+{% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
 {% if 'gmd' in salt['pillar.get']('features', []) %}

 include:
@@ -34,7 +35,7 @@ so-kafka:
     - user: kafka
     - environment:
       - KAFKA_HEAP_OPTS: -Xmx2G -Xms1G
-      - KAFKA_OPTS: -javaagent:/opt/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKER.containers['so-kafka'].ip }},policyLocation=file:/opt/jolokia/jolokia.xml
+      - KAFKA_OPTS: "-javaagent:/opt/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKER.containers['so-kafka'].ip }},policyLocation=file:/opt/jolokia/jolokia.xml {%- if KAFKA_EXTERNAL_ACCESS %} -Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf {% endif -%}"
     - extra_hosts:
 {% for node in KAFKANODES %}
       - {{ node }}:{{ KAFKANODES[node].ip }}
@@ -54,11 +55,18 @@ so-kafka:
       - /nsm/kafka/data/:/nsm/kafka/data/:rw
       - /opt/so/log/kafka:/opt/kafka/logs/:rw
       - /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/server.properties:ro
-      - /opt/so/conf/kafka/client.properties:/opt/kafka/config/kraft/client.properties
+      - /opt/so/conf/kafka/client.properties:/opt/kafka/config/kraft/client.properties:ro
+      - /opt/so/conf/kafka/log4j.properties:/opt/kafka/config/log4j.properties:ro
+{% if KAFKA_EXTERNAL_ACCESS %}
+      - /opt/so/conf/kafka/kafka_server_jaas.conf:/opt/kafka/config/kafka_server_jaas.conf:ro
+{% endif %}
     - watch:
 {% for sc in ['server', 'client'] %}
       - file: kafka_kraft_{{sc}}_properties
 {% endfor %}
+{% if KAFKA_EXTERNAL_ACCESS %}
+      - file: kafka_server_jaas_properties
+{% endif %}
       - file: kafkacertz
     - require:
       - file: kafkacertz
@@ -87,4 +95,4 @@ include:
 test.fail_without_changes:
   - name: {{sls}}_state_not_allowed

 {% endif %}
 {% endif %}
salt/kafka/etc/jaas.conf.jinja (new file, 16 lines)

@@ -0,0 +1,16 @@
{% set KAFKA_EXTERNAL_USERS = salt['pillar.get']('kafka:config:external_access:remote_users') -%}

{%- set valid_users = [] -%}

{%- for item, user in KAFKA_EXTERNAL_USERS.items() -%}
{% if 'password' in user and user.password is not none and user.password.strip() != "" -%}
{% do valid_users.append('user_' ~ user.username ~ '="' ~ user.password ~ '"') -%}
{% endif -%}
{%- endfor -%}

KafkaServer {
org.apache.kafka.common.security.plain.PlainLoginModule required
{% for user_entry in valid_users -%}
{{ user_entry }}{{ ";" if loop.last }}
{% endfor %}
};
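For illustration, not part of the diff: with a single hypothetical remote_users entry of username "analyst" and password "changeme", the template above would render roughly as:

```
KafkaServer {
org.apache.kafka.common.security.plain.PlainLoginModule required
user_analyst="changeme";
};
```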
101
salt/kafka/etc/log4j.properties
Normal file
101
salt/kafka/etc/log4j.properties
Normal file
@@ -0,0 +1,101 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Unspecified loggers and loggers with additivity=true output to server.log and stdout
# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
log4j.rootLogger=INFO, stdout, kafkaAppender

log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
log4j.appender.kafkaAppender.MaxFileSize=10MB
log4j.appender.kafkaAppender.MaxBackupIndex=10
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.stateChangeAppender=org.apache.log4j.RollingFileAppender
log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
log4j.appender.stateChangeAppender.MaxFileSize=10MB
log4j.appender.stateChangeAppender.MaxBackupIndex=10
log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.requestAppender=org.apache.log4j.RollingFileAppender
log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
log4j.appender.requestAppender.MaxFileSize=10MB
log4j.appender.requestAppender.MaxBackupIndex=10
log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.cleanerAppender=org.apache.log4j.RollingFileAppender
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
log4j.appender.cleanerAppender.MaxFileSize=10MB
log4j.appender.cleanerAppender.MaxBackupIndex=10
log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.controllerAppender=org.apache.log4j.RollingFileAppender
log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
log4j.appender.controllerAppender.MaxFileSize=10MB
log4j.appender.controllerAppender.MaxBackupIndex=10
log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.authorizerAppender=org.apache.log4j.RollingFileAppender
log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
log4j.appender.authorizerAppender.MaxFileSize=10MB
log4j.appender.authorizerAppender.MaxBackupIndex=10
log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

# Change the line below to adjust ZK client logging
log4j.logger.org.apache.zookeeper=INFO

# Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
log4j.logger.kafka=INFO
log4j.logger.org.apache.kafka=INFO

# Change to DEBUG or TRACE to enable request logging
log4j.logger.kafka.request.logger=WARN, requestAppender
log4j.additivity.kafka.request.logger=false

# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
# related to the handling of requests
#log4j.logger.kafka.network.Processor=TRACE, requestAppender
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
#log4j.additivity.kafka.server.KafkaApis=false
log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
log4j.additivity.kafka.network.RequestChannel$=false

# Change the line below to adjust KRaft mode controller logging
log4j.logger.org.apache.kafka.controller=INFO, controllerAppender
log4j.additivity.org.apache.kafka.controller=false

# Change the line below to adjust ZK mode controller logging
log4j.logger.kafka.controller=TRACE, controllerAppender
log4j.additivity.kafka.controller=false

log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
log4j.additivity.kafka.log.LogCleaner=false

log4j.logger.state.change.logger=INFO, stateChangeAppender
log4j.additivity.state.change.logger=false

# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses
log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender
log4j.additivity.kafka.authorizer.logger=false
@@ -10,9 +10,9 @@
write_kafka_pillar_yaml:
file.managed:
- name: /opt/so/saltstack/local/pillar/kafka/nodes.sls
- mode: 644
- mode: 640
- user: socore
- source: salt://kafka/files/managed_node_pillar.jinja
- template: jinja
- context:
COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }}

@@ -34,10 +34,6 @@ kafka:
sensitive: True
helpLink: kafka.html
broker:
advertised_x_listeners:
description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication.
title: advertised.listeners
helpLink: kafka.html
auto_x_create_x_topics_x_enable:
description: Enable the auto creation of topics.
title: auto.create.topics.enable
@@ -226,4 +222,52 @@ kafka:
description: The role performed by the controller node.
title: process.roles
readonly: True
helpLink: kafka.html
external_access:
enabled:
description: Enables or disables access to Kafka topics using user/password authentication. Used for producing/consuming messages via an external client.
forcedType: bool
helpLink: kafka.html
listeners:
description: Set of URIs that are listened on, and their listener names, in a comma-separated list.
title: listeners
readonly: True
advanced: True
helpLink: kafka.html
listener_x_security_x_protocol_x_map:
description: External listener name and mapped security protocol.
title: listener.security.protocol.map
readonly: True
advanced: True
helpLink: kafka.html
sasl_x_enabled_x_mechanisms:
description: SASL/PLAIN is a simple username/password authentication mechanism, used with TLS to implement secure authentication.
title: sasl.enabled.mechanisms
readonly: True
advanced: True
helpLink: kafka.html
sasl_x_mechanism_x_inter_x_broker_x_protocol:
description: SASL mechanism used for inter-broker communication.
title: sasl.mechanism.inter.broker.protocol
readonly: True
advanced: True
helpLink: kafka.html
remote_users:
user01: &remote_user
username:
description: Username to be used for the custom account.
forcedType: string
global: True
password:
description: Password to be used for the custom account.
forcedType: string
global: True
sensitive: True
user02: *remote_user
user03: *remote_user
user04: *remote_user
user05: *remote_user
user06: *remote_user
user07: *remote_user
user08: *remote_user
user09: *remote_user
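user02 through user09 above reuse the user01 schema via a YAML anchor and alias; a minimal illustration of that pattern (all keys here are hypothetical):

```
base: &schema          # anchor: define the mapping once
  forcedType: string
  global: True
another: *schema       # alias: reuse the identical mapping without repeating it
```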
@@ -22,7 +22,7 @@ kibana_pillar_directory:
kibana_secrets_pillar:
file.managed:
- name: /opt/so/saltstack/local/pillar/kibana/secrets.sls
- mode: 600
- mode: 640
- reload_pillar: True
- contents: |
kibana:

@@ -13,13 +13,21 @@ kratosgroup:
- name: kratos
- gid: 928

kratoshome:
file.directory:
- name: /opt/so/conf/kratos
- user: 928
- group: 928
- mode: 700
- makedirs: True

# Add Kratos user
kratos:
user.present:
- uid: 928
- gid: 928
- home: /opt/so/conf/kratos


kratosdir:
file.directory:
- name: /nsm/kratos

@@ -22,7 +22,15 @@ logstashgroup:
- name: logstash
- gid: 931

# Add the logstash user for the jog4j settings
logstashhome:
file.directory:
- name: /opt/so/conf/logstash
- user: 931
- group: 931
- mode: 700
- makedirs: True

# Add the logstash user for the log4j settings
logstash:
user.present:
- uid: 931

@@ -23,6 +23,8 @@ appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size = 1GB
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = /var/log/logstash

@@ -3,5 +3,5 @@ elastic_curl_config_distributed:
- name: /opt/so/saltstack/local/salt/elasticsearch/curl.config
- source: salt://elasticsearch/files/curl.config.template
- template: jinja
- mode: 600
- mode: 640
- show_changes: False

@@ -127,15 +127,28 @@ so_fleetagent_status:
- month: '*'
- dayweek: '*'

socore_own_saltstack:
socore_own_saltstack_default:
file.directory:
- name: /opt/so/saltstack
- name: /opt/so/saltstack/default
- user: socore
- group: socore
- recurse:
- user
- group

socore_own_saltstack_local:
file.directory:
- name: /opt/so/saltstack/local
- user: socore
- group: socore
- dir_mode: 750
- file_mode: 640
- replace: False
- recurse:
- user
- group
- mode

rules_dir:
file.directory:
- name: /nsm/rules/yara

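As an aside, a minimal sketch of the `file.directory` recurse semantics used in the local-tree state above (the state ID and path are hypothetical):

```
example_owned_tree:
  file.directory:
    - name: /opt/example       # hypothetical path
    - user: socore
    - group: socore
    - dir_mode: 750            # applied to directories when 'mode' is recursed
    - file_mode: 640           # applied to regular files when 'mode' is recursed
    - recurse:                 # push ownership and permissions down the whole tree
      - user
      - group
      - mode
```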
@@ -126,7 +126,7 @@ function testMinion() {
}

function restartMinion() {
salt "$MINION_ID" system.reboot
salt "$MINION_ID" system.reboot --async
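# --async submits the reboot job and returns immediately rather than blocking on a minion that is going down for reboot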
result=$?

exit $result

@@ -356,7 +356,7 @@ function syncElastic() {
[[ $? != 0 ]] && fail "Unable to read credential hashes from database"

user_data_formatted=$(echo "${userData}" | jq -r '.user + ":" + .data.hashed_password')
if lookup_salt_value "features" "" "pillar" | grep -x odc; then
if lookup_salt_value "features" "" "pillar" | grep -qx odc; then
# generate random placeholder salt/hash for users without passwords
random_crypt=$(get_random_value 53)
user_data_formatted=$(echo "${user_data_formatted}" | sed -r "s/^(.+:)\$/\\1\$2a\$12${random_crypt}/")

@@ -243,6 +243,13 @@ check_pillar_items() {
fi
}

check_saltmaster_status() {
set +e
echo "Waiting on the Salt Master service to be ready."
check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e
}

check_sudoers() {
if grep -q "so-setup" /etc/sudoers; then
echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."
@@ -409,6 +416,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.120 ]] && up_to_2.4.130
[[ "$INSTALLEDVERSION" == 2.4.130 ]] && up_to_2.4.140
[[ "$INSTALLEDVERSION" == 2.4.140 ]] && up_to_2.4.141
[[ "$INSTALLEDVERSION" == 2.4.141 ]] && up_to_2.4.150
true
}

@@ -435,6 +443,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.120 ]] && post_to_2.4.130
[[ "$POSTVERSION" == 2.4.130 ]] && post_to_2.4.140
[[ "$POSTVERSION" == 2.4.140 ]] && post_to_2.4.141
[[ "$POSTVERSION" == 2.4.141 ]] && post_to_2.4.150
true
}

@@ -551,9 +560,6 @@ post_to_2.4.130() {
echo "Updating Kibana default space"
/usr/sbin/so-kibana-space-defaults

echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers

POSTVERSION=2.4.130
}

@@ -567,6 +573,12 @@ post_to_2.4.141() {
POSTVERSION=2.4.141
}

post_to_2.4.150() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
POSTVERSION=2.4.150
}

repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -739,8 +751,6 @@ up_to_2.4.90() {
so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.password
so-yaml.py add /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.password "$kafkatrimpass"
so-yaml.py add /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.trustpass "$kafkatrust"
echo "If the Detection index exists, update the refresh_interval"
so-elasticsearch-query so-detection*/_settings -X PUT -d '{"index":{"refresh_interval":"1s"}}'

INSTALLEDVERSION=2.4.90
}
@@ -799,6 +809,13 @@ up_to_2.4.141() {
INSTALLEDVERSION=2.4.141
}

up_to_2.4.150() {
echo "If the Detection indices exist, update the refresh_interval"
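# Applies refresh_interval=1s to all indices matching so-detection* via the Elasticsearch index settings API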
so-elasticsearch-query so-detection*/_settings -X PUT -d '{"index":{"refresh_interval":"1s"}}'

INSTALLEDVERSION=2.4.150
}

add_hydra_pillars() {
mkdir -p /opt/so/saltstack/local/pillar/hydra
touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
@@ -1411,10 +1428,7 @@ main() {
systemctl_func "start" "salt-master"

# Testing that salt-master is up by checking that it is connected to itself
set +e
echo "Waiting on the Salt Master service to be ready."
check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e
check_saltmaster_status

# update the salt-minion configs here and start the minion
# since highstates are disabled above, minion start should not trigger a highstate
@@ -1441,10 +1455,7 @@ main() {

systemctl_func "start" "salt-master"

set +e
echo "Waiting on the Salt Master service to be ready."
check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e
check_saltmaster_status

echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
@@ -1456,6 +1467,7 @@ main() {
update_salt_mine

highstate
check_saltmaster_status
postupgrade_changes
[[ $is_airgap -eq 0 ]] && unmount_update


@@ -63,7 +63,7 @@ managerssl_crt:
- signing_policy: managerssl
- private_key: /etc/pki/managerssl.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- subjectAltName: "DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}, DNS:{{ GLOBALS.url_base }}"
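# The new SAN also covers the configured URL base so the certificate validates when the manager is reached by that name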
- days_remaining: 0
- days_valid: 820
- backup: True
@@ -121,7 +121,7 @@ so-nginx:
- /opt/so/log/nginx/:/var/log/nginx:rw
- /opt/so/tmp/nginx/:/var/lib/nginx:rw
- /opt/so/tmp/nginx/:/run:rw
- /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/:/opt/socore/html/packages
- /nsm/elastic-fleet/so_agent-installers/:/opt/socore/html/packages
- /nsm/elastic-fleet/artifacts/:/opt/socore/html/artifacts
{% if grains.role in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone', 'so-import'] %}
- /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro

29
salt/salt/master/mine_update_highstate.sls
Normal file
@@ -0,0 +1,29 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# This state should only be run on managers and should never be run manually

{% set MINION_ID = grains.id %}

# Run mine.update on all minions
salt.master.mine_update_highstate.update_mine_all_minions:
salt.function:
- name: mine.update
- tgt: '*'
- batch: 50
- retry:
attempts: 3
interval: 1

# Run highstate on the original minion
# we can use concurrent on this highstate because no other highstate would be running when this is called
# this state will run onlyif there is not an instance of it already running
salt.master.mine_update_highstate.run_highstate_on_{{ MINION_ID }}:
salt.state:
- tgt: {{ MINION_ID }}
- highstate: True
- concurrent: True
- onlyif:
- 'ps -ef | grep -v grep | grep "/usr/bin/salt-minion.*ProcessPayload.*jid=.*Minion._thread_return" | wc -l | grep -q "^0$"'
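For context, a master-side orchestration state like this is typically kicked off with `salt-run state.orchestrate` or from a reactor; a purely illustrative reactor SLS follows (the event tag and state ID are hypothetical, not part of this commit):

```
# Hypothetical reactor mapping on the master:
#   reactor:
#     - 'so/mine/update':
#       - salt://reactor/mine_update.sls
# Contents of the hypothetical reactor file:
invoke_mine_update_highstate:
  runner.state.orchestrate:
    - args:
        mods: salt.master.mine_update_highstate
```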
@@ -3,4 +3,3 @@ salt:
minion:
version: '3006.9'
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
service_start_delay: 30 # in seconds.

@@ -5,10 +5,10 @@
{% from 'salt/map.jinja' import SALTPACKAGES %}
{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
{% import_yaml 'salt/minion.defaults.yaml' as SALTMINION %}
{% set service_start_delay = SALTMINION.salt.minion.service_start_delay %}

include:
- salt.python_modules
- salt.patch.x509_v2
- salt
- systemd.reload
- repo.client
@@ -89,8 +89,6 @@ salt_minion_service_unit_file:
- name: {{ SYSTEMD_UNIT_FILE }}
- source: salt://salt/service/salt-minion.service.jinja
- template: jinja
- defaults:
service_start_delay: {{ service_start_delay }}
- onchanges_in:
- module: systemd_reload


6
salt/salt/patch/x509_v2/init.sls
Normal file
@@ -0,0 +1,6 @@
patch_x509_v2_state_module:
file.replace:
- name: /opt/saltstack/salt/lib/python3.10/site-packages/salt/states/x509_v2.py
- pattern: 'res = __salt__\["state.single"\]\("file.managed", name, test=test, \*\*kwargs\)'
- repl: 'res = __salt__["state.single"]("file.managed", name, test=test, concurrent=True, **kwargs)'
- backup: .bak
@@ -8,8 +8,9 @@ KillMode=process
Type=notify
NotifyAccess=all
LimitNOFILE=8192
ExecStartPre=/bin/bash -c 'until /sbin/ip -4 addr show dev {{ salt["pillar.get"]("host:mainint") }} | grep -q "inet "; do sleep 1; done'
ExecStart=/usr/bin/salt-minion
ExecStartPre=/bin/sleep {{ salt['pillar.get']('salt:minion:service_start_delay', service_start_delay) }}
TimeoutStartSec=120

[Install]
WantedBy=multi-user.target

@@ -1,2 +1,2 @@
requests>=2.31.0
python-whois>=0.7.3
python-whois>=0.9.5
Some files were not shown because too many files have changed in this diff.