merge and fix conflicts

Josh Patterson
2025-05-06 11:55:42 -04:00
71 changed files with 97 additions and 20 deletions

View File

@@ -1,10 +1,6 @@
name: python-test
on:
  push:
    paths:
      - "salt/sensoroni/files/analyzers/**"
      - "salt/manager/tools/sbin"
  pull_request:
    paths:
      - "salt/sensoroni/files/analyzers/**"
@@ -17,7 +13,7 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
        python-version: ["3.13"]
        python-code-path: ["salt/sensoroni/files/analyzers", "salt/manager/tools/sbin"]
    steps:
@@ -36,4 +32,4 @@ jobs:
          flake8 ${{ matrix.python-code-path }} --show-source --max-complexity=12 --doctests --max-line-length=200 --statistics
      - name: Test with pytest
        run: |
          pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini
          PYTHONPATH=${{ matrix.python-code-path }} pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini
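For context, the added PYTHONPATH prefix lets pytest's --doctest-modules resolve imports between the scripts in the target directory, and the matrix runner moves from Python 3.10 to 3.13. A hedged local equivalent of the CI step, reusing the flags from the workflow above (assumes flake8, pytest, and pytest-cov are installed and pytest.ini sits at the repo root):

# run the same lint/test pass locally for one matrix path (sketch)
PY_PATH="salt/sensoroni/files/analyzers"
flake8 "$PY_PATH" --show-source --max-complexity=12 --doctests --max-line-length=200 --statistics
PYTHONPATH="$PY_PATH" pytest "$PY_PATH" --cov="$PY_PATH" --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini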

View File

@@ -85,3 +85,6 @@ printf "\n### MSI Generated...\n"
printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace\n"
rm -rf /nsm/elastic-agent-workspace
printf "\n### Copying so_agent-installers to /nsm/elastic-fleet/ for nginx.\n"
\cp -vr /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/ /nsm/elastic-fleet/
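The leading backslash on \cp bypasses any shell alias (such as cp -i), so the recursive copy never prompts. A quick, hedged sanity check that the installers landed in the new location referenced by the nginx bind mount later in this commit:

# verify the copied installers exist where nginx will look for them (path taken from this diff)
ls -lh /nsm/elastic-fleet/so_agent-installers/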

View File

@@ -204,10 +204,10 @@ so-elasticsearch-roles-load:
- docker_container: so-elasticsearch
- file: elasticsearch_sbin_jinja
{% if grains.role in ['so-managersearch', 'so-heavynode', 'so-manager', 'so-managerhype'] %}
{% if grains.role in ['so-managersearch', 'so-manager', 'so-managerhype'] %}
{% set ap = "absent" %}
{% endif %}
{% if grains.role in ['so-eval', 'so-standalone'] %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
{% if ELASTICSEARCHMERGED.index_clean %}
{% set ap = "present" %}
{% else %}
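The net effect is that so-heavynode now follows the index_clean toggle instead of always getting the absent state. A hedged way to confirm which branch a given node takes (the pillar key elasticsearch:index_clean is an assumption about where ELASTICSEARCHMERGED.index_clean comes from):

# check the role grain and the index_clean setting that drive the {% set ap = ... %} logic above
salt-call grains.get role
salt-call pillar.get elasticsearch:index_clean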

View File

@@ -243,6 +243,13 @@ check_pillar_items() {
  fi
}
check_saltmaster_status() {
  set +e
  echo "Waiting on the Salt Master service to be ready."
  check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
  set -e
}
check_sudoers() {
  if grep -q "so-setup" /etc/sudoers; then
    echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."
@@ -1423,10 +1430,7 @@ main() {
  systemctl_func "start" "salt-master"
  # Testing that salt-master is up by checking that it is connected to itself
  set +e
  echo "Waiting on the Salt Master service to be ready."
  check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
  set -e
  check_saltmaster_status
  # update the salt-minion configs here and start the minion
  # since highstates are disabled above, a minion start should not trigger a highstate
@@ -1453,10 +1457,7 @@ main() {
  systemctl_func "start" "salt-master"
  set +e
  echo "Waiting on the Salt Master service to be ready."
  check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
  set -e
  check_saltmaster_status
  echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
  (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
@@ -1468,6 +1469,7 @@ main() {
  update_salt_mine
  highstate
  check_saltmaster_status
  postupgrade_changes
  [[ $is_airgap -eq 0 ]] && unmount_update
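The new check_saltmaster_status() replaces three copies of the same wait logic in main() and adds a fourth call after the highstate. A minimal annotated sketch of the helper as added here, with the errexit toggling called out (the assumption being that soup runs with set -e elsewhere):

# sketch of the wrapper pattern centralized by this change (assumption: strict error handling is active in soup)
check_saltmaster_status() {
  set +e   # relax errexit while probing the salt master
  echo "Waiting on the Salt Master service to be ready."
  check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
  set -e   # restore strict error handling for the rest of the upgrade
}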

View File

@@ -121,7 +121,7 @@ so-nginx:
- /opt/so/log/nginx/:/var/log/nginx:rw
- /opt/so/tmp/nginx/:/var/lib/nginx:rw
- /opt/so/tmp/nginx/:/run:rw
- /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/:/opt/socore/html/packages
- /nsm/elastic-fleet/so_agent-installers/:/opt/socore/html/packages
- /nsm/elastic-fleet/artifacts/:/opt/socore/html/artifacts
{% if GLOBALS.is_manager %}
- /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro
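With the bind source moved from the saltstack local tree to /nsm/elastic-fleet/, the agent installers nginx serves are now the copies produced by the build script earlier in this commit. A hedged check from the manager (container name taken from the state ID):

# confirm the relocated installers are visible inside the nginx container
docker exec so-nginx ls -lh /opt/socore/html/packages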

View File

@@ -36,7 +36,7 @@ class TestWhoisLookupMethods(unittest.TestCase):
    def test_sendReqNotFound(self):
        mock = MagicMock()
        mock.side_effect = whoisit.errors.ResourceDoesNotExist()
        mock.side_effect = whoisit.errors.ResourceDoesNotExist("foo")
        with patch('whoisit.domain', new=mock):
            response = whoislookup.sendReq("abcd1234.com")
            mock.assert_called_once_with("abcd1234.com", raw=True)
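Passing "foo" presumably tracks a newer whoisit release whose error classes now require a message argument; the value itself is irrelevant to the assertion. A hedged one-liner to confirm the behavior of the installed library (this is an assumption about whoisit's constructor, not something stated in the commit):

# raises TypeError on whoisit versions that require a message, succeeds on older ones
python3 -c 'from whoisit.errors import ResourceDoesNotExist; ResourceDoesNotExist()'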

View File

@@ -45,6 +45,24 @@ tgraf_sync_script_{{script}}:
        GLOBALS: {{ GLOBALS }}
{% endfor %}
{% if GLOBALS.is_manager or GLOBALS.role == 'so-heavynode' %}
tgraf_sync_script_esindexsize.sh:
  file.managed:
    - name: /opt/so/conf/telegraf/scripts/esindexsize.sh
    - user: root
    - group: 939
    - mode: 770
    - source: salt://telegraf/scripts/esindexsize.sh
{# Copy conf/elasticsearch/curl.config for telegraf to use with esindexsize.sh #}
tgraf_sync_escurl_conf:
  file.managed:
    - name: /opt/so/conf/telegraf/etc/escurl.config
    - user: 939
    - group: 939
    - mode: 400
    - source: salt://elasticsearch/curl.config
{% endif %}
telegraf_sbin:
  file.recurse:
    - name: /usr/sbin
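After a highstate on a manager or heavy node, the two managed files should exist with the ownership and modes declared above; a quick check:

# expect root:939 mode 770 for the script and 939:939 mode 400 for the curl config
ls -l /opt/so/conf/telegraf/scripts/esindexsize.sh /opt/so/conf/telegraf/etc/escurl.config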

View File

@@ -56,6 +56,9 @@ so-telegraf:
- /opt/so/log/sostatus:/var/log/sostatus:ro
- /opt/so/log/salt:/var/log/salt:ro
- /opt/so/log/agents:/var/log/agents:ro
{% if GLOBALS.is_manager or GLOBALS.role == 'so-heavynode' %}
- /opt/so/conf/telegraf/etc/escurl.config:/etc/telegraf/elasticsearch.config:ro
{% endif %}
{% if DOCKER.containers['so-telegraf'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-telegraf'].custom_bind_mounts %}
- {{ BIND }}
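Inside the container the curl config lands at the path the new index-size script expects, /etc/telegraf/elasticsearch.config, mounted read-only. A quick verification on a manager or heavy node:

# the mount should be present and read-only inside so-telegraf
docker exec so-telegraf ls -l /etc/telegraf/elasticsearch.config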

View File

@@ -192,7 +192,7 @@
#
# # Read stats from one or more Elasticsearch servers or clusters
{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-import'] %}
{%- if GLOBALS.is_manager or GLOBALS.role == 'so-heavynode' %}
[[inputs.elasticsearch]]
servers = ["https://{{ NODEIP }}:9200"]
cluster_stats = true
@@ -323,3 +323,13 @@
# # Read metrics about network interface usage
[[inputs.net]]
# Scripts run every 30s (or whatever TELEGRAFMERGED.config.interval is set to); the ES index size script doesn't need to run that frequently, so it gets its own interval below
{%- if GLOBALS.is_manager or GLOBALS.role == 'so-heavynode' %}
[[ inputs.exec ]]
commands = [
"/scripts/esindexsize.sh"
]
data_format = "influx"
interval = "1h"
{%- endif %}
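The exec input shells out to the script hourly and parses its stdout as influx line protocol. A hedged manual run (assumes the existing bind mount that exposes /opt/so/conf/telegraf/scripts as /scripts inside the container):

# should print a single elasticsearch_index_size measurement in influx line protocol
docker exec so-telegraf /scripts/esindexsize.sh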

View File

@@ -24,7 +24,7 @@ if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
  EVENTS=$(cat $LOGFILE | grep -wF events | awk '{print $2}' | tr -d ',')
  TOTAL=$(cat $LOGFILE | grep -wF total | awk '{print $2}' | tr -d ',')
  ALL=$(cat $LOGFILE | grep -wF all | awk '{print $2}' | tr -d ',')
  ACTIVE=$(cat $LOGFILE | grep -wF active | awk '{print $2}')
  ACTIVE=$(cat $LOGFILE | grep -wF active | awk '{print $2}' | tr -d ',')
  echo "agentstatus online=$ONLINE,error=$ERROR,inactive=$INACTIVE,offline=$OFFLINE,updating=$UPDATING,unenrolled=$UNENROLLED,other=$OTHER,events=$EVENTS,total=$TOTAL,all=$ALL,active=$ACTIVE"
fi
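The added tr -d ',' brings ACTIVE in line with the other numeric fields, presumably because the agent status output can include thousands separators (the surrounding fields already strip them), and a value like 1,234 is not a valid influx field. A tiny illustration:

# without the tr, ACTIVE could carry a comma and break the line protocol field
echo "active 1,234" | awk '{print $2}'               # -> 1,234
echo "active 1,234" | awk '{print $2}' | tr -d ','   # -> 1234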

View File

@@ -0,0 +1,45 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
if curl -K /etc/telegraf/elasticsearch.config -s -k -L "https://localhost:9200/" -w "%{http_code}" -o /dev/null | grep -q '200'; then
  DATASTREAM_INFO=$(curl -K /etc/telegraf/elasticsearch.config -s -k -L "https://localhost:9200/_data_stream?format=json")
  INDICES=$(curl -K /etc/telegraf/elasticsearch.config -s -k -L "https://localhost:9200/_cat/indices?h=index,store.size&bytes=b&s=index:asc&format=json")
  INDICES_WITH_SIZE=()
  while IFS= read -r DS; do
    datastream_indices=()
    datastream=$(echo "$DS" | jq -r '.name')
    # influx doesn't like key starting with '.'
    if [[ $datastream != .* ]]; then
      while IFS= read -r DS_IDX; do
        datastream_indices+=("$DS_IDX")
      done < <(echo "$DS" | jq -r '.indices[].index_name')
      datastream_size=0
      for idx in ${datastream_indices[@]}; do
        current_index=$(echo "$INDICES" | jq -r --arg index "$idx" '.[] | select(.index == $index)["store.size"]')
        datastream_size=$(($datastream_size + $current_index))
      done
      INDICES_WITH_SIZE+=("${datastream}=${datastream_size}i")
      # echo "$datastream size is $(echo "$datastream_size" | numfmt --to iec)"
    fi
  done < <(echo "$DATASTREAM_INFO" | jq -c '.data_streams[]')
  measurement="elasticsearch_index_size "
  total=${#INDICES_WITH_SIZE[@]}
  for idxws in "${!INDICES_WITH_SIZE[@]}"; do
    if [[ $idxws -lt $(($total - 1)) ]]; then
      measurement+="${INDICES_WITH_SIZE[$idxws]},"
    else
      measurement+="${INDICES_WITH_SIZE[$idxws]}"
    fi
  done
  echo "$measurement"
fi
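The final loop joins the per-data-stream fields with commas, skipping the trailing comma on the last entry, so the script emits a single influx line with one field per non-hidden data stream, sized in bytes with the integer 'i' suffix. A hedged, self-contained demo of that join step and the resulting shape (field names and sizes are illustrative, not from this commit):

# demo of the join logic above with hypothetical data stream sizes
INDICES_WITH_SIZE=("logs-a=100i" "logs-b=200i" "metrics-c=300i")
measurement="elasticsearch_index_size "
total=${#INDICES_WITH_SIZE[@]}
for i in "${!INDICES_WITH_SIZE[@]}"; do
  [[ $i -lt $((total - 1)) ]] && measurement+="${INDICES_WITH_SIZE[$i]}," || measurement+="${INDICES_WITH_SIZE[$i]}"
done
echo "$measurement"   # -> elasticsearch_index_size logs-a=100i,logs-b=200i,metrics-c=300i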