Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-07 09:42:46 +01:00)

Merge branch 'dev' of https://github.com/Security-Onion-Solutions/securityonion into dev

@@ -1,5 +1,5 @@
-{%- set DOCKERRANGE = salt['pillar.get']('docker:range') %}
-{%- set DOCKERBIND = salt['pillar.get']('docker:bip') %}
+{%- set DOCKERRANGE = salt['pillar.get']('docker:range', '172.17.0.0/24') %}
+{%- set DOCKERBIND = salt['pillar.get']('docker:bip', '172.17.0.1/24') %}
 {
   "registry-mirrors": [ "https://:5000" ],
   "bip": "{{ DOCKERBIND }}",
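
These two template defaults now fall back to Docker's stock bridge settings when the docker pillar keys are not defined. A minimal way to see what a given node will render, assuming salt-call is available on the minion (the commands below are illustrative and not part of this change):

    # Illustrative spot-check: empty output means the template will use the new
    # defaults (172.17.0.0/24 for the range, 172.17.0.1/24 for the bridge IP).
    salt-call pillar.get docker:range
    salt-call pillar.get docker:bip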

@@ -84,11 +84,13 @@ container_list() {
 TRUSTED_CONTAINERS=(
   "so-filebeat"
   "so-idstools"
+  "so-elasticsearch"
   "so-logstash"
   "so-nginx"
   "so-redis"
   "so-steno"
   "so-suricata"
+  "so-soc"
   "so-telegraf"
   "so-zeek"
 )
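
This hunk only adds so-elasticsearch and so-soc to the allowlist; the rest of container_list() is not shown here. As a rough, hypothetical sketch of how such an array is typically consumed (not code from this commit):

    # Hypothetical illustration only.
    for container in "${TRUSTED_CONTAINERS[@]}"; do
      echo "trusted container: ${container}"
    done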

@@ -217,6 +217,6 @@ https://{{ URLBASE }}/#/hunt?q=import.id:${HASH}%20%7C%20groupby%20event.module%
 or you can manually set your Time Range to be (in UTC):
 From: $START_OLDEST To: $END_NEWEST

-Please note that it may take 30 seconds or more for events to appear in Onion Hunt.
+Please note that it may take 30 seconds or more for events to appear in Hunt.
 EOF
 fi

@@ -29,14 +29,14 @@ echo "Running all.rules and $TESTRULE against the following pcap: $TESTPCAP"
 echo ""
 sleep 3

-cp /opt/so/conf/suricata/rules/all.rules /tmp/nids-testing/rules/all.rules
-cat $TESTRULE >> /tmp/nids-testing/rules/all.rules

 rm -rf /tmp/nids-testing/output
 mkdir -p /tmp/nids-testing/output
 chown suricata:socore /tmp/nids-testing/output
 mkdir -p /tmp/nids-testing/rules

+cp /opt/so/conf/suricata/rules/all.rules /tmp/nids-testing/rules/all.rules
+cat $TESTRULE >> /tmp/nids-testing/rules/all.rules

 echo "==== Begin Suricata Output ==="
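
The copy of all.rules and the append of the test rule now happen only after /tmp/nids-testing/rules has been recreated, so the staged rules are written only once the directory is guaranteed to exist. Condensed, the corrected order is (same paths as in the script above):

    mkdir -p /tmp/nids-testing/rules                                            # create the staging directory first
    cp /opt/so/conf/suricata/rules/all.rules /tmp/nids-testing/rules/all.rules
    cat "$TESTRULE" >> /tmp/nids-testing/rules/all.rules                        # then append the rule under test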

@@ -158,6 +158,7 @@ copy_new_files() {
 generate_and_clean_tarballs() {
   local new_version
   new_version=$(cat $UPDATE_DIR/VERSION)
+  [ -d /opt/so/repo ] || mkdir -p /opt/so/repo
   tar -cxf "/opt/so/repo/$new_version.tar.gz" "$UPDATE_DIR"
   find "/opt/so/repo" -type f -not -name "$new_version.tar.gz" -exec rm -rf {} \;
 }

@@ -290,7 +291,7 @@ rc3_to_2.3.0() {
   INSTALLEDVERSION=2.3.0
 }

-2.3.0_to_2.3.20(){
+2.3.0_to_2.3.20(){DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
   # Remove PCAP from global
   sed '/pcap:/d' /opt/so/saltstack/local/pillar/global.sls
   sed '/sensor_checkin_interval_ms:/d' /opt/so/saltstack/local/pillar/global.sls

@@ -340,6 +341,29 @@ space_check() {

 }

+thehive_maint() {
+  echo -n "Waiting for TheHive..."
+  COUNT=0
+  THEHIVE_CONNECTED="no"
+  while [[ "$COUNT" -le 240 ]]; do
+    curl --output /dev/null --silent --head --fail -k "https://localhost/thehive/api/alert"
+    if [ $? -eq 0 ]; then
+      THEHIVE_CONNECTED="yes"
+      echo "connected!"
+      break
+    else
+      ((COUNT+=1))
+      sleep 1
+      echo -n "."
+    fi
+  done
+  if [ "$THEHIVE_CONNECTED" == "yes" ]; then
+    echo "Migrating thehive databases if needed."
+    curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate"
+    curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate"
+  fi
+}
+
 unmount_update() {
   cd /tmp
   umount /tmp/soagupdate
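
The new thehive_maint function polls the TheHive API for up to 240 seconds and, once the endpoint answers, calls the TheHive and Cortex migration endpoints. The same wait-for-endpoint idea can be expressed as a small reusable helper; the sketch below is illustrative only (wait_for_url is a hypothetical name, and only curl is assumed):

    # Hypothetical generalization of the wait loop added above.
    wait_for_url() {
      local url=$1 tries=${2:-240}
      for ((i = 0; i < tries; i++)); do
        curl --output /dev/null --silent --head --fail -k "$url" && return 0
        sleep 1
      done
      return 1   # timed out; the caller decides how to react
    }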

@@ -583,9 +607,6 @@ if [[ "$FLEET_MANAGER" == "True" || "$FLEET_NODE" == "True" ]]; then
   echo ""
 fi

-echo ""
-echo "Applying common state for any package updates."
-salt-call -l info state.apply common queue=True
 echo ""
 echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
 salt-call state.highstate -l info queue=True

@@ -605,6 +626,7 @@ echo "Running a highstate. This could take several minutes."
 salt-call state.highstate -l info queue=True
 playbook
 unmount_update
+thehive_maint

 if [ "$UPGRADESALT" == "1" ]; then
   echo ""

@@ -18,6 +18,10 @@
 {%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
 {%- set MANAGER = salt['grains.get']('master') %}
 . /usr/sbin/so-common
+
+# Exit on errors, since all lines must succeed
+set -e
+
 # Check to see if we have extracted the ca cert.
 if [ ! -f /opt/so/saltstack/local/salt/common/cacerts ]; then
   docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }} -keystore /etc/pki/ca-trust/extracted/java/cacerts -alias SOSCA -import -file /etc/pki/ca.crt -storepass changeit -noprompt
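
With set -e in place, a failed keytool import now aborts this script instead of being silently ignored. If you want to confirm the SOSCA alias actually made it into the extracted trust store, a spot-check along these lines is possible once the cacerts file exists at the guarded path (illustrative only; it assumes keytool is installed on the manager host, which this diff does not establish):

    # Illustrative verification, not part of this change.
    keytool -list -keystore /opt/so/saltstack/local/salt/common/cacerts \
      -alias SOSCA -storepass changeit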

File diff suppressed because it is too large.

@@ -133,18 +133,19 @@ append_so-steno_so-status.conf:
   file.append:
     - name: /opt/so/conf/so-status/so-status.conf
     - text: so-steno
-    - unless: grep so-steno /opt/so/conf/so-status/so-status.conf
+    - unless: grep -q so-steno /opt/so/conf/so-status/so-status.conf

-{% if STENOOPTIONS.status == 'running' %}
-delete_so-steno_so-status.disabled:
-  file.uncomment:
-    - name: /opt/so/conf/so-status/so-status.conf
-    - regex: ^so-steno$
-{% elif STENOOPTIONS.status == 'stopped' %}
+{% if not STENOOPTIONS.start %}
 so-steno_so-status.disabled:
   file.comment:
     - name: /opt/so/conf/so-status/so-status.conf
     - regex: ^so-steno$
+{% else %}
+delete_so-steno_so-status.disabled:
+  file.uncomment:
+    - name: /opt/so/conf/so-status/so-status.conf
+    - regex: ^so-steno$
 {% endif %}

 {% else %}
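
Salt's unless requisite only looks at the command's exit status, so adding -q keeps the check's behaviour identical while keeping the matched line out of the state output. For example:

    # Same exit code as plain grep, but nothing is printed on a match:
    if grep -q so-steno /opt/so/conf/so-status/so-status.conf; then
      echo "so-steno already listed; file.append will be skipped"
    fi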

@@ -8,12 +8,12 @@ include:
 wait_for_playbook:
   cmd.run:
     - name: until nc -z {{ MAINIP }} 3200; do sleep 1; done
-    - timeout: 30
-    - onchanges:
-      - cmd: create_user
+    - timeout: 300

 create_user:
   cmd.script:
     - source: salt://playbook/files/automation_user_create.sh
     - cwd: /root
     - template: jinja
+    - onchanges:
+      - cmd: wait_for_playbook
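
Two things change here: the port wait gets a more realistic 300-second ceiling, and the onchanges requisite is moved so that create_user fires after wait_for_playbook reports a change, rather than the other way around. The wait itself is just a bounded port poll; a rough shell equivalent (PLAYBOOK_HOST is a hypothetical placeholder for the templated address) would be:

    # Rough equivalent of the state's wait: give up after 300 seconds.
    timeout 300 bash -c "until nc -z $PLAYBOOK_HOST 3200; do sleep 1; done"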

@@ -2,6 +2,8 @@
 # {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) -%}
 # {%- set automation_pass = salt['pillar.get']('secrets:playbook_automation', None) %}

+set -e
+
 local_salt_dir=/opt/so/saltstack/local

 try_count=6

@@ -44,7 +46,11 @@ while [[ $try_count -le 6 ]]; do
       echo " api_key: ${automation_api_key}"
     } >> $local_salt_dir/pillar/global.sls
   fi
+  exit 0
 fi
 ((try_count++))
 sleep "${interval}s"
 done

+# Timeout exceeded, exit with non-zero exit code
+exit 1
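
The added exit 0 stops the script as soon as the automation API key has been written to global.sls, and the final exit 1 turns a timeout into a visible failure for the calling cmd.script state, which treats a non-zero exit code as an error. The overall shape is a bounded retry loop; a self-contained skeleton (names, counts, and do_the_work are illustrative, not from this script) looks like this:

    attempts=6
    interval=10
    while [[ $attempts -ge 1 ]]; do
      if do_the_work; then
        exit 0            # success: stop retrying immediately
      fi
      ((attempts--))
      sleep "${interval}s"
    done
    exit 1                # every attempt failed: report a non-zero status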

@@ -1,42 +1,54 @@
 {
-  "title": "Security Onion 2.3.10 is here!",
+  "title": "Security Onion 2.3.20 is here!",
   "changes": [
-    { "summary": "UEFI installs with multiple disks should work as intended now." },
-    { "summary": "Telegraf scripts will now make sure they are not already running before execution." },
-    { "summary": "You are now prompted during setup if you want to change the docker IP range. If you change this it needs to be the same on all nodes in the grid." },
-    { "summary": "Soup will now download the new containers before stopping anything. If anything fails it will now exit and leave the grid at the current version." },
-    { "summary": "All containers are now hosted on quay.io to prevent pull limitations. We are now using GPG keys to determine if the image is from Security Onion." },
-    { "summary": "Osquery installers have been updated to osquery 4.5.1." },
-    { "summary": "Fix for bug where Playbook was not removing the Elastalert rules for inactive Plays." },
-    { "summary": "Exifdata reported by Strelka is now constrained to a single multi-valued field to prevent mapping explosion (scan.exiftool)." },
-    { "summary": "Resolved issue with Navigator layer(s) not loading correctly." },
-    { "summary": "Wazuh authd is now started by default on port 1515/tcp." },
-    { "summary": "Wazuh API default credentials are now removed after setup. Scripts have been added for API user management." },
-    { "summary": "Upgraded Salt to 3002.2 due to CVEs." },
-    { "summary": "If salt-minion is unable to apply states after the defined threshold, we assume salt-minion is in a bad state and the salt-minion service will be restarted." },
-    { "summary": "Fixed bug that prevented mysql from installing for Fleet if Playbook wasn't also installed." },
-    { "summary": "<code>so-status</code> will now show STARTING or WAIT_START, instead of ERROR, if <code>so-status</code> is run before a salt highstate has started or finished for the first time after system startup" },
-    { "summary": "Stenographer can now be disabled on a sensor node by setting the pillar steno:enabled:false in it's minion.sls file or globally if set in the global.sls file" },
-    { "summary": "Added <code>so-ssh-harden</code> script that runs the commands listed in <a href='https://docs.securityonion.net/en/2.3/ssh.html' target='so-help'>https://docs.securityonion.net/en/2.3/ssh.html</a>" },
-    { "summary": "NGINX now redirects the browser to the hostname/IP address/FQDN based on global:url_base" },
-    { "summary": "MySQL state now waits for MySQL server to respond to a query before completeing" },
-    { "summary": "Added Analyst option to network installs" },
-    { "summary": "Acknowledging (and Escalating) alerts did not consistently remove the alert from the visible list; this has been corrected." },
-    { "summary": "Escalating alerts that have a <i>rule.case_template</i> field defined will automatically assign that case template to the case generated in TheHive." },
-    { "summary": "Alerts and Hunt interface quick action bar has been converted into a vertical menu to improve quick action option clarity. Related changes also eliminated the issues that occurred when the quick action bar was appearing to the left of the visible browser area." },
-    { "summary": "Updated Go to newer version to fix a timezone, daylight savings time (DST) issue that resulted in Alerts and Hunt interfaces not consistently showing results." },
-    { "summary": "Improved Hunt and Alert table sorting." },
-    { "summary": "Alerts interface now allows absolute time searches." },
-    { "summary": "Alerts interface 'Hunt' quick action is now working as intended." },
-    { "summary": "Alerts interface 'Ack' icon tooltip has been changed from 'Dismiss' to 'Acknowledge' for consistency." },
-    { "summary": "Hunt interface bar charts will now show the quick action menu when clicked instead of assuming the click was intended to add an include filter." },
-    { "summary": "Hunt interface quick action will now cast a wider net on field searches." },
-    { "summary": "Now explicitly preventing the use of a dollar sign ($) character in web user passwords during setup." },
-    { "summary": "Cortex container will now restart properly if the SO host was not gracefully shutdown." },
-    { "summary": "Added syslog plugin to the logstash container; this is not in-use by default but available for those users that choose to use it." },
-    { "summary": "Winlogbeat download package is now available from the SOC Downloads interface." },
-    { "summary": "Upgraded Kratos authentication system." },
-    { "summary": "Added new Reset Defaults button to the SOC Profile Settings interface which allows users to reset all local browser SOC customizations back to their defaults. This includes things like default sort column, sort order, items per page, etc." },
-    { "summary": "Known Issues <ul><li>Following the Salt minion upgrade on remote nodes, the salt-minion service may not restart properly. If this occurs, you can ssh to the minion and run <code>sudo systemctl restart salt-minion</code>. If you do not want to connect to each node and manually restart the salt-minion, the new salt-minion watch process will restart it automatically after 1 hour.</li><li>During soup, you may see the following during the first highstate run, it can be ignored: <code>Rendering SLS '<some_sls_name_here>' failed: Jinja variable 'list object' has no attribute 'values'</code>. The second highstate will complete without that error.</li></ul>" }
+    { "summary": "soup has been refactored. You will need to run it a few times to get all the changes properly. We are working on making this even easier for future releases."},
+    { "summary": "soup now has awareness of Elastic Features and now downloads the appropriate Docker containers."},
+    { "summary": "The Sensors interface has been renamed to Grid. This interface now includes all Security Onion nodes."},
+    { "summary": "Grid interface now includes the status of the node. The status currently shows either Online (blue) or Offline (orange). If a node does not check-in on time then it will be marked as Offline."},
+    { "summary": "Grid interface now includes the IP and Role of each node in the grid."},
+    { "summary": "Grid interface includes a new Filter search input to filter the visible list of grid nodes to a desired subset. As an example, typing in “sensor” will hide all nodes except those that behave as a sensor."},
+    { "summary": "The Grid description field can now be customized via the local minion pillar file for each node."},
+    { "summary": "SOC will now draw attention to an unhealthy situation within the grid or with the connection between the user’s browser and the manager node. For example, when the Grid has at least one Offline node the SOC interface will show an exclamation mark in front of the browser tab’s title and an exclamation mark next to the Grid menu option in SOC. Additionally, the favicon will show an orange marker in the top-right corner (dynamic favicons not supported in Safari). Additionally, if the user’s web browser is unable to communicate with the manager the unhealth indicators appear along with a message at the top of SOC that states there is a connection problem."},
+    { "summary": "Docker has been upgraded to the latest version."},
+    { "summary": "Docker should be more reliable now as Salt is now managing daemon.json."},
+    { "summary": "You can now install Elastic in a traditional cluster. When setting up the manager select Advanced and follow the prompts. Replicas are controlled in global.sls."},
+    { "summary": "You can now use Hot and Warm routing with Elastic in a traditional cluster. You can change the box.type in the minion’s sls file. You will need to create a curator job to re-tag the indexes based on your criteria."},
+    { "summary": "Telegraf has been updated to version 1.16.3."},
+    { "summary": "Grafana has been updated to 7.3.4 to resolve some XSS vulnerabilities."},
+    { "summary": "Grafana graphs have been changed to graphs vs guages so alerting can be set up."},
+    { "summary": "Grafana is now completely pillarized, allowing users to customize alerts and making it customizable for email, Slack, etc. See the docs <a href=\"https://securityonion.net/docs/grafana\">here</a>."},
+    { "summary": "Yara rules now should properly install on non-airgap installs. Previously, users had to wait for an automated job to place them in the correct location."},
+    { "summary": "Strelka backend will not stop itself any more. Previously, its behavior was to shut itself down after fifteen minutes and wait for Salt to restart it to look for work before shutting down again."},
+    { "summary": "Strelka daily rule updates are now logged to <code>/nsm/strelka/log/yara-update.log</code>"},
+    { "summary": "Several changes to the setup script to improve install reliability."},
+    { "summary": "Airgap now supports the import node type."},
+    { "summary": "Custom Zeek file extraction values in the pillar now work properly."},
+    { "summary": "TheHive has been updated to support Elastic 7."},
+    { "summary": "Cortex image now includes whois package to correct an issue with the CERTatPassiveDNS analyzer."},
+    { "summary": "Hunt and Alert quick action menu has been refactored into submenus."},
+    { "summary": "New clipboard quick actions now allow for copying fields or entire events to the clipboard."},
+    { "summary": "PCAP Add Job form now retains previous job details for quickly adding additional jobs. A new Clear button now exists at the bottom of this form to clear out these fields and forget the previous job details."},
+    { "summary": "PCAP Add Job form now allows users to perform arbitrary PCAP lookups of imported PCAP data (data imported via the <code>so-import-pcap</code> script)."},
+    { "summary": "Downloads page now allows direct download of Wazuh agents for Linux, Mac, and Windows from the manager, and shows the version of Wazuh and Elastic installed with Security Onion."},
+    { "summary": "PCAP job interface now shows additional job filter criteria when expanding the job filter details."},
+    { "summary": "Upgraded authentication backend to Kratos 0.5.5."},
+    { "summary": "SOC tables with the “Rows per Page” dropdown no longer show truncated page counts."},
+    { "summary": "Several Hunt errors are now more descriptive, particularly those around malformed queries."},
+    { "summary": "SOC Error banner has been improved to avoid showing raw HTML syntax, making connection and server-side errors more readable."},
+    { "summary": "Hunt and Alerts interfaces will now allow pivoting to PCAP from a group of results if the grouped results contain a network.community_id field."},
+    { "summary": "New “Correlate” quick action will pivot to a new Hunt search for all events that can be correlated by at least one of various event IDs."},
+    { "summary": "Fixed bug that caused some Hunt queries to not group correctly without a .keyword suffix. This has been corrected so that the .keyword suffix is no longer necessary on those groupby terms."},
+    { "summary": "Fixed issue where PCAP interface loses formatting and color coding when opening multiple PCAP tabs."},
+    { "summary": "Alerts interface now has a Refresh button that allows users to refresh the current alerts view without refreshing the entire SOC application."},
+    { "summary": "Hunt and Alerts interfaces now have an auto-refresh dropdown that will automatically refresh the current view at the selected frequency."},
+    { "summary": "The <code>so-elastalert-test</code> script has been refactored to work with Security Onion 2.3."},
+    { "summary": "The included Logstash image now includes Kafka plugins."},
+    { "summary": "Wazuh agent registration process has been improved to support slower hardware and networks."},
+    { "summary": "An Elasticsearch ingest pipeline has been added for suricata.ftp_data."},
+    { "summary": "Elasticsearch’s indices.query.bool.max_clause_count value has been increased to accommodate a slightly larger number of fields (1024 -> 1500) when querying using a wildcard."},
+    { "summary": "On nodes being added to an existing grid, setup will compare the version currently being installed to the manager (>=2.3.20), pull the correct Security Onion version from the manager if there is a mismatch, and run that version."},
+    { "summary": "Setup will gather any errors found during a failed install into <code>/root/errors.log</code> for easy copy/paste and debugging."},
+    { "summary": "Selecting Suricata as the metadata engine no longer results in the install failing."},
+    { "summary": "<code>so-rule-update</code> now accepts arguments to idstools. For example, <code>so-rule-update -f</code> will force idstools to pull rules, ignoring the default 15-minute pull limit."}
   ]
 }
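
This block swaps the 2.3.10 release notes for the 2.3.20 list in what appears to be the JSON changelog rendered by SOC. After hand-editing a structure like this it is worth running a cheap syntax check; the file name below is a hypothetical stand-in, since the real path is not visible in this diff:

    # Hypothetical validation step; replace changes.json with the actual path.
    jq . changes.json > /dev/null && echo "valid JSON"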

@@ -167,6 +167,14 @@ append_so-suricata_so-status.conf:
   file.append:
     - name: /opt/so/conf/so-status/so-status.conf
     - text: so-suricata
+    - unless: grep -q so-suricata /opt/so/conf/so-status/so-status.conf
+
+{% if grains.role == 'so-import' %}
+disable_so-suricata_so-status.conf:
+  file.comment:
+    - name: /opt/so/conf/so-status/so-status.conf
+    - regex: ^so-suricata$
+{% endif %}

 surilogrotate:
   file.managed:
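
On so-import nodes the new block comments the so-suricata entry out of so-status.conf so the status tooling stops expecting that container. In plain shell terms, file.comment and its counterpart file.uncomment manage roughly the following edits, except that Salt applies them declaratively and idempotently (the sed commands are illustrative only, not part of this change):

    sed -i 's/^so-suricata$/#so-suricata/' /opt/so/conf/so-status/so-status.conf    # what file.comment enforces
    sed -i 's/^#so-suricata$/so-suricata/' /opt/so/conf/so-status/so-status.conf    # what file.uncomment enforces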

@@ -48,6 +48,7 @@ so-telegraf:
       - HOST_ETC=/host/etc
       - HOST_SYS=/host/sys
       - HOST_MOUNT_PREFIX=/host
+      - GODEBUG=x509ignoreCN=0
     - network_mode: host
     - binds:
       - /opt/so/log/telegraf:/var/log/telegraf:rw
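
GODEBUG=x509ignoreCN=0 re-enables Go's legacy fallback of treating a certificate's CommonName as a hostname when no Subject Alternative Names are present, which keeps Telegraf (a Go program) talking to older internally generated certificates. The same toggle applies to any ad-hoc run of a Go binary; for example (illustrative invocation and config path, not from this commit):

    GODEBUG=x509ignoreCN=0 telegraf --config /etc/telegraf/telegraf.conf --test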

@@ -200,6 +200,14 @@ append_so-zeek_so-status.conf:
   file.append:
     - name: /opt/so/conf/so-status/so-status.conf
     - text: so-zeek
+    - unless: grep -q so-zeek /opt/so/conf/so-status/so-status.conf
+
+{% if grains.role == 'so-import' %}
+disable_so-zeek_so-status.conf:
+  file.comment:
+    - name: /opt/so/conf/so-status/so-status.conf
+    - regex: ^so-zeek$
+{% endif %}

 {% else %}

Binary image file changed (not shown). Size before: 188 KiB; after: 245 KiB.
Binary image file changed (not shown). Size before: 138 KiB; after: 168 KiB.

@@ -719,7 +719,7 @@ create_local_directories() {
   for d in $(find $PILLARSALTDIR/$i -type d); do
     suffixdir=${d//$PILLARSALTDIR/}
     if [ ! -d "$local_salt_dir/$suffixdir" ]; then
-      mkdir -v "$local_salt_dir$suffixdir" >> "$setup_log" 2>&1
+      mkdir -pv "$local_salt_dir$suffixdir" >> "$setup_log" 2>&1
     fi
   done
   chown -R socore:socore "$local_salt_dir/$i"
@@ -1198,7 +1198,10 @@ manager_global() {
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -z "$DOCKERNET" ]; then
|
if [ -z "$DOCKERNET" ]; then
|
||||||
DOCKERNET=172.17.0.0
|
DOCKERNET=172.17.0.0
|
||||||
|
DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
|
||||||
|
else
|
||||||
|
DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create a global file for global values
|
# Create a global file for global values
|
||||||
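
The awk expression derives the Docker bridge IP from the chosen network by replacing the last octet with 1 and appending a /24 mask, so both the default network and a user-supplied one resolve the same way. Worked example:

    DOCKERNET=172.17.0.0
    DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
    echo "$DOCKERBIP"    # prints 172.17.0.1/24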

@@ -1276,9 +1279,9 @@ manager_global() {
 " discovery_nodes: 1"\
 " hot_warm_enabled: False"\
 " cluster_routing_allocation_disk.threshold_enabled: true"\
 " cluster_routing_allocation_disk_watermark_low: '95%'"\
 " cluster_routing_allocation_disk_watermark_high: '98%'"\
 " cluster_routing_allocation_disk_watermark_flood_stage: '98%'"\
 " index_settings:"\
 " so-beats:"\
 " shards: 1"\

@@ -1346,6 +1349,9 @@ manager_global() {
 " playbook:"\
 " rulesets:"\
 " - windows"\
+"docker:"\
+" range: '$DOCKERNET/24'"\
+" bip: '$DOCKERBIP'"\
 "redis_settings:"\
 " redis_maxmemory: 812" >> "$global_pillar"

@@ -1525,7 +1531,7 @@ reinstall_init() {

   if command -v docker &> /dev/null; then
     # Stop and remove all so-* containers so files can be changed with more safety
-    if [ $(docker ps -a -q --filter "name=so-") -gt 0 ]; then
+    if [ $(docker ps -a -q --filter "name=so-" | wc -l) -gt 0 ]; then
       docker stop $(docker ps -a -q --filter "name=so-")
       docker rm -f $(docker ps -a -q --filter "name=so-")
     fi
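
The original test handed the raw list of container IDs to [ ... -gt 0 ], which breaks whenever more than one so-* container exists (too many operands) or none exist (empty operand); piping through wc -l always yields a single integer, so the numeric comparison is safe. For example:

    count=$(docker ps -a -q --filter "name=so-" | wc -l)   # 0 when there are no matches
    if [ "$count" -gt 0 ]; then
      echo "found $count so-* containers to stop and remove"
    fi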

@@ -1941,7 +1947,6 @@ sensor_pillar() {
   if [ "$HNSENSOR" != 'inherit' ]; then
     echo " hnsensor: $HNSENSOR" >> "$pillar_file"
   fi
-
 }

 set_default_log_size() {

@@ -836,7 +836,7 @@ whiptail_manager_adv_escluster(){
   [ -n "$TESTING" ] && return

   whiptail --title "Security Onion Setup" --yesno \
-  "Do you want to set up a traditional ES cluster?" 8 75
+  "Do you want to set up a traditional ES cluster for using replicas and/or Hot-Warm indices? Recommended only for those who have experience with ES clustering! " 12 75

   local exitstatus=$?
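
whiptail --yesno exits 0 when the user answers Yes and 1 for No, so the captured exitstatus drives whatever branch follows; the taller 12-row box simply makes room for the longer warning text. A minimal sketch of how that status is typically consumed (hypothetical continuation, not shown in this hunk):

    if [ "$exitstatus" -eq 0 ]; then
      echo "user chose to build a traditional ES cluster"
    else
      echo "user declined"
    fi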