Mirror of https://github.com/Security-Onion-Solutions/securityonion.git

Commit: Merge branch 'dev' into feature/docker-prune-rework
@@ -86,10 +86,82 @@ add_interface_bond0() {
 fi
 }
 
+check_container() {
+  docker ps | grep "$1:" > /dev/null 2>&1
+  return $?
+}
+
+check_password() {
+  local password=$1
+  echo "$password" | egrep -v "'|\"|\\$|\\\\" > /dev/null 2>&1
+  return $?
+}
+
+fail() {
+  msg=$1
+  echo "ERROR: $msg"
+  echo "Exiting."
+  exit 1
+}
+
+get_random_value() {
+  length=${1:-20}
+  head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
+}
+
 header() {
   printf '%s\n' "" "$banner" " $*" "$banner"
 }
 
+init_monitor() {
+  MONITORNIC=$1
+
+  if [[ $MONITORNIC == "bond0" ]]; then
+    BIFACES=$(lookup_bond_interfaces)
+  else
+    BIFACES=$MONITORNIC
+  fi
+
+  for DEVICE_IFACE in $BIFACES; do
+    for i in rx tx sg tso ufo gso gro lro; do
+      ethtool -K "$DEVICE_IFACE" "$i" off;
+    done
+    ip link set dev "$DEVICE_IFACE" arp off multicast off allmulticast off promisc on
+  done
+}
+
+is_manager_node() {
+  # Check to see if this is a manager node
+  role=$(lookup_role)
+  is_single_node_grid && return 0
+  [ $role == 'manager' ] && return 0
+  [ $role == 'managersearch' ] && return 0
+  [ $role == 'helix' ] && return 0
+  return 1
+}
+
+is_sensor_node() {
+  # Check to see if this is a sensor (forward) node
+  role=$(lookup_role)
+  is_single_node_grid && return 0
+  [ $role == 'sensor' ] && return 0
+  [ $role == 'heavynode' ] && return 0
+  [ $role == 'helix' ] && return 0
+  return 1
+}
+
+is_single_node_grid() {
+  role=$(lookup_role)
+  [ $role == 'eval' ] && return 0
+  [ $role == 'standalone' ] && return 0
+  [ $role == 'import' ] && return 0
+  return 1
+}
+
+lookup_bond_interfaces() {
+  cat /proc/net/bonding/bond0 | grep "Slave Interface:" | sed -e "s/Slave Interface: //g"
+}
+
 lookup_salt_value() {
   key=$1
   group=$2
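The helpers above are consumed throughout this changeset. A minimal usage sketch (the container name is hypothetical, not part of the commit):

    check_container so-elasticsearch || fail "Elasticsearch container is not running"
    secret=$(get_random_value 32)   # 32-character alphanumeric string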
@@ -129,15 +201,41 @@ lookup_role() {
 echo ${pieces[1]}
 }
 
-check_container() {
-  docker ps | grep "$1:" > /dev/null 2>&1
-  return $?
+require_manager() {
+  if is_manager_node; then
+    echo "This is a manager; we can proceed."
+  else
+    echo "Please run this command on the manager; the manager controls the grid."
+    exit 1
+  fi
 }
 
-check_password() {
-  local password=$1
-  echo "$password" | egrep -v "'|\"|\\$|\\\\" > /dev/null 2>&1
-  return $?
+retry() {
+  maxAttempts=$1
+  sleepDelay=$2
+  cmd=$3
+  expectedOutput=$4
+  attempt=0
+  while [[ $attempt -lt $maxAttempts ]]; do
+    attempt=$((attempt+1))
+    echo "Executing command with retry support: $cmd"
+    output=$(eval "$cmd")
+    exitcode=$?
+    echo "Results: $output ($exitcode)"
+    if [ -n "$expectedOutput" ]; then
+      if [[ "$output" =~ "$expectedOutput" ]]; then
+        return $exitcode
+      else
+        echo "Expected '$expectedOutput' but got '$output'"
+      fi
+    elif [[ $exitcode -eq 0 ]]; then
+      return $exitcode
+    fi
+    echo "Command failed with exit code $exitcode; will retry in $sleepDelay seconds ($attempt / $maxAttempts)..."
+    sleep $sleepDelay
+  done
+  echo "Command continues to fail; giving up."
+  return 1
+}
 
 set_os() {
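The retry helper wraps a command in bounded retries. The first call below appears verbatim later in this changeset; the second illustrates the optional fourth argument with hypothetical values:

    retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
    # Succeed only when the command output contains the expected substring:
    retry 5 2 "docker ps" "so-elasticsearch"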
@@ -171,83 +269,6 @@ set_version() {
 fi
 }
 
-require_manager() {
-  if is_manager_node; then
-    echo "This is a manager; we can proceed."
-  else
-    echo "Please run this command on the manager; the manager controls the grid."
-    exit 1
-  fi
-}
-
-is_manager_node() {
-  # Check to see if this is a manager node
-  role=$(lookup_role)
-  is_single_node_grid && return 0
-  [ $role == 'manager' ] && return 0
-  [ $role == 'managersearch' ] && return 0
-  [ $role == 'helix' ] && return 0
-  return 1
-}
-
-is_sensor_node() {
-  # Check to see if this is a sensor (forward) node
-  role=$(lookup_role)
-  is_single_node_grid && return 0
-  [ $role == 'sensor' ] && return 0
-  [ $role == 'heavynode' ] && return 0
-  [ $role == 'helix' ] && return 0
-  return 1
-}
-
-is_single_node_grid() {
-  role=$(lookup_role)
-  [ $role == 'eval' ] && return 0
-  [ $role == 'standalone' ] && return 0
-  [ $role == 'import' ] && return 0
-  return 1
-}
-
-fail() {
-  msg=$1
-  echo "ERROR: $msg"
-  echo "Exiting."
-  exit 1
-}
-
-get_random_value() {
-  length=${1:-20}
-  head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
-}
-
-retry() {
-  maxAttempts=$1
-  sleepDelay=$2
-  cmd=$3
-  expectedOutput=$4
-  attempt=0
-  while [[ $attempt -lt $maxAttempts ]]; do
-    attempt=$((attempt+1))
-    echo "Executing command with retry support: $cmd"
-    output=$(eval "$cmd")
-    exitcode=$?
-    echo "Results: $output ($exitcode)"
-    if [ -n "$expectedOutput" ]; then
-      if [[ "$output" =~ "$expectedOutput" ]]; then
-        return $exitcode
-      else
-        echo "Expected '$expectedOutput' but got '$output'"
-      fi
-    elif [[ $exitcode -eq 0 ]]; then
-      return $exitcode
-    fi
-    echo "Command failed with exit code $exitcode; will retry in $sleepDelay seconds ($attempt / $maxAttempts)..."
-    sleep $sleepDelay
-  done
-  echo "Command continues to fail; giving up."
-  return 1
-}
-
 valid_cidr() {
   # Verify there is a forward slash in the string
   echo "$1" | grep -qP "^[^/]+/[^/]+$" || return 1
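Only the first validation of valid_cidr is visible in this hunk; it requires exactly one "/" separator. Illustrative calls (the remainder of the function is outside this diff):

    valid_cidr "10.0.0.0/8"                 # passes the slash check shown above
    valid_cidr "10.0.0.0" || echo "rejected: no '/' present"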
@@ -312,7 +333,7 @@ valid_ip4() {
 valid_int() {
   local num=$1
   local min=${2:-1}
-  local max=${3:-1000}
+  local max=${3:-1000000000}
 
   [[ $num =~ ^[0-9]*$ ]] && [[ $num -ge $min ]] && [[ $num -le $max ]] && return 0 || return 1
 }
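With the new default maximum, an integer prompt no longer fails on values above 1000 unless a bound is given explicitly. Illustrative calls:

    valid_int "1500"               # ok: within the default range [1, 1000000000]
    valid_int "$MTU" "68" "10000"  # explicit bounds, as used for the MTU prompt later in this diff
    valid_int "12ab" || echo "rejected: not a number"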
@@ -17,4 +17,11 @@
 
 . /usr/sbin/so-common
 
+# Regenerate ElastAlert & update Plays
 docker exec so-soctopus python3 playbook_play-update.py
+
+# Delete current Elastalert Rules
+rm /opt/so/rules/elastalert/playbook/*.yaml
+
+# Regenerate Elastalert Rules
+so-playbook-sync
@@ -24,11 +24,11 @@ show_stats() {
 echo
 echo "Average throughput:"
 echo
-docker exec -it so-zeek /opt/zeek/bin/zeekctl capstats
+docker exec so-zeek env -i PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/bin:/usr/local/bin:/usr/local/sbin runuser -l zeek -c '/opt/zeek/bin/zeekctl capstats'
 echo
 echo "Average packet loss:"
 echo
-docker exec -it so-zeek /opt/zeek/bin/zeekctl netstats
+docker exec so-zeek env -i PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/bin:/usr/local/bin:/usr/local/sbin runuser -l zeek -c '/opt/zeek/bin/zeekctl netstats'
 echo
 }
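The rewritten commands drop -it (no TTY is needed, so the script also works non-interactively, e.g. from cron) and run zeekctl as the zeek service account with a clean, explicit environment. The general shape, with a harmless illustrative command standing in for zeekctl:

    docker exec so-zeek env -i PATH=/bin:/usr/bin runuser -l zeek -c 'id'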
@@ -121,17 +121,13 @@ check_sudoers() {
 }
 
 check_log_size_limit() {
-  local wait_for_enter=false
-
   local num_minion_pillars
   num_minion_pillars=$(find /opt/so/saltstack/local/pillar/minions/ -type f | wc -l)
 
   if [[ $num_minion_pillars -gt 1 ]]; then
     if find /opt/so/saltstack/local/pillar/minions/ -type f | grep -q "_heavynode"; then
-      wait_for_enter=true
-      echo "[INFO] The value of log_size_limit in any heavy node minion pillars may be incorrect."
-      echo "  -> We recommend checking and adjusting the values as necessary."
-      echo "  -> Minion pillar directory: /opt/so/saltstack/local/pillar/minions/"
+      lsl_msg='distributed'
     fi
   else
     local minion_id
@@ -172,16 +168,8 @@ check_log_size_limit() {
 new_limit=$( echo "$disk_size_gb" "$percent" | awk '{printf("%.0f", $1 * ($2/100))}')
 
 if [[ $current_limit != "$new_limit" ]]; then
-  wait_for_enter=true
-  echo "[WARNING] The value of log_size_limit (${current_limit}) does not match the recommended value of ${new_limit}."
-  echo "  -> We recommend checking and adjusting the value as necessary."
-  echo "  -> File: /opt/so/saltstack/local/pillar/minions/${minion_id}.sls"
-fi
-
-if [[ $wait_for_enter == true ]]; then
-  echo ""
-  read -n 1 -s -r -p "Press any key to continue..."
-  echo "" # Since read doesn't print a newline, print one for it
+  lsl_msg='single-node'
+  lsl_details=( "$current_limit" "$new_limit" "$minion_id" )
 fi
 fi
 }
@@ -742,7 +730,21 @@ fi
 
 check_sudoers
 
+if [[ -n $lsl_msg ]]; then
+  case $lsl_msg in
+    'distributed')
+      echo "[INFO] The value of log_size_limit in any heavy node minion pillars may be incorrect."
+      echo "  -> We recommend checking and adjusting the values as necessary."
+      echo "  -> Minion pillar directory: /opt/so/saltstack/local/pillar/minions/"
+      ;;
+    'single-node')
+      # We can assume the lsl_details array has been set if lsl_msg has this value
+      echo "[WARNING] The value of log_size_limit (${lsl_details[0]}) does not match the recommended value of ${lsl_details[1]}."
+      echo "  -> We recommend checking and adjusting the value as necessary."
+      echo "  -> File: /opt/so/saltstack/local/pillar/minions/${lsl_details[2]}.sls"
+      ;;
+  esac
+fi
 
 }
@@ -2,4 +2,4 @@
 # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
 salt:
   master:
-    version: 3002.2
+    version: 3002.5
@@ -2,5 +2,5 @@
 # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
 salt:
   minion:
-    version: 3002.2
+    version: 3002.5
   check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
@@ -1,54 +1,48 @@
 {
-  "title": "Security Onion 2.3.20 is here!",
+  "title": "Security Onion 2.3.30 is here!",
   "changes": [
-    { "summary": "soup has been refactored. You will need to run it a few times to get all the changes properly. We are working on making this even easier for future releases."},
-    { "summary": "soup now has awareness of Elastic Features and now downloads the appropriate Docker containers."},
-    { "summary": "The Sensors interface has been renamed to Grid. This interface now includes all Security Onion nodes."},
-    { "summary": "Grid interface now includes the status of the node. The status currently shows either Online (blue) or Offline (orange). If a node does not check-in on time then it will be marked as Offline."},
-    { "summary": "Grid interface now includes the IP and Role of each node in the grid."},
-    { "summary": "Grid interface includes a new Filter search input to filter the visible list of grid nodes to a desired subset. As an example, typing in “sensor” will hide all nodes except those that behave as a sensor."},
-    { "summary": "The Grid description field can now be customized via the local minion pillar file for each node."},
-    { "summary": "SOC will now draw attention to an unhealthy situation within the grid or with the connection between the user’s browser and the manager node. For example, when the Grid has at least one Offline node the SOC interface will show an exclamation mark in front of the browser tab’s title and an exclamation mark next to the Grid menu option in SOC. Additionally, the favicon will show an orange marker in the top-right corner (dynamic favicons not supported in Safari). Additionally, if the user’s web browser is unable to communicate with the manager the unhealth indicators appear along with a message at the top of SOC that states there is a connection problem."},
-    { "summary": "Docker has been upgraded to the latest version."},
-    { "summary": "Docker should be more reliable now as Salt is now managing daemon.json."},
-    { "summary": "You can now install Elastic in a traditional cluster. When setting up the manager select Advanced and follow the prompts. Replicas are controlled in global.sls."},
-    { "summary": "You can now use Hot and Warm routing with Elastic in a traditional cluster. You can change the box.type in the minion’s sls file. You will need to create a curator job to re-tag the indexes based on your criteria."},
-    { "summary": "Telegraf has been updated to version 1.16.3."},
-    { "summary": "Grafana has been updated to 7.3.4 to resolve some XSS vulnerabilities."},
-    { "summary": "Grafana graphs have been changed to graphs vs guages so alerting can be set up."},
-    { "summary": "Grafana is now completely pillarized, allowing users to customize alerts and making it customizable for email, Slack, etc. See the docs <a href=\"https://securityonion.net/docs/grafana\">here</a>."},
-    { "summary": "Yara rules now should properly install on non-airgap installs. Previously, users had to wait for an automated job to place them in the correct location."},
-    { "summary": "Strelka backend will not stop itself any more. Previously, its behavior was to shut itself down after fifteen minutes and wait for Salt to restart it to look for work before shutting down again."},
-    { "summary": "Strelka daily rule updates are now logged to <code>/nsm/strelka/log/yara-update.log</code>"},
-    { "summary": "Several changes to the setup script to improve install reliability."},
-    { "summary": "Airgap now supports the import node type."},
-    { "summary": "Custom Zeek file extraction values in the pillar now work properly."},
-    { "summary": "TheHive has been updated to support Elastic 7."},
-    { "summary": "Cortex image now includes whois package to correct an issue with the CERTatPassiveDNS analyzer."},
-    { "summary": "Hunt and Alert quick action menu has been refactored into submenus."},
-    { "summary": "New clipboard quick actions now allow for copying fields or entire events to the clipboard."},
-    { "summary": "PCAP Add Job form now retains previous job details for quickly adding additional jobs. A new Clear button now exists at the bottom of this form to clear out these fields and forget the previous job details."},
-    { "summary": "PCAP Add Job form now allows users to perform arbitrary PCAP lookups of imported PCAP data (data imported via the <code>so-import-pcap</code> script)."},
-    { "summary": "Downloads page now allows direct download of Wazuh agents for Linux, Mac, and Windows from the manager, and shows the version of Wazuh and Elastic installed with Security Onion."},
-    { "summary": "PCAP job interface now shows additional job filter criteria when expanding the job filter details."},
-    { "summary": "Upgraded authentication backend to Kratos 0.5.5."},
-    { "summary": "SOC tables with the “Rows per Page” dropdown no longer show truncated page counts."},
-    { "summary": "Several Hunt errors are now more descriptive, particularly those around malformed queries."},
-    { "summary": "SOC Error banner has been improved to avoid showing raw HTML syntax, making connection and server-side errors more readable."},
-    { "summary": "Hunt and Alerts interfaces will now allow pivoting to PCAP from a group of results if the grouped results contain a network.community_id field."},
-    { "summary": "New “Correlate” quick action will pivot to a new Hunt search for all events that can be correlated by at least one of various event IDs."},
-    { "summary": "Fixed bug that caused some Hunt queries to not group correctly without a .keyword suffix. This has been corrected so that the .keyword suffix is no longer necessary on those groupby terms."},
-    { "summary": "Fixed issue where PCAP interface loses formatting and color coding when opening multiple PCAP tabs."},
-    { "summary": "Alerts interface now has a Refresh button that allows users to refresh the current alerts view without refreshing the entire SOC application."},
-    { "summary": "Hunt and Alerts interfaces now have an auto-refresh dropdown that will automatically refresh the current view at the selected frequency."},
-    { "summary": "The <code>so-elastalert-test</code> script has been refactored to work with Security Onion 2.3."},
-    { "summary": "The included Logstash image now includes Kafka plugins."},
-    { "summary": "Wazuh agent registration process has been improved to support slower hardware and networks."},
-    { "summary": "An Elasticsearch ingest pipeline has been added for suricata.ftp_data."},
-    { "summary": "Elasticsearch’s indices.query.bool.max_clause_count value has been increased to accommodate a slightly larger number of fields (1024 -> 1500) when querying using a wildcard."},
-    { "summary": "On nodes being added to an existing grid, setup will compare the version currently being installed to the manager (>=2.3.20), pull the correct Security Onion version from the manager if there is a mismatch, and run that version."},
-    { "summary": "Setup will gather any errors found during a failed install into <code>/root/errors.log</code> for easy copy/paste and debugging."},
-    { "summary": "Selecting Suricata as the metadata engine no longer results in the install failing."},
-    { "summary": "<code>so-rule-update</code> now accepts arguments to idstools. For example, <code>so-rule-update -f</code> will force idstools to pull rules, ignoring the default 15-minute pull limit."}
+    { "summary": "Zeek is now at version 3.0.13." },
+    { "summary": "CyberChef is now at version 9.27.2." },
+    { "summary": "Elastic components are now at version 7.10.2. This is the last version that uses the Apache license." },
+    { "summary": "Suricata is now at version 6.0.1." },
+    { "summary": "Suricata metadata parsing is now vastly improved." },
+    { "summary": "If you choose Suricata for metadata parsing, it will now extract files from the network and send them to Strelka. You can add additional mime types <a href='https://github.com/Security-Onion-Solutions/securityonion/blob/dev/salt/idstools/sorules/extraction.rules'>here</a>." },
+    { "summary": "It is now possible to filter Suricata events from being written to the logs. This is a new Suricata 6 feature. We have included some examples <a href='https://github.com/Security-Onion-Solutions/securityonion/blob/dev/salt/idstools/sorules/filters.rules'>here</a>." },
+    { "summary": "The Kratos docker container will now perform DNS lookups locally before reaching out to the network DNS provider." },
+    { "summary": "Network configuration is now more compatible with manually configured OpenVPN or Wireguard VPN interfaces." },
+    { "summary": "<code>so-sensor-clean</code> will no longer spawn multiple instances." },
+    { "summary": "Suricata eve.json logs will now be cleaned up after 7 days. This can be changed via the pillar setting." },
+    { "summary": "The automated backup script on the manager now backs up all keys along with the salt configurations. Backup retention is now set to 7 days." },
+    { "summary": "Strelka logs are now being rotated properly." },
+    { "summary": "Elastalert can now be customized via a pillar." },
+    { "summary": "Introduced new script <code>so-monitor-add</code> that allows the user to easily add interfaces to the bond for monitoring." },
+    { "summary": "Setup now validates all user input fields to give up-front feedback if an entered value is invalid." },
+    { "summary": "There have been several changes to improve install reliability. Many install steps have had their validation processes reworked to ensure that required tasks have been completed before moving on to the next step of the install." },
+    { "summary": "Users are now warned if they try to set <i>securityonion</i> as their hostname." },
+    { "summary": "The ISO should now identify xvda and nvme devices as install targets." },
+    { "summary": "At the end of the first stage of the ISO setup, the ISO device should properly unmount and eject." },
+    { "summary": "The text selection of choosing Suricata vs Zeek for metadata is now more descriptive." },
+    { "summary": "The logic for properly setting the <code>LOG_SIZE_LIMIT</code> variable has been improved." },
+    { "summary": "When installing on Ubuntu, Setup will now wait for cloud init to complete before trying to start the install of packages." },
+    { "summary": "The firewall state runs considerably faster now." },
+    { "summary": "ICMP timestamps are now disabled." },
+    { "summary": "Copyright dates on all Security Onion specific files have been updated." },
+    { "summary": "<code>so-tcpreplay</code> (and indirectly <code>so-test</code>) should now work properly." },
+    { "summary": "The Zeek packet loss script is now more accurate." },
+    { "summary": "Grafana now includes an estimated EPS graph for events ingested on the manager." },
+    { "summary": "Updated Elastalert to release 0.2.4-alt2 based on the <a href='https://github.com/jertel/elastalert'>jertel/elastalert</a> alt branch." },
+    { "summary": "Pivots from Alerts/Hunts to action links will properly URI encode values." },
+    { "summary": "Hunt timeline graph will properly scale the data point interval based on the search date range." },
+    { "summary": "Grid interface will properly show <i>Search</i> as the node type instead of <i>so-node</i>." },
+    { "summary": "Import node now supports airgap environments." },
+    { "summary": "The so-mysql container will now show <i>healthy</i> when viewing the docker ps output." },
+    { "summary": "The Soctopus configuration now uses private IPs instead of public IPs, allowing network communications to succeed within the grid." },
+    { "summary": "The Correlate action in Hunt now groups the OR filters together to ensure subsequent user-added filters are correctly ANDed to the entire OR group." },
+    { "summary": "Add support to <code>so-firewall</code> script to display existing port groups and host groups." },
+    { "summary": "TheHive initialization during Security Onion setup will now properly check for a running ES instance and will retry connectivity checks to TheHive before proceeding." },
+    { "summary": "Changes to the <i>.security</i> analyzer yields more accurate query results when using Playbook." },
+    { "summary": "Several Hunt queries have been updated." },
+    { "summary": "The pfSense firewall log parser has been updated to improve compatibility." },
+    { "summary": "Kibana dashboard hyperlinks have been updated for faster navigation." }
   ]
 }
@@ -1,2 +1,2 @@
 #!/bin/bash
-/usr/bin/docker exec so-zeek /opt/zeek/bin/zeekctl netstats | awk '{print $(NF-2),$(NF-1),$NF}' | awk -F '[ =]' '{RCVD += $2;DRP += $4;TTL += $6} END { print "rcvd: " RCVD, "dropped: " DRP, "total: " TTL}' >> /nsm/zeek/logs/packetloss.log 2>&1
+/usr/bin/docker exec so-zeek env -i PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/bin:/usr/local/bin:/usr/local/sbin runuser -l zeek -c '/opt/zeek/bin/zeekctl netstats' | awk '{print $(NF-2),$(NF-1),$NF}' | awk -F '[ =]' '{RCVD += $2;DRP += $4;TTL += $6} END { print "rcvd: " RCVD, "dropped: " DRP, "total: " TTL}' >> /nsm/zeek/logs/packetloss.log 2>&1
setup/automation/distributed-net-ubuntu-suricata-search (new file, 78 lines)
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+TESTING=true
+
+address_type=DHCP
+ADMINUSER=onionuser
+ADMINPASS1=onionuser
+ADMINPASS2=onionuser
+# ALLOW_CIDR=0.0.0.0/0
+# ALLOW_ROLE=a
+# BASICZEEK=7
+# BASICSURI=7
+# BLOGS=
+# BNICS=eth1
+# ZEEKVERSION=ZEEK
+# CURCLOSEDAYS=
+# EVALADVANCED=BASIC
+# GRAFANA=1
+# HELIXAPIKEY=
+HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
+HNSENSOR=inherit
+HOSTNAME=distributed-search
+install_type=SEARCHNODE
+# LSINPUTBATCHCOUNT=
+# LSINPUTTHREADS=
+# LSPIPELINEBATCH=
+# LSPIPELINEWORKERS=
+# MANAGERADV=BASIC
+MANAGERUPDATES=1
+# MDNS=
+# MGATEWAY=
+# MIP=
+# MMASK=
+MNIC=ens18
+# MSEARCH=
+MSRV=distributed-manager
+MSRVIP=10.66.166.66
+# MTU=
+# NIDS=Suricata
+# NODE_ES_HEAP_SIZE=
+# NODE_LS_HEAP_SIZE=
+NODESETUP=NODEBASIC
+NSMSETUP=BASIC
+NODEUPDATES=MANAGER
+# OINKCODE=
+# OSQUERY=1
+# PATCHSCHEDULEDAYS=
+# PATCHSCHEDULEHOURS=
+PATCHSCHEDULENAME=auto
+# PLAYBOOK=1
+# REDIRECTHOST=
+# REDIRECTINFO=IP
+# RULESETUP=ETOPEN
+# SHARDCOUNT=
+# SKIP_REBOOT=
+SOREMOTEPASS1=onionuser
+SOREMOTEPASS2=onionuser
+# STRELKA=1
+# THEHIVE=1
+# WAZUH=1
+# WEBUSER=onionuser@somewhere.invalid
+# WEBPASSWD1=0n10nus3r
+# WEBPASSWD2=0n10nus3r
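These automation files are shell-style answer files of KEY=value pairs; commented entries document options left unset. A sketch of how such a file could be consumed (the exact mechanism is not shown in this diff):

    source ./setup/automation/distributed-net-ubuntu-suricata-search
    echo "$HOSTNAME ($install_type) joins manager $MSRV at $MSRVIP"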
setup/automation/distributed-net-ubuntu-suricata-sensor (new file, 78 lines)
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+TESTING=true
+
+address_type=DHCP
+ADMINUSER=onionuser
+ADMINPASS1=onionuser
+ADMINPASS2=onionuser
+# ALLOW_CIDR=0.0.0.0/0
+# ALLOW_ROLE=a
+BASICZEEK=2
+BASICSURI=2
+# BLOGS=
+BNICS=ens19
+ZEEKVERSION=ZEEK
+# CURCLOSEDAYS=
+# EVALADVANCED=BASIC
+# GRAFANA=1
+# HELIXAPIKEY=
+HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
+HNSENSOR=inherit
+HOSTNAME=distributed-sensor
+install_type=SENSOR
+# LSINPUTBATCHCOUNT=
+# LSINPUTTHREADS=
+# LSPIPELINEBATCH=
+# LSPIPELINEWORKERS=
+# MANAGERADV=BASIC
+MANAGERUPDATES=1
+# MDNS=
+# MGATEWAY=
+# MIP=
+# MMASK=
+MNIC=ens18
+# MSEARCH=
+MSRV=distributed-manager
+MSRVIP=10.66.166.66
+# MTU=
+# NIDS=Suricata
+# NODE_ES_HEAP_SIZE=
+# NODE_LS_HEAP_SIZE=
+# NODESETUP=NODEBASIC
+NSMSETUP=BASIC
+NODEUPDATES=MANAGER
+# OINKCODE=
+# OSQUERY=1
+# PATCHSCHEDULEDAYS=
+# PATCHSCHEDULEHOURS=
+PATCHSCHEDULENAME=auto
+# PLAYBOOK=1
+# REDIRECTHOST=
+# REDIRECTINFO=IP
+# RULESETUP=ETOPEN
+# SHARDCOUNT=
+# SKIP_REBOOT=
+SOREMOTEPASS1=onionuser
+SOREMOTEPASS2=onionuser
+# STRELKA=1
+# THEHIVE=1
+# WAZUH=1
+# WEBUSER=onionuser@somewhere.invalid
+# WEBPASSWD1=0n10nus3r
+# WEBPASSWD2=0n10nus3r
@@ -1,8 +1,20 @@
 #!/bin/bash
-
-if [[ "$DEVICE_IFACE" != "$MNIC" && "$DEVICE_IFACE" != *"docker"* && "$DEVICE_IFACE" != *"tun"* && "DEVICE_IFACE" != *"wg"* ]]; then
-  for i in rx tx sg tso ufo gso gro lro; do
-    ethtool -K "$DEVICE_IFACE" "$i" off;
-  done
-  ip link set dev "$DEVICE_IFACE" arp off multicast off allmulticast off promisc on
-fi
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+init_monitor $MNIC
@@ -160,10 +160,10 @@ check_network_manager_conf() {
 } >> "$setup_log" 2>&1
 fi
 
-if test -f "$nmconf"; then
-  sed -i 's/managed=false/managed=true/g' "$nmconf" >> "$setup_log" 2>&1
-  systemctl restart NetworkManager >> "$setup_log" 2>&1
-fi
+#if test -f "$nmconf"; then
+#  sed -i 's/managed=false/managed=true/g' "$nmconf" >> "$setup_log" 2>&1
+#  systemctl restart NetworkManager >> "$setup_log" 2>&1
+# fi
 
 if [[ ! -d "$preupdir" ]]; then
   mkdir "$preupdir" >> "$setup_log" 2>&1
@@ -271,7 +271,7 @@ collect_adminuser_inputs() {
 collect_cur_close_days() {
   whiptail_cur_close_days "$CURCLOSEDAYS"
 
-  while ! valid_int "$CURCLOSEDAYS" "1"; do
+  while ! valid_int "$CURCLOSEDAYS"; do
     whiptail_invalid_input
     whiptail_cur_close_days "$CURCLOSEDAYS"
   done
@@ -322,7 +322,7 @@ collect_es_cluster_name() {
 collect_es_space_limit() {
   whiptail_log_size_limit "$log_size_limit"
 
-  while ! valid_int "$log_size_limit" "1"; do # Upper/lower bounds?
+  while ! valid_int "$log_size_limit"; do # Upper/lower bounds?
     whiptail_invalid_input
     whiptail_log_size_limit "$log_size_limit"
   done
@@ -331,7 +331,7 @@ collect_es_space_limit() {
 collect_fleet_custom_hostname_inputs() {
   whiptail_fleet_custom_hostname
 
-  while ! valid_fqdn "$FLEETCUSTOMHOSTNAME" || [[ $FLEETCUSTOMHOSTNAME != "" ]]; do
+  while [[ -n $FLEETCUSTOMHOSTNAME ]] && ! valid_fqdn "$FLEETCUSTOMHOSTNAME"; do
     whiptail_invalid_input
     whiptail_fleet_custom_hostname "$FLEETCUSTOMHOSTNAME"
   done
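The old loop condition re-prompted on every non-empty value, valid or not; the rewritten condition only re-prompts when a non-empty value fails FQDN validation:

    # old: loop if (not a valid FQDN) OR (value is non-empty)
    # new: loop if (value is non-empty) AND (not a valid FQDN)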
@@ -368,7 +368,7 @@ collect_gateway() {
 }
 
 collect_helix_key() {
-  whiptail_helix_apikey # validate?
+  whiptail_helix_apikey
 }
 
 collect_homenet_mngr() {
@@ -398,7 +398,6 @@ collect_hostname() {
 
 whiptail_set_hostname "$HOSTNAME"
 
-
 if [[ $HOSTNAME == 'securityonion' ]]; then # Will only check HOSTNAME=securityonion once
   if ! (whiptail_avoid_default_hostname); then
     whiptail_set_hostname
@@ -446,7 +445,7 @@ collect_mngr_hostname() {
 collect_mtu() {
   whiptail_bond_nics_mtu "1500"
 
-  while ! valid_int "$MTU" "68"; do
+  while ! valid_int "$MTU" "68" "10000"; do
     whiptail_invalid_input
     whiptail_bond_nics_mtu "$MTU"
   done
@@ -454,20 +453,10 @@ collect_mtu() {
 
 collect_node_es_heap() {
   whiptail_node_es_heap "$ES_HEAP_SIZE"
-
-  while ! valid_int "$NODE_ES_HEAP_SIZE"; do
-    whiptail_invalid_input
-    whiptail_node_es_heap "$NODE_ES_HEAP_SIZE"
-  done
 }
 
 collect_node_ls_heap() {
   whiptail_node_ls_heap "$LS_HEAP_SIZE"
-
-  while ! valid_int "$NODE_LS_HEAP_SIZE"; do
-    whiptail_invalid_input
-    whiptail_node_ls_heap "$NODE_LS_HEAP_SIZE"
-  done
 }
 
 collect_node_ls_input() {
@@ -500,7 +489,7 @@ collect_node_ls_pipeline_worker_count() {
 collect_oinkcode() {
   whiptail_oinkcode
 
-  while ! valid_string "$OINKCODE" "" "128"; do #TODO: verify max length here
+  while ! valid_string "$OINKCODE" "" "128"; do
     whiptail_invalid_input
     whiptail_oinkcode "$OINKCODE"
   done
@@ -569,6 +558,7 @@ collect_so_allow() {
 collect_soremote_inputs() {
   whiptail_create_soremote_user
   SCMATCH=no
+
   while [[ $SCMATCH != yes ]]; do
     whiptail_create_soremote_user_password1
     whiptail_create_soremote_user_password2
@@ -596,11 +586,11 @@ collect_webuser_inputs() {
 
 WPMATCH=no
 while [[ $WPMATCH != yes ]]; do
   whiptail_create_web_user_password1
   while ! check_password "$WEBPASSWD1"; do
     whiptail_invalid_pass_characters_warning
     whiptail_create_web_user_password1
   done
   if echo "$WEBPASSWD1" | so-user valpass >> "$setup_log" 2>&1; then
     whiptail_create_web_user_password2
     check_web_pass
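For reference, check_password (added earlier in this diff) rejects passwords containing a single quote, double quote, dollar sign, or backslash. Illustrative calls:

    check_password 'S0Str0ngPass'                      # accepted
    check_password 'pa$$word' || echo "rejected: \$"   # contains $
    check_password "it's"     || echo "rejected: '"    # contains a single quote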
@@ -751,14 +741,25 @@ check_sos_appliance() {
 }
 
 compare_main_nic_ip() {
-  if [[ "$MAINIP" != "$MNIC_IP" ]]; then
-    read -r -d '' message <<- EOM
-      The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC).
-
-      This is not a supported configuration, please remediate and rerun setup.
-    EOM
-    whiptail --title "Security Onion Setup" --msgbox "$message" 10 75
-    kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
+  if ! [[ $MNIC =~ ^(tun|wg|vpn).*$ ]]; then
+    if [[ "$MAINIP" != "$MNIC_IP" ]]; then
+      read -r -d '' message <<- EOM
+        The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC).
+
+        This has been known to cause installs to fail in some scenarios.
+
+        Please select whether to continue the install or exit setup to remediate any potential issues.
+      EOM
+      whiptail --title "Security Onion Setup" \
+        --yesno "$message" 10 75 \
+        --yes-button "Continue" --no-button "Exit" --defaultno
+
+      kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
+    fi
+  else
+    # Setup uses MAINIP, but since we ignore the equality condition when using a VPN
+    # just set the variable to the IP of the VPN interface
+    MAINIP=$MNIC_IP
   fi
 }
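The new outer guard skips the routed-IP comparison when the management NIC name looks like a VPN interface. Illustrative matches against the pattern:

    [[ tun0  =~ ^(tun|wg|vpn).*$ ]] && echo "VPN-style name: comparison skipped"
    [[ ens18 =~ ^(tun|wg|vpn).*$ ]] || echo "physical NIC: comparison still runs"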
@@ -1005,33 +1006,38 @@ disable_ipv6() {
 sysctl -w net.ipv6.conf.all.disable_ipv6=1
 sysctl -w net.ipv6.conf.default.disable_ipv6=1
 } >> "$setup_log" 2>&1
+{
+  echo "net.ipv6.conf.all.disable_ipv6 = 1"
+  echo "net.ipv6.conf.default.disable_ipv6 = 1"
+  echo "net.ipv6.conf.lo.disable_ipv6 = 1"
+} >> /etc/sysctl.conf
 }
 
-disable_misc_network_features() {
-  filter_unused_nics
-  if [ ${#filtered_nics[@]} -ne 0 ]; then
-    for unused_nic in "${filtered_nics[@]}"; do
-      if [ -n "$unused_nic" ]; then
-        echo "Disabling unused NIC: $unused_nic" >> "$setup_log" 2>&1
-
-        # Disable DHCPv4/v6 and autoconnect
-        nmcli con mod "$unused_nic" \
-          ipv4.method disabled \
-          ipv6.method ignore \
-          connection.autoconnect "no" >> "$setup_log" 2>&1
-
-        # Flush any existing IPs
-        ip addr flush "$unused_nic" >> "$setup_log" 2>&1
-      fi
-    done
-  fi
-  # Disable IPv6
-  {
-    echo "net.ipv6.conf.all.disable_ipv6 = 1"
-    echo "net.ipv6.conf.default.disable_ipv6 = 1"
-    echo "net.ipv6.conf.lo.disable_ipv6 = 1"
-  } >> /etc/sysctl.conf
-}
+#disable_misc_network_features() {
+#  filter_unused_nics
+#  if [ ${#filtered_nics[@]} -ne 0 ]; then
+#    for unused_nic in "${filtered_nics[@]}"; do
+#      if [ -n "$unused_nic" ]; then
+#        echo "Disabling unused NIC: $unused_nic" >> "$setup_log" 2>&1
+#
+#        # Disable DHCPv4/v6 and autoconnect
+#        nmcli con mod "$unused_nic" \
+#          ipv4.method disabled \
+#          ipv6.method ignore \
+#          connection.autoconnect "no" >> "$setup_log" 2>&1
+#
+#        # Flush any existing IPs
+#        ip addr flush "$unused_nic" >> "$setup_log" 2>&1
+#      fi
+#    done
+#  fi
+#  # Disable IPv6
+#  {
+#    echo "net.ipv6.conf.all.disable_ipv6 = 1"
+#    echo "net.ipv6.conf.default.disable_ipv6 = 1"
+#    echo "net.ipv6.conf.lo.disable_ipv6 = 1"
+#  } >> /etc/sysctl.conf
+#}
 
 docker_install() {
@@ -1223,7 +1229,7 @@ filter_unused_nics() {
 fi
 
 # Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use)
-filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|tun|wg|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g')
+filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g')
 readarray -t filtered_nics <<< "$filtered_nics"
 
 nic_list=()
@@ -1720,17 +1726,11 @@ network_setup() {
 echo "... Verifying all network devices are managed by Network Manager";
 check_network_manager_conf;
 
-echo "... Disabling unused NICs";
-disable_misc_network_features;
-
-echo "... Setting ONBOOT for management interface";
-command -v netplan &> /dev/null || nmcli con mod "$MNIC" connection.autoconnect "yes"
-
 echo "... Copying 99-so-checksum-offload-disable";
 cp ./install_scripts/99-so-checksum-offload-disable /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable ;
 
 echo "... Modifying 99-so-checksum-offload-disable";
-sed -i "s/\$MNIC/${MNIC}/g" /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable;
+sed -i "s/\$MNIC/${INTERFACE}/g" /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable;
 } >> "$setup_log" 2>&1
 }
@@ -1899,7 +1899,7 @@ saltify() {
 if [ $OS = 'centos' ]; then
   set_progress_str 5 'Installing Salt repo'
   {
-    sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.2/SALTSTACK-GPG-KEY.pub;
+    sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.5/SALTSTACK-GPG-KEY.pub;
     cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo;
   } >> "$setup_log" 2>&1
   set_progress_str 6 'Installing various dependencies'
@@ -1916,14 +1916,14 @@ saltify() {
 # Download Ubuntu Keys in case manager updates = 1
 mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
 if [[ ! $is_airgap ]]; then
-  logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3002.2/SALTSTACK-GPG-KEY.pub"
+  logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub"
   logCmd "wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg"
   logCmd "wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH"
   logCmd "cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo"
 fi
 set_progress_str 7 'Installing salt-master'
 if [[ ! $is_iso ]]; then
-  logCmd "yum -y install salt-master-3002.2"
+  logCmd "yum -y install salt-master-3002.5"
 fi
 systemctl enable salt-master >> "$setup_log" 2>&1
 ;;
@@ -1951,7 +1951,7 @@ saltify() {
 {
 if [[ ! $is_iso ]]; then
   yum -y install epel-release
-  yum -y install salt-minion-3002.2\
+  yum -y install salt-minion-3002.5\
   python3\
   python36-docker\
   python36-dateutil\
@@ -2003,8 +2003,8 @@ saltify() {
 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT' | 'HELIXSENSOR')
 
 # Add saltstack repo(s)
-wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3002.2/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
-echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.2 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
+wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
+echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.5 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
 
 # Add Docker repo
 curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1
@@ -2012,7 +2012,7 @@ saltify() {
 
 # Get gpg keys
 mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
-wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3002.2/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
+wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
 wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1
 wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1
@@ -2025,7 +2025,7 @@ saltify() {
 set_progress_str 6 'Installing various dependencies'
 retry 50 10 "apt-get -y install sqlite3 argon2 libssl-dev" >> "$setup_log" 2>&1 || exit 1
 set_progress_str 7 'Installing salt-master'
-retry 50 10 "apt-get -y install salt-master=3002.2+ds-1" >> "$setup_log" 2>&1 || exit 1
+retry 50 10 "apt-get -y install salt-master=3002.5+ds-1" >> "$setup_log" 2>&1 || exit 1
 retry 50 10 "apt-mark hold salt-master" >> "$setup_log" 2>&1 || exit 1
 ;;
 *)
@@ -2036,14 +2036,14 @@ saltify() {
 echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
 apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
 apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1
-echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
+echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.5/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
 echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
 ;;
 esac
 
 retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
 set_progress_str 8 'Installing salt-minion & python modules'
-retry 50 10 "apt-get -y install salt-minion=3002.2+ds-1 salt-common=3002.2+ds-1" >> "$setup_log" 2>&1 || exit 1
+retry 50 10 "apt-get -y install salt-minion=3002.5+ds-1 salt-common=3002.5+ds-1" >> "$setup_log" 2>&1 || exit 1
 retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
 if [[ $OSVER != 'xenial' ]]; then
   retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging" >> "$setup_log" 2>&1 || exit 1
@@ -2334,8 +2334,6 @@ set_hostname() {
 
 set_initial_firewall_policy() {
-  set_main_ip
-
 if [ -f $default_salt_dir/pillar/data/addtotab.sh ]; then chmod +x $default_salt_dir/pillar/data/addtotab.sh; fi
 if [ -f $default_salt_dir/salt/common/tools/sbin/so-firewall ]; then chmod +x $default_salt_dir/salt/common/tools/sbin/so-firewall; fi
 
@@ -46,8 +46,8 @@ check_new_repos() {
 if [[ $OS == 'centos' ]]; then
   local repo_arr=(
     "https://download.docker.com/linux/centos/docker-ce.repo"
-    "https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.2/SALTSTACK-GPG-KEY.pub"
-    "https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3002.2/SALTSTACK-GPG-KEY.pub"
+    "https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.5/SALTSTACK-GPG-KEY.pub"
+    "https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub"
     "https://download.docker.com/linux/ubuntu/gpg"
     "https://packages.wazuh.com/key/GPG-KEY-WAZUH"
     "https://packages.wazuh.com/3.x/yum/"
@@ -59,7 +59,7 @@ check_new_repos() {
   local repo_arr=(
     "https://download.docker.com/linux/ubuntu/gpg"
     "https://download.docker.com/linux/ubuntu"
-    "https://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.2/SALTSTACK-GPG-KEY.pub"
+    "https://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub"
     "https://packages.wazuh.com/key/GPG-KEY-WAZUH"
     "https://packages.wazuh.com"
   )
@@ -191,7 +191,7 @@ if ! [[ -f $install_opt_file ]]; then
 fi
 if [[ $setup_type == 'iso' ]] && [ "$automated" == no ]; then
   whiptail_first_menu_iso
-  if [[ $option == "Configure Network" ]]; then
+  if [[ $option == "CONFIGURENETWORK" ]]; then
     network_init_whiptail
     whiptail_management_interface_setup
     network_init
@@ -652,6 +652,8 @@ whiptail_first_menu_iso() {
 )
 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus
+
+option=$(echo "${option^^}" | tr -d ' ')
 }
 whiptail_make_changes() {
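The added normalization uppercases the selected menu option and strips spaces, which is why the earlier comparison now matches CONFIGURENETWORK:

    option="Configure Network"
    option=$(echo "${option^^}" | tr -d ' ')
    echo "$option"   # -> CONFIGURENETWORK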
@@ -1,6 +1,6 @@
 [saltstack]
 name=SaltStack repo for RHEL/CentOS $releasever PY3
-baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.2/
+baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.5/
 enabled=1
 gpgcheck=1
-gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.2/SALTSTACK-GPG-KEY.pub
+gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.5/SALTSTACK-GPG-KEY.pub