Mirror of https://github.com/Security-Onion-Solutions/securityonion.git

@@ -1,6 +1,6 @@
-## Security Onion 2.3.30
+## Security Onion 2.3.40
 
-Security Onion 2.3.30 is here!
+Security Onion 2.3.40 is here!
 
 ## Screenshots
 
@@ -1,16 +1,16 @@
-### 2.3.30 ISO image built on 2021/03/01
+### 2.3.40 ISO image built on 2021/03/22
 
 ### Download and Verify
 
-2.3.30 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.3.30.iso
+2.3.40 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.3.40.iso
 
-MD5: 65202BA0F7661A5E27087F097B8E571E
-SHA1: 14E842E39EDBB55A104263281CF25BF88A2E9D67
-SHA256: 210B37B9E3DFC827AFE2940E2C87B175ADA968EDD04298A5926F63D9269847B7
+MD5: FB72C0675F262A714B287BB33CE82504
+SHA1: E8F5A9AA23990DF794611F9A178D88414F5DA81C
+SHA256: DB125D6E770F75C3FD35ABE3F8A8B21454B7A7618C2B446D11B6AC8574601070
 
 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.30.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.40.iso.sig
 
 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
@@ -24,22 +24,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma
 
 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.30.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.40.iso.sig
 ```
 
 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.3.30.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.3.40.iso
 ```
 
 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.3.30.iso.sig securityonion-2.3.30.iso
+gpg --verify securityonion-2.3.40.iso.sig securityonion-2.3.40.iso
 ```
 
 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Mon 01 Mar 2021 02:15:28 PM EST using RSA key ID FE507013
+gpg: Signature made Mon 22 Mar 2021 09:35:50 AM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
 ```
 
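For convenience, the verification steps above chain together as shown below. The `gpg --import` line is an assumption: this excerpt only shows the `wget` of the KEYS file in the hunk header, so treat the import step as implied rather than quoted.
```
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
gpg --import KEYS   # assumed import step; not shown verbatim in this excerpt
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.40.iso.sig
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.40.iso
gpg --verify securityonion-2.3.40.iso.sig securityonion-2.3.40.iso
```
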
@@ -3,7 +3,6 @@ logstash:
   pipelines:
     manager:
       config:
-        - so/0008_input_fleet_livequery.conf.jinja
         - so/0009_input_beats.conf
         - so/0010_input_hhbeats.conf
         - so/9999_output_redis.conf.jinja

@@ -7,8 +7,7 @@ logstash:
         - so/9000_output_zeek.conf.jinja
         - so/9002_output_import.conf.jinja
         - so/9034_output_syslog.conf.jinja
         - so/9100_output_osquery.conf.jinja
-        - so/9101_output_osquery_livequery.conf.jinja
         - so/9400_output_suricata.conf.jinja
         - so/9500_output_beats.conf.jinja
         - so/9600_output_ossec.conf.jinja

@@ -90,7 +90,6 @@ commonpkgs:
       - ntpdate
       - jq
       - python3-docker
-      - docker-ce
       - curl
       - ca-certificates
       - software-properties-common
@@ -104,12 +103,15 @@ commonpkgs:
       - python3-dateutil
       - python3-m2crypto
      - python3-mysqldb
+      - python3-packaging
       - git
 heldpackages:
   pkg.installed:
     - pkgs:
-      - containerd.io: 1.2.13-2
-      - docker-ce: 5:19.03.14~3-0~ubuntu-bionic
+      - containerd.io: 1.4.4-1
+      - docker-ce: 5:20.10.5~3-0~ubuntu-bionic
+      - docker-ce-cli: 5:20.10.5~3-0~ubuntu-bionic
+      - docker-ce-rootless-extras: 5:20.10.5~3-0~ubuntu-bionic
     - hold: True
     - update_holds: True
 
@@ -135,6 +137,7 @@ commonpkgs:
       - python36-dateutil
       - python36-m2crypto
       - python36-mysql
+      - python36-packaging
       - yum-utils
       - device-mapper-persistent-data
       - lvm2
@@ -144,8 +147,10 @@ commonpkgs:
 heldpackages:
   pkg.installed:
     - pkgs:
-      - containerd.io: 1.2.13-3.2.el7
-      - docker-ce: 3:19.03.14-3.el7
+      - containerd.io: 1.4.4-3.1.el7
+      - docker-ce: 3:20.10.5-3.el7
+      - docker-ce-cli: 1:20.10.5-3.el7
+      - docker-ce-rootless-extras: 20.10.5-3.el7
     - hold: True
     - update_holds: True
 {% endif %}

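Because the Docker packages above are pinned with `hold: True`, one quick way to confirm the pins landed after an upgrade is the standard distro tooling (not part of this diff, but the same commands soup uses for Salt holds):
```
# Ubuntu: held packages should include docker-ce, docker-ce-cli, docker-ce-rootless-extras, containerd.io
apt-mark showhold

# CentOS 7: versionlocked packages (requires yum-plugin-versionlock)
yum versionlock list
```
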
@@ -86,6 +86,19 @@ add_interface_bond0() {
   fi
 }
 
+check_airgap() {
+  # See if this is an airgap install
+  AIRGAP=$(cat /opt/so/saltstack/local/pillar/global.sls | grep airgap: | awk '{print $2}')
+  if [[ "$AIRGAP" == "True" ]]; then
+    is_airgap=0
+    UPDATE_DIR=/tmp/soagupdate/SecurityOnion
+    AGDOCKER=/tmp/soagupdate/docker
+    AGREPO=/tmp/soagupdate/Packages
+  else
+    is_airgap=1
+  fi
+}
+
 check_container() {
   docker ps | grep "$1:" > /dev/null 2>&1
   return $?
@@ -97,6 +110,46 @@ check_password() {
   return $?
 }
 
+check_elastic_license() {
+
+  [ -n "$TESTING" ] && return
+
+  # See if the user has already accepted the license
+  if [ ! -f /opt/so/state/yeselastic.txt ]; then
+    elastic_license
+  else
+    echo "Elastic License has already been accepted"
+  fi
+}
+
+elastic_license() {
+
+  read -r -d '' message <<- EOM
+\n
+Starting in Elastic Stack version 7.11, the Elastic Stack binaries are only available under the Elastic License:
+https://securityonion.net/elastic-license
+
+Please review the Elastic License:
+https://www.elastic.co/licensing/elastic-license
+
+Do you agree to the terms of the Elastic License?
+
+If so, type AGREE to accept the Elastic License and continue. Otherwise, press Enter to exit this program without making any changes.
+EOM
+
+  AGREED=$(whiptail --title "Security Onion Setup" --inputbox \
+    "$message" 20 75 3>&1 1>&2 2>&3)
+
+  if [ "${AGREED^^}" = 'AGREE' ]; then
+    mkdir -p /opt/so/state
+    touch /opt/so/state/yeselastic.txt
+  else
+    echo "Starting in 2.3.40 you must accept the Elastic license if you want to run Security Onion."
+    exit 1
+  fi
+
+}
+
 fail() {
   msg=$1
   echo "ERROR: $msg"
@@ -250,6 +303,12 @@ set_minionid() {
   MINIONID=$(lookup_grain id)
 }
 
+set_palette() {
+  if [ "$OS" == ubuntu ]; then
+    update-alternatives --set newt-palette /etc/newt/palette.original
+  fi
+}
+
 set_version() {
   CURRENTVERSION=0.0.0
   if [ -f /etc/soversion ]; then
@@ -340,6 +399,26 @@ valid_int() {
 
 # {% raw %}
 
+valid_proxy() {
+  local proxy=$1
+  local url_prefixes=( 'http://' 'https://' )
+
+  local has_prefix=false
+  for prefix in "${url_prefixes[@]}"; do
+    echo "$proxy" | grep -q "$prefix" && has_prefix=true && proxy=${proxy#"$prefix"} && break
+  done
+
+  local url_arr
+  mapfile -t url_arr <<< "$(echo "$proxy" | tr ":" "\n")"
+
+  local valid_url=true
+  if ! valid_ip4 "${url_arr[0]}" && ! valid_fqdn "${url_arr[0]}" && ! valid_hostname "${url_arr[0]}"; then
+    valid_url=false
+  fi
+
+  [[ $has_prefix == true ]] && [[ $valid_url == true ]] && return 0 || return 1
+}
+
 valid_string() {
   local str=$1
   local min_length=${2:-1}

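A quick sketch of how the new `valid_proxy` helper behaves; `proxy.example.com` is a hypothetical host, and `valid_ip4`/`valid_fqdn`/`valid_hostname` are the existing validators in the same file, so the first call succeeds only if the FQDN check accepts the name:
```
valid_proxy "http://proxy.example.com:3128" && echo "accepted"   # prefix present, host validates
valid_proxy "proxy.example.com:3128" || echo "rejected"          # fails: missing http:// or https:// prefix
```
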
@@ -30,7 +30,7 @@ fi
 
 USER=$1
 
-CORTEX_KEY=$(lookup_pillar cortexkey)
+CORTEX_KEY=$(lookup_pillar cortexorguserkey)
 CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api"
 CORTEX_ORG_NAME=$(lookup_pillar cortexorgname)
 CORTEX_USER=$USER

@@ -30,7 +30,7 @@ fi
 
 USER=$1
 
-CORTEX_KEY=$(lookup_pillar cortexkey)
+CORTEX_KEY=$(lookup_pillar cortexorguserkey)
 CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api"
 CORTEX_USER=$USER
 

salt/common/tools/sbin/so-docker-prune (new executable file, 85 lines)
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import sys, argparse, re, docker
+from packaging.version import Version, InvalidVersion
+from itertools import groupby, chain
+
+
+def get_image_name(string) -> str:
+    return ':'.join(string.split(':')[:-1])
+
+
+def get_so_image_basename(string) -> str:
+    return get_image_name(string).split('/so-')[-1]
+
+
+def get_image_version(string) -> str:
+    ver = string.split(':')[-1]
+    if ver == 'latest':
+        # Version doesn't like "latest", so use a high semver
+        return '999999.9.9'
+    else:
+        try:
+            Version(ver)
+        except InvalidVersion:
+            # Strip the last substring following a hyphen for automated branches
+            ver = '-'.join(ver.split('-')[:-1])
+        return ver
+
+
+def main(quiet):
+    client = docker.from_env()
+
+    image_list = client.images.list(filters={ 'dangling': False })
+
+    # Map list of image objects to flattened list of tags (format: "name:version")
+    tag_list = list(chain.from_iterable(list(map(lambda x: x.attrs.get('RepoTags'), image_list))))
+
+    # Filter to only SO images (base name begins with "so-")
+    tag_list = list(filter(lambda x: re.match(r'^.*\/so-[^\/]*$', get_image_name(x)), tag_list))
+
+    # Group tags into lists by base name (sort by same projection first)
+    tag_list.sort(key=lambda x: get_so_image_basename(x))
+    grouped_tag_lists = [ list(it) for _, it in groupby(tag_list, lambda x: get_so_image_basename(x)) ]
+
+    no_prunable = True
+    for t_list in grouped_tag_lists:
+        try:
+            # Keep the 2 most current images
+            t_list.sort(key=lambda x: Version(get_image_version(x)), reverse=True)
+            if len(t_list) <= 2:
+                continue
+            else:
+                no_prunable = False
+                for tag in t_list[2:]:
+                    if not quiet: print(f'Removing image {tag}')
+                    client.images.remove(tag)
+        except InvalidVersion as e:
+            print(f'so-{get_so_image_basename(t_list[0])}: {e.args[0]}', file=sys.stderr)
+            exit(1)
+
+    if no_prunable and not quiet:
+        print('No Security Onion images to prune')
+
+
+if __name__ == "__main__":
+    main_parser = argparse.ArgumentParser(add_help=False)
+    main_parser.add_argument('-q', '--quiet', action='store_const', const=True, required=False)
+    args = main_parser.parse_args(sys.argv[1:])
+
+    main(args.quiet)

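Based on the argparse definition at the bottom of the script, invocation looks like this; the script keeps the two newest tags per `so-*` image and removes the rest:
```
so-docker-prune        # print each image tag as it is removed
so-docker-prune -q     # same, but quiet; errors still go to stderr
```
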
salt/common/tools/sbin/so-kibana-space-defaults (new file, 13 lines)
@@ -0,0 +1,13 @@
+. /usr/sbin/so-common
+
+wait_for_web_response "http://localhost:5601/app/kibana" "Elastic"
+## This hackery will be removed if using Elastic Auth ##
+
+# Let's snag a cookie from Kibana
+THECOOKIE=$(curl -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
+
+# Disable certain Features from showing up in the Kibana UI
+echo
+echo "Setting up default Space:"
+curl -b "sid=$THECOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","fleet"]} ' >> /opt/so/log/kibana/misc.log
+echo

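To spot-check the result, the same session cookie can be reused against Kibana's spaces API with a GET on the endpoint the script PUTs to (a sketch, not part of the diff; assumes the cookie is still valid):
```
# Should echo back the default space with the disabledFeatures list applied
curl -b "sid=$THECOOKIE" -L "localhost:5601/api/spaces/space/default"
```
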
Mode changes (Normal file → Executable file), no content changes:
salt/common/tools/sbin/so-monitor-add
salt/common/tools/sbin/so-playbook-sigma-refresh
salt/common/tools/sbin/so-raid-status

salt/common/tools/sbin/so-rule (27 lines changed; also Normal file → Executable file)
@@ -37,11 +37,9 @@ def print_err(string: str):
 
 
 def check_apply(args: dict, prompt: bool = True):
-    cmd_arr = ['salt-call', 'state.apply', 'idstools', 'queue=True']
-
     if args.apply:
-        print('Configuration updated. Applying idstools state...')
-        return subprocess.run(cmd_arr)
+        print('Configuration updated. Applying changes:')
+        return apply()
     else:
         if prompt:
             message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
@@ -51,12 +49,24 @@ def check_apply(args: dict, prompt: bool = True):
             if answer.lower() in [ 'n', '' ]:
                 return 0
             else:
-                print('Applying idstools state...')
-                return subprocess.run(cmd_arr)
+                print('Applying changes:')
+                return apply()
         else:
             return 0
 
 
+def apply():
+    salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', 'idstools.sync_files', 'queue=True']
+    update_cmd = ['so-rule-update']
+    print('Syncing config files...')
+    cmd = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL)
+    if cmd.returncode == 0:
+        print('Updating rules...')
+        return subprocess.run(update_cmd).returncode
+    else:
+        return cmd.returncode
+
+
 def find_minion_pillar() -> str:
     regex = '^.*_(manager|managersearch|standalone|import|eval)\.sls$'
 
@@ -442,10 +452,7 @@ def main():
         modify.print_help()
         sys.exit(0)
 
-    if isinstance(exit_code, subprocess.CompletedProcess):
-        sys.exit(exit_code.returncode)
-    else:
-        sys.exit(exit_code)
+    sys.exit(exit_code)
 
 
 if __name__ == '__main__':

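The new `apply()` helper added in this diff boils down to running these two commands by hand; both appear verbatim in the function body above:
```
salt-call state.apply -l quiet idstools.sync_files queue=True
so-rule-update
```
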
salt/common/tools/sbin/so-suricata-testrule (mode change only: Normal file → Executable file)

@@ -19,13 +19,12 @@
 
 UPDATE_DIR=/tmp/sogh/securityonion
 INSTALLEDVERSION=$(cat /etc/soversion)
+POSTVERSION=$INSTALLEDVERSION
 INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'})
 DEFAULT_SALT_DIR=/opt/so/saltstack/default
 BATCHSIZE=5
 SOUP_LOG=/root/soup.log
 
-exec 3>&1 1>${SOUP_LOG} 2>&1
-
 add_common() {
   cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
   cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
@@ -101,19 +100,6 @@ update_registry() {
   salt-call state.apply registry queue=True
 }
 
-check_airgap() {
-  # See if this is an airgap install
-  AIRGAP=$(cat /opt/so/saltstack/local/pillar/global.sls | grep airgap: | awk '{print $2}')
-  if [[ "$AIRGAP" == "True" ]]; then
-    is_airgap=0
-    UPDATE_DIR=/tmp/soagupdate/SecurityOnion
-    AGDOCKER=/tmp/soagupdate/docker
-    AGREPO=/tmp/soagupdate/Packages
-  else
-    is_airgap=1
-  fi
-}
-
 check_sudoers() {
   if grep -q "so-setup" /etc/sudoers; then
     echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."
@@ -243,22 +229,10 @@ masterunlock() {
   fi
 }
 
-playbook() {
-  echo "Applying playbook settings"
-  if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
-    salt-call state.apply playbook.OLD_db_init
-    rm -f /opt/so/rules/elastalert/playbook/*.yaml
-    so-playbook-ruleupdate >> /root/soup_playbook_rule_update.log 2>&1 &
-  fi
-  if [[ "$INSTALLEDVERSION" != 2.3.30 ]]; then
-    so-playbook-sigma-refresh >> /root/soup_playbook_sigma_refresh.log 2>&1 &
-  fi
-}
-
-pillar_changes() {
+preupgrade_changes() {
   # This function is to add any new pillar items if needed.
-  echo "Checking to see if pillar changes are needed."
+  echo "Checking to see if changes are needed."
 
   [[ "$INSTALLEDVERSION" =~ rc.1 ]] && rc1_to_rc2
   [[ "$INSTALLEDVERSION" =~ rc.2 ]] && rc2_to_rc3
   [[ "$INSTALLEDVERSION" =~ rc.3 ]] && rc3_to_2.3.0
@@ -266,6 +240,34 @@ pillar_changes() {
   [[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_2.3.2X_to_2.3.30
 }
 
+postupgrade_changes() {
+  # This function is to add any new pillar items if needed.
+  echo "Running post upgrade processes."
+
+  [[ "$POSTVERSION" =~ rc.1 ]] && post_rc1_to_rc2
+  [[ "$POSTVERSION" == 2.3.20 || "$POSTVERSION" == 2.3.21 ]] && post_2.3.2X_to_2.3.30
+  [[ "$POSTVERSION" == 2.3.30 ]] && post_2.3.30_to_2.3.40
+}
+
+post_rc1_to_2.3.21() {
+  salt-call state.apply playbook.OLD_db_init
+  rm -f /opt/so/rules/elastalert/playbook/*.yaml
+  so-playbook-ruleupdate >> /root/soup_playbook_rule_update.log 2>&1 &
+  POSTVERSION=2.3.21
+}
+
+post_2.3.2X_to_2.3.30() {
+  so-playbook-sigma-refresh >> /root/soup_playbook_sigma_refresh.log 2>&1 &
+  POSTVERSION=2.3.30
+}
+
+post_2.3.30_to_2.3.40() {
+  so-playbook-sigma-refresh >> /root/soup_playbook_sigma_refresh.log 2>&1 &
+  so-kibana-space-defaults
+  POSTVERSION=2.3.40
+}
+
+
 rc1_to_rc2() {
 
   # Move the static file to global.sls
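Note how each post-upgrade handler advances POSTVERSION before the next test runs, so a multi-version jump walks every intermediate step; a sketch of the resulting call order when starting from 2.3.20:
```
# POSTVERSION=2.3.20 -> post_2.3.2X_to_2.3.30 runs, sets POSTVERSION=2.3.30
# POSTVERSION=2.3.30 -> post_2.3.30_to_2.3.40 runs, sets POSTVERSION=2.3.40
postupgrade_changes
```
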
@@ -296,15 +298,14 @@ rc1_to_rc2() {
   done </tmp/nodes.txt
   # Add the nodes back using hostname
   while read p; do
     local NAME=$(echo $p | awk '{print $1}')
     local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
     local IP=$(echo $p | awk '{print $2}')
     echo "Adding the new cross cluster config for $NAME"
     curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
   done </tmp/nodes.txt
 
   INSTALLEDVERSION=rc.2
-
 }
 
 rc2_to_rc3() {
@@ -334,10 +335,10 @@ rc3_to_2.3.0() {
   fi
 
   {
     echo "redis_settings:"
     echo "  redis_maxmemory: 827"
     echo "playbook:"
     echo "  api_key: de6639318502476f2fa5aa06f43f51fb389a3d7f"
   } >> /opt/so/saltstack/local/pillar/global.sls
 
   sed -i 's/playbook:/playbook_db:/' /opt/so/saltstack/local/pillar/secrets.sls
@@ -385,7 +386,6 @@ up_2.3.0_to_2.3.20(){
   fi
 
   INSTALLEDVERSION=2.3.20
-
 }
 
 up_2.3.2X_to_2.3.30() {
@@ -395,11 +395,11 @@ up_2.3.2X_to_2.3.30() {
     sed -i -r "s/ (\{\{.*}})$/ '\1'/g" "$pillar"
   done
 
   # Change the IMAGEREPO
   sed -i "/ imagerepo: 'securityonion'/c\  imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
   sed -i "/ imagerepo: securityonion/c\  imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
 
   # Strelka rule repo pillar addition
   if [ $is_airgap -eq 0 ]; then
     # Add manager as default Strelka YARA rule repo
     sed -i "/^strelka:/a \\  repos: \n    - https://$HOSTNAME/repo/rules/strelka" /opt/so/saltstack/local/pillar/global.sls;
@@ -410,16 +410,26 @@ up_2.3.2X_to_2.3.30() {
   check_log_size_limit
 }
 
-space_check() {
-  # Check to see if there is enough space
+verify_upgradespace() {
   CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
   if [ "$CURRENTSPACE" -lt "10" ]; then
-    echo "You are low on disk space. Upgrade will try and clean up space.";
-    clean_dockers
+    echo "You are low on disk space."
+    return 1
   else
-    echo "Plenty of space for upgrading"
+    return 0
   fi
+}
+
+upgrade_space() {
+  if ! verify_upgradespace; then
+    clean_dockers
+    if ! verify_upgradespace; then
+      echo "There is not enough space to perform the upgrade. Please free up space and try again"
+      exit 1
+    fi
+  else
+    echo "You have enough space for upgrade. Proceeding with soup."
+  fi
 }
 
 thehive_maint() {
@@ -427,16 +437,16 @@ thehive_maint() {
   COUNT=0
   THEHIVE_CONNECTED="no"
   while [[ "$COUNT" -le 240 ]]; do
     curl --output /dev/null --silent --head --fail -k "https://localhost/thehive/api/alert"
     if [ $? -eq 0 ]; then
       THEHIVE_CONNECTED="yes"
       echo "connected!"
       break
     else
       ((COUNT+=1))
       sleep 1
       echo -n "."
     fi
   done
   if [ "$THEHIVE_CONNECTED" == "yes" ]; then
     echo "Migrating thehive databases if needed."
@@ -471,83 +481,84 @@ update_version() {
 }
 
 upgrade_check() {
   # Let's make sure we actually need to update.
   NEWVERSION=$(cat $UPDATE_DIR/VERSION)
   if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
     echo "You are already running the latest version of Security Onion."
     exit 0
   fi
 }
 
 upgrade_check_salt() {
   NEWSALTVERSION=$(grep version: $UPDATE_DIR/salt/salt/master.defaults.yaml | awk {'print $2'})
   if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
     echo "You are already running the correct version of Salt for Security Onion."
   else
     UPGRADESALT=1
   fi
 }
 upgrade_salt() {
   SALTUPGRADED=True
   echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
   echo ""
   # If CentOS
   if [ "$OS" == "centos" ]; then
     echo "Removing yum versionlock for Salt."
     echo ""
     yum versionlock delete "salt-*"
     echo "Updating Salt packages and restarting services."
     echo ""
     if [ $is_airgap -eq 0 ]; then
       sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable "$NEWSALTVERSION"
     else
       sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
     fi
     echo "Applying yum versionlock for Salt."
     echo ""
     yum versionlock add "salt-*"
   # Else do Ubuntu things
   elif [ "$OS" == "ubuntu" ]; then
     echo "Removing apt hold for Salt."
     echo ""
     apt-mark unhold "salt-common"
     apt-mark unhold "salt-master"
     apt-mark unhold "salt-minion"
     echo "Updating Salt packages and restarting services."
     echo ""
     sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
     echo "Applying apt hold for Salt."
     echo ""
     apt-mark hold "salt-common"
     apt-mark hold "salt-master"
     apt-mark hold "salt-minion"
   fi
 }
 
 verify_latest_update_script() {
   # Check to see if the update scripts match. If not run the new one.
   CURRENTSOUP=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/soup | awk '{print $1}')
   GITSOUP=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/soup | awk '{print $1}')
   CURRENTCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-common | awk '{print $1}')
   GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
   CURRENTIMGCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common | awk '{print $1}')
   GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
 
   if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" ]]; then
     echo "This version of the soup script is up to date. Proceeding."
   else
     echo "You are not running the latest soup version. Updating soup and its components. Might take multiple runs to complete"
     cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
     cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
     cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
     salt-call state.apply common queue=True
     echo ""
     echo "soup has been updated. Please run soup again."
     exit 0
   fi
 }
 
 main () {
+echo "### Preparing soup at `date` ###"
 while getopts ":b" opt; do
   case "$opt" in
     b ) # process option b
@@ -557,9 +568,10 @@ while getopts ":b" opt; do
       echo "Batch size must be a number greater than 0."
       exit 1
       fi
       ;;
-    \? ) echo "Usage: cmd [-b]"
-    ;;
+    \? )
+      echo "Usage: cmd [-b]"
+      ;;
   esac
 done
 
@@ -573,6 +585,8 @@ check_airgap
 echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
 echo ""
 set_os
+set_palette
+check_elastic_license
 echo ""
 if [ $is_airgap -eq 0 ]; then
   # Let's mount the ISO since this is airgap
@@ -599,7 +613,7 @@ fi
 
 echo "Let's see if we need to update Security Onion."
 upgrade_check
-space_check
+upgrade_space
 
 echo "Checking for Salt Master and Minion updates."
 upgrade_check_salt
@@ -649,8 +663,7 @@ else
   echo ""
 fi
 
-echo "Making pillar changes."
-pillar_changes
+preupgrade_changes
 echo ""
 
 if [ $is_airgap -eq 0 ]; then
@@ -704,7 +717,7 @@ echo "Starting Salt Master service."
 systemctl start salt-master
 echo "Running a highstate. This could take several minutes."
 salt-call state.highstate -l info queue=True
-playbook
+postupgrade_changes
 unmount_update
 thehive_maint
 
@@ -741,6 +754,7 @@ NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l)
 if [ $NUM_MINIONS -gt 1 ]; then
 
 cat << EOF
+
 This appears to be a distributed deployment. Other nodes should update themselves at the next Salt highstate (typically within 15 minutes). Do not manually restart anything until you know that all the search/heavy nodes in your deployment are updated. This is especially important if you are using true clustering for Elasticsearch.
 
 Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.
@@ -751,7 +765,23 @@ For more information, please see https://docs.securityonion.net/en/2.3/soup.html
 EOF
 
 fi
+echo "### soup has been served at `date` ###"
 }
 
-main "$@" | tee /dev/fd/3
+cat << EOF
+
+SOUP - Security Onion UPdater
+
+Please review the following for more information about the update process and recent updates:
+https://docs.securityonion.net/soup
+https://blog.securityonion.net
+
+Please note that soup only updates Security Onion components and does NOT update the underlying operating system (OS). When you installed Security Onion, there was an option to automatically update the OS packages. If you did not enable this option, then you will want to ensure that the OS is fully updated before running soup.
+
+Press Enter to continue or Ctrl-C to cancel.
+
+EOF
+
+read input
+
+main "$@" | tee -a $SOUP_LOG
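The net effect on logging, with both before and after lines taken from this file's hunks above: soup previously redirected all of its output to $SOUP_LOG up front and teed it back to the terminal via fd 3; it now runs interactively and appends a copy of stdout to the log.
```
# Before
exec 3>&1 1>${SOUP_LOG} 2>&1
main "$@" | tee /dev/fd/3

# After
main "$@" | tee -a $SOUP_LOG
```
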
@@ -4,12 +4,11 @@
 {%- if grains['role'] in ['so-node', 'so-heavynode'] %}
 {%- set ELASTICSEARCH_HOST = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port', '') -%}
-{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
 {%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
 {%- set ELASTICSEARCH_HOST = salt['pillar.get']('manager:mainip', '') -%}
 {%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port', '') -%}
-{%- set LOG_SIZE_LIMIT = salt['pillar.get']('manager:log_size_limit', '') -%}
 {%- endif -%}
+{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
 
 # Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
 #

@@ -1,86 +1,9 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
 
-{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
-{% set MANAGER = salt['grains.get']('master') %}
-{% set OLDVERSIONS = ['2.0.0-rc.1','2.0.1-rc.1','2.0.2-rc.1','2.0.3-rc.1','2.1.0-rc.2','2.2.0-rc.3','2.3.0','2.3.1','2.3.2','2.3.10','2.3.20']%}
-
-{% for VERSION in OLDVERSIONS %}
-remove_images_{{ VERSION }}:
-  docker_image.absent:
-    - force: True
-    - images:
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-acng:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-cortex:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-curator:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-domainstats:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elastalert:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-filebeat:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-fleet:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-fleet-launcher:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-freqserver:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-grafana:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-idstools:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-influxdb:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-kibana:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-kratos:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logstash:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-minio:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-mysql:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-nginx:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-playbook:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-redis:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soc:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soctopus:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-steno:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-frontend:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-manager:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-backend:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-filestream:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-telegraf:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-es:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-wazuh:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-zeek:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-acng:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-thehive-cortex:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-curator:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-domainstats:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-elastalert:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-elasticsearch:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-filebeat:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-fleet:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-fleet-launcher:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-freqserver:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-grafana:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-idstools:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-influxdb:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-kibana:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-kratos:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-logstash:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-minio:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-mysql:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-nginx:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-pcaptools:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-playbook:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-redis:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-soc:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-soctopus:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-steno:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-strelka-frontend:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-strelka-manager:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-strelka-backend:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-strelka-filestream:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-suricata:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-telegraf:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-thehive:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-thehive-es:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-wazuh:{{ VERSION }}'
-      - '{{ MANAGER }}:5000/securityonion/so-zeek:{{ VERSION }}'
-{% endfor %}
+prune_images:
+  cmd.run:
+    - name: so-docker-prune
 
 {% else %}

@@ -4,6 +4,9 @@ from time import gmtime, strftime
 import requests,json
 from elastalert.alerts import Alerter
 
+import urllib3
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
 class PlaybookESAlerter(Alerter):
     """
     Use matched data to create alerts in elasticsearch
@@ -17,7 +20,7 @@ class PlaybookESAlerter(Alerter):
         timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime())
         headers = {"Content-Type": "application/json"}
         payload = {"rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
-        url = f"http://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/"
+        url = f"https://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/"
         requests.post(url, data=json.dumps(payload), headers=headers, verify=False)
 
     def get_info(self):

@@ -32,8 +32,6 @@
     { "rename": { "field": "category", "target_field": "event.category", "ignore_failure": true, "ignore_missing": true } },
     { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_failure": true, "ignore_missing": true } },
     { "lowercase": { "field": "event.dataset", "ignore_failure": true, "ignore_missing": true } },
-    { "convert": { "field": "destination.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
-    { "convert": { "field": "source.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
     { "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } },
     { "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } },
    { "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } },

@@ -51,16 +51,29 @@
         "match_mapping_type": "string",
         "path_match": "*.ip",
         "mapping": {
-          "type": "ip"
+          "type": "ip",
+          "fields" : {
+            "keyword" : {
+              "ignore_above" : 45,
+              "type" : "keyword"
+            }
+          }
+
         }
       }
     },
     {
       "port": {
-        "match_mapping_type": "string",
         "path_match": "*.port",
         "mapping": {
-          "type": "integer"
+          "type": "integer",
+          "fields" : {
+            "keyword" : {
+              "ignore_above" : 6,
+              "type" : "keyword"
+            }
+          }
+
         }
       }
     },

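With the keyword subfields added above, `*.ip` and `*.port` values become searchable as strings alongside their typed values; a sketch of the kind of query this enables (index pattern and field are illustrative, not taken from the diff):
```
# Hypothetical example: wildcard match on the string form of an IP field
curl -s localhost:9200/so-*/_search -H 'Content-Type: application/json' -d '
{ "query": { "wildcard": { "destination.ip.keyword": "192.168.*" } } }'
```
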
@@ -26,15 +26,6 @@ iptables_fix_fwd:
     - position: 1
     - target: DOCKER-USER
 
-# Allow related/established sessions
-iptables_allow_established:
-  iptables.append:
-    - table: filter
-    - chain: INPUT
-    - jump: ACCEPT
-    - match: conntrack
-    - ctstate: 'RELATED,ESTABLISHED'
-
 # I like pings
 iptables_allow_pings:
   iptables.append:
@@ -77,17 +68,6 @@ enable_docker_user_fw_policy:
     - out-interface: docker0
     - position: 1
 
-enable_docker_user_established:
-  iptables.insert:
-    - table: filter
-    - chain: DOCKER-USER
-    - jump: ACCEPT
-    - in-interface: '!docker0'
-    - out-interface: docker0
-    - position: 1
-    - match: conntrack
-    - ctstate: 'RELATED,ESTABLISHED'
-
 {% set count = namespace(value=0) %}
 {% for chain, hg in assigned_hostgroups.chain.items() %}
 {% for hostgroup, portgroups in assigned_hostgroups.chain[chain].hostgroups.items() %}
@@ -120,6 +100,27 @@ enable_docker_user_established:
 {% endfor %}
 {% endfor %}
 
+# Allow related/established sessions
+iptables_allow_established:
+  iptables.insert:
+    - table: filter
+    - chain: INPUT
+    - jump: ACCEPT
+    - position: 1
+    - match: conntrack
+    - ctstate: 'RELATED,ESTABLISHED'
+
+enable_docker_user_established:
+  iptables.insert:
+    - table: filter
+    - chain: DOCKER-USER
+    - jump: ACCEPT
+    - in-interface: '!docker0'
+    - out-interface: docker0
+    - position: 1
+    - match: conntrack
+    - ctstate: 'RELATED,ESTABLISHED'
+
 # Block icmp timestamp response
 block_icmp_timestamp_reply:
   iptables.append:

@@ -19,13 +19,12 @@
 {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
 {% set MANAGER = salt['grains.get']('master') %}
 {% set ENGINE = salt['pillar.get']('global:mdengine', '') %}
+{% set proxy = salt['pillar.get']('manager:proxy') %}
+
+include:
+  - idstools.sync_files
+
 # IDSTools Setup
-idstoolsdir:
-  file.directory:
-    - name: /opt/so/conf/idstools/etc
-    - user: 939
-    - group: 939
-    - makedirs: True
-
 idstoolslogdir:
   file.directory:
@@ -34,14 +33,6 @@ idstoolslogdir:
     - group: 939
     - makedirs: True
 
-idstoolsetcsync:
-  file.recurse:
-    - name: /opt/so/conf/idstools/etc
-    - source: salt://idstools/etc
-    - user: 939
-    - group: 939
-    - template: jinja
-
 so-ruleupdatecron:
   cron.present:
     - name: /usr/sbin/so-rule-update > /opt/so/log/idstools/download.log 2>&1
@@ -49,28 +40,17 @@ so-ruleupdatecron:
     - minute: '1'
     - hour: '7'
 
-rulesdir:
-  file.directory:
-    - name: /opt/so/rules/nids
-    - user: 939
-    - group: 939
-    - makedirs: True
-
-# Don't show changes because all.rules can be large
-synclocalnidsrules:
-  file.recurse:
-    - name: /opt/so/rules/nids/
-    - source: salt://idstools/
-    - user: 939
-    - group: 939
-    - show_changes: False
-    - include_pat: 'E@.rules'
-
 so-idstools:
   docker_container.running:
     - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-idstools:{{ VERSION }}
     - hostname: so-idstools
     - user: socore
+{% if proxy %}
+    - environment:
+      - http_proxy={{ proxy }}
+      - https_proxy={{ proxy }}
+      - no_proxy={{ salt['pillar.get']('manager:no_proxy') }}
+{% endif %}
     - binds:
      - /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro
       - /opt/so/rules/nids:/opt/so/rules/nids:rw

salt/idstools/sync_files.sls (new file, 46 lines; listing truncated in this excerpt)
@@ -0,0 +1,46 @@
+# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+idstoolsdir:
+  file.directory:
+    - name: /opt/so/conf/idstools/etc
+    - user: 939
- group: 939
|
||||||
|
- makedirs: True
|
||||||
|
|
||||||
|
idstoolsetcsync:
|
||||||
|
file.recurse:
|
||||||
|
- name: /opt/so/conf/idstools/etc
|
||||||
|
- source: salt://idstools/etc
|
||||||
|
- user: 939
|
||||||
|
- group: 939
|
||||||
|
- template: jinja
|
||||||
|
|
||||||
|
rulesdir:
|
||||||
|
file.directory:
|
||||||
|
- name: /opt/so/rules/nids
|
||||||
|
- user: 939
|
||||||
|
- group: 939
|
||||||
|
- makedirs: True
|
||||||
|
|
||||||
|
# Don't show changes because all.rules can be large
|
||||||
|
synclocalnidsrules:
|
||||||
|
file.recurse:
|
||||||
|
- name: /opt/so/rules/nids/
|
||||||
|
- source: salt://idstools/
|
||||||
|
- user: 939
|
||||||
|
- group: 939
|
||||||
|
- show_changes: False
|
||||||
|
- include_pat: 'E@.rules'
|
||||||
@@ -23,4 +23,4 @@ wait_for_web_response "http://localhost:5601/app/kibana" "Elastic"
 THECOOKIE=$(curl -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
 
 # Load saved objects
-curl -b "sid=$THECOOKIE" -L -X POST "localhost:5601/api/saved_objects/_import?overwrite=true" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson > /dev/null 2>&1
+curl -b "sid=$THECOOKIE" -L -X POST "localhost:5601/api/saved_objects/_import?overwrite=true" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson >> /opt/so/log/kibana/misc.log

File diff suppressed because one or more lines are too long

@@ -89,7 +89,7 @@ def run():
 
     # Update the Fleet host in the static pillar
     for line in fileinput.input(STATICFILE, inplace=True):
-        line = re.sub(r'fleet_custom_hostname:.*\n', f"fleet_custom_hostname: {CUSTOMHOSTNAME}", line.rstrip())
+        line = re.sub(r'fleet_custom_hostname:.*$', f"fleet_custom_hostname: {CUSTOMHOSTNAME}", line.rstrip())
         print(line)
 
     return {}
@@ -1,52 +1,49 @@
 {
-  "title": "Security Onion 2.3.30 is here!",
+  "title": "Security Onion 2.3.40 is here!",
   "changes": [
-    { "summary": "Zeek is now at version 3.0.13." },
-    { "summary": "CyberChef is now at version 9.27.2." },
-    { "summary": "Elastic components are now at version 7.10.2. This is the last version that uses the Apache license." },
-    { "summary": "Suricata is now at version 6.0.1." },
-    { "summary": "Salt is now at version 3002.5." },
-    { "summary": "Suricata metadata parsing is now vastly improved." },
-    { "summary": "If you choose Suricata for metadata parsing, it will now extract files from the network and send them to Strelka. You can add additional mime types <a href='https://github.com/Security-Onion-Solutions/securityonion/blob/dev/salt/idstools/sorules/extraction.rules'>here</a>." },
-    { "summary": "It is now possible to filter Suricata events from being written to the logs. This is a new Suricata 6 feature. We have included some examples <a href='https://github.com/Security-Onion-Solutions/securityonion/blob/dev/salt/idstools/sorules/filters.rules'>here</a>." },
-    { "summary": "The Kratos docker container will now perform DNS lookups locally before reaching out to the network DNS provider." },
-    { "summary": "Network configuration is now more compatible with manually configured OpenVPN or Wireguard VPN interfaces." },
-    { "summary": "<code>so-sensor-clean</code> will no longer spawn multiple instances." },
-    { "summary": "Suricata eve.json logs will now be cleaned up after 7 days. This can be changed via the pillar setting." },
-    { "summary": "Fixed a security issue where the backup directory had improper file permissions." },
-    { "summary": "The automated backup script on the manager now backs up all keys along with the salt configurations. Backup retention is now set to 7 days." },
-    { "summary": "Strelka logs are now being rotated properly." },
-    { "summary": "Elastalert can now be customized via a pillar." },
-    { "summary": "Introduced new script <code>so-monitor-add</code> that allows the user to easily add interfaces to the bond for monitoring." },
-    { "summary": "Setup now validates all user input fields to give up-front feedback if an entered value is invalid." },
-    { "summary": "There have been several changes to improve install reliability. Many install steps have had their validation processes reworked to ensure that required tasks have been completed before moving on to the next step of the install." },
-    { "summary": "Users are now warned if they try to set <i>securityonion</i> as their hostname." },
-    { "summary": "The ISO should now identify xvda and nvme devices as install targets." },
-    { "summary": "At the end of the first stage of the ISO setup, the ISO device should properly unmount and eject." },
-    { "summary": "The text selection of choosing Suricata vs Zeek for metadata is now more descriptive." },
-    { "summary": "The logic for properly setting the <code>LOG_SIZE_LIMIT</code> variable has been improved." },
-    { "summary": "When installing on Ubuntu, Setup will now wait for cloud init to complete before trying to start the install of packages." },
-    { "summary": "The firewall state runs considerably faster now." },
-    { "summary": "ICMP timestamps are now disabled." },
-    { "summary": "Copyright dates on all Security Onion specific files have been updated." },
-    { "summary": "<code>so-tcpreplay</code> (and indirectly <code>so-test</code>) should now work properly." },
-    { "summary": "The Zeek packet loss script is now more accurate." },
-    { "summary": "Grafana now includes an estimated EPS graph for events ingested on the manager." },
-    { "summary": "Updated Elastalert to release 0.2.4-alt2 based on the <a href='https://github.com/jertel/elastalert'>jertel/elastalert</a> alt branch." },
-    { "summary": "Pivots from Alerts/Hunts to action links will properly URI encode values." },
-    { "summary": "Hunt timeline graph will properly scale the data point interval based on the search date range." },
-    { "summary": "Grid interface will properly show <i>Search</i> as the node type instead of <i>so-node</i>." },
-    { "summary": "Import node now supports airgap environments." },
-    { "summary": "The so-mysql container will now show <i>healthy</i> when viewing the docker ps output." },
-    { "summary": "The Soctopus configuration now uses private IPs instead of public IPs, allowing network communications to succeed within the grid." },
-    { "summary": "The Correlate action in Hunt now groups the OR filters together to ensure subsequent user-added filters are correctly ANDed to the entire OR group." },
-    { "summary": "Add support to <code>so-firewall</code> script to display existing port groups and host groups." },
-    { "summary": "TheHive initialization during Security Onion setup will now properly check for a running ES instance and will retry connectivity checks to TheHive before proceeding." },
-    { "summary": "Changes to the <i>.security</i> analyzer yields more accurate query results when using Playbook." },
-    { "summary": "Several Hunt queries have been updated." },
-    { "summary": "The pfSense firewall log parser has been updated to improve compatibility." },
-    { "summary": "Kibana dashboard hyperlinks have been updated for faster navigation." },
-    { "summary": "Added a new <code>so-rule</code> script to make it easier to disable, enable, and modify SIDs." },
-    { "summary": "ISO now gives the option to just configure the network during setup." }
+    { "summary": "FEATURE: Add option for HTTP Method Specification/POST to Hunt/Alerts Actions <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/2904\">#2904</a>" },
+    { "summary": "FEATURE: Add option to configure proxy for various tools used during setup + persist the proxy configuration <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/529\">#529</a>" },
+    { "summary": "FEATURE: Alerts/Hunt - Provide method for base64-encoding pivot value <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/1749\">#1749</a>" },
+    { "summary": "FEATURE: Allow users to customize links in SOC <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/1248\">#1248</a>" },
+    { "summary": "FEATURE: Display user who requested PCAP in SOC <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/2775\">#2775</a>" },
+    { "summary": "FEATURE: Make SOC browser app connection timeouts adjustable <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/2408\">#2408</a>" },
+    { "summary": "FEATURE: Move to FleetDM <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3483\">#3483</a>" },
+    { "summary": "FEATURE: Reduce field cache expiration from 1d to 5m, and expose value as a salt pillar <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3537\">#3537</a>" },
+    { "summary": "FEATURE: Refactor docker_clean salt state to use loop w/ inspection instead of hardcoded image list <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3113\">#3113</a>" },
+    { "summary": "FEATURE: Run so-ssh-harden during setup <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/1932\">#1932</a>" },
+    { "summary": "FEATURE: SOC should only display links to tools that are enabled <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/1643\">#1643</a>" },
+    { "summary": "FEATURE: Update Sigmac Osquery Field Mappings <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3137\">#3137</a>" },
+    { "summary": "FEATURE: User must accept the Elastic licence during setup <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3233\">#3233</a>" },
+    { "summary": "FEATURE: soup should output more guidance for distributed deployments at the end <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3340\">#3340</a>" },
+    { "summary": "FEATURE: soup should provide some initial information and then prompt the user to continue <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3486\">#3486</a>" },
+    { "summary": "FIX: Add cronjob for so-suricata-eve-clean script <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3515\">#3515</a>" },
+    { "summary": "FIX: Change Elasticsearch heap formula <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/1686\">#1686</a>" },
+    { "summary": "FIX: Create a post install version loop in soup <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3102\">#3102</a>" },
+    { "summary": "FIX: Custom Kibana settings are not being applied properly on upgrades <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3254\">#3254</a>" },
+    { "summary": "FIX: Hunt query issues with quotes <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3320\">#3320</a>" },
+    { "summary": "FIX: IP Addresses don't work with .security <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3327\">#3327</a>" },
+    { "summary": "FIX: Improve DHCP leases query in Hunt <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3395\">#3395</a>" },
+    { "summary": "FIX: Improve Setup verbiage <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3422\">#3422</a>" },
+    { "summary": "FIX: Improve Suricata DHCP logging and parsing <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3397\">#3397</a>" },
+    { "summary": "FIX: Keep RELATED,ESTABLISHED rules at the top of iptables chains <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3288\">#3288</a>" },
+    { "summary": "FIX: Populate http.status_message field <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3408\">#3408</a>" },
+    { "summary": "FIX: Remove 'types removal' deprecation messages from elastic log. <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3345\">#3345</a>" },
+    { "summary": "FIX: Reword + fix formatting on ES data storage prompt <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3205\">#3205</a>" },
+    { "summary": "FIX: SMTP shoud read SNMP on Kibana SNMP view <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3413\">#3413</a>" },
+    { "summary": "FIX: Sensors can temporarily show offline while processing large PCAP jobs <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3279\">#3279</a>" },
+    { "summary": "FIX: Soup should log to the screen as well as to a file <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3467\">#3467</a>" },
+    { "summary": "FIX: Strelka port 57314 not immediately relinquished upon restart <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3457\">#3457</a>" },
+    { "summary": "FIX: Switch SOC to pull from fieldcaps API due to field caching changes in Kibana 7.11 <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3502\">#3502</a>" },
+    { "summary": "FIX: Syntax error in /etc/sysctl.d/99-reserved-ports.conf <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3308\">#3308</a>" },
+    { "summary": "FIX: Telegraf hardcoded to use https and is not aware of elasticsearch features <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/2061\">#2061</a>" },
+    { "summary": "FIX: Zeek Index Close and Delete Count for curator <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3274\">#3274</a>" },
+    { "summary": "FIX: so-cortex-user-add and so-cortex-user-enable use wrong pillar value for api key <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3388\">#3388</a>" },
+    { "summary": "FIX: so-rule does not completely apply change <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3289\">#3289</a>" },
+    { "summary": "FIX: soup should recheck disk space after it tries to clean up. <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3235\">#3235</a>" },
+    { "summary": "UPGRADE: Elastic 7.11.2 <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3389\">#3389</a>" },
+    { "summary": "UPGRADE: Suricata 6.0.2 <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3217\">#3217</a>" },
+    { "summary": "UPGRADE: Zeek 4 <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3216\">#3216</a>" },
+    { "summary": "UPGRADE: Zeek container to use Python 3 <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/1113\">#1113</a>" },
+    { "summary": "UPGRADE: docker-ce to latest <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3493\">#3493</a>" }
   ]
 }
@@ -1,17 +1,23 @@
 {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
 {%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') %}
 {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %}
+{%- set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
+{%- set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
+{%- set OSQUERY = salt['pillar.get']('manager:osquery', '0') %}
+{%- set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
 {%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
 {%- set API_TIMEOUT = salt['pillar.get']('sensoroni:api_timeout_ms', 0) %}
 {%- set WEBSOCKET_TIMEOUT = salt['pillar.get']('sensoroni:websocket_timeout_ms', 0) %}
 {%- set TIP_TIMEOUT = salt['pillar.get']('sensoroni:tip_timeout_ms', 0) %}
 {%- set CACHE_EXPIRATION = salt['pillar.get']('sensoroni:cache_expiration_ms', 0) %}
+{%- set ES_FIELDCAPS_CACHE = salt['pillar.get']('sensoroni:es_fieldcaps_cache_ms', '300000') %}
 {%- import_json "soc/files/soc/alerts.queries.json" as alerts_queries %}
 {%- import_json "soc/files/soc/alerts.actions.json" as alerts_actions %}
 {%- import_json "soc/files/soc/alerts.eventfields.json" as alerts_eventfields %}
 {%- import_json "soc/files/soc/hunt.queries.json" as hunt_queries %}
 {%- import_json "soc/files/soc/hunt.actions.json" as hunt_actions %}
 {%- import_json "soc/files/soc/hunt.eventfields.json" as hunt_eventfields %}
+{%- import_json "soc/files/soc/tools.json" as tools %}
 {%- set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %}
 
 {
@@ -44,6 +50,7 @@
 {%- endif %}
       "username": "",
       "password": "",
+      "cacheMs": {{ ES_FIELDCAPS_CACHE }},
       "verifyCert": false
     },
     "sostatus": {
@@ -74,6 +81,22 @@
     "webSocketTimeoutMs": {{ WEBSOCKET_TIMEOUT }},
     "tipTimeoutMs": {{ TIP_TIMEOUT }},
     "cacheExpirationMs": {{ CACHE_EXPIRATION }},
+    "inactiveTools": [
+{%- if PLAYBOOK == 0 %}
+      "toolPlaybook",
+{%- endif %}
+{%- if THEHIVE == 0 %}
+      "toolTheHive",
+{%- endif %}
+{%- if OSQUERY == 0 %}
+      "toolFleet",
+{%- endif %}
+{%- if GRAFANA == 0 %}
+      "toolGrafana",
+{%- endif %}
+      "toolUnused"
+    ],
+    "tools": {{ tools | json }},
     "hunt": {
       "advanced": true,
       "groupItemsPerPage": 10,

salt/soc/files/soc/tools.json (new file, 9 lines)
@@ -0,0 +1,9 @@
+[
+  { "name": "toolKibana", "description": "toolKibanaHelp", "icon": "fa-external-link-alt", "target": "so-kibana", "link": "/kibana/" },
+  { "name": "toolGrafana", "description": "toolGrafanaHelp", "icon": "fa-external-link-alt", "target": "so-grafana", "link": "/grafana/d/so_overview" },
+  { "name": "toolCyberchef", "description": "toolCyberchefHelp", "icon": "fa-external-link-alt", "target": "so-cyberchef", "link": "/cyberchef/" },
+  { "name": "toolPlaybook", "description": "toolPlaybookHelp", "icon": "fa-external-link-alt", "target": "so-playbook", "link": "/playbook/projects/detection-playbooks/issues/" },
+  { "name": "toolFleet", "description": "toolFleetHelp", "icon": "fa-external-link-alt", "target": "so-fleet", "link": "/fleet/" },
+  { "name": "toolTheHive", "description": "toolTheHiveHelp", "icon": "fa-external-link-alt", "target": "so-thehive", "link": "/thehive/" },
+  { "name": "toolNavigator", "description": "toolNavigatorHelp", "icon": "fa-external-link-alt", "target": "so-navigator", "link": "/navigator/" }
+]

@@ -19,7 +19,8 @@ files:
     - '/nsm/strelka/unprocessed/*'
   delete: false
   gatekeeper: true
+  processed: '/nsm/strelka/processed'
 response:
   report: 5s
   delta: 5s
-staging: '/nsm/strelka/processed'
+staging: '/nsm/strelka/staging'
@@ -86,6 +86,13 @@ strelkaprocessed:
     - group: 939
     - makedirs: True
 
+strelkastaging:
+  file.directory:
+    - name: /nsm/strelka/staging
+    - user: 939
+    - group: 939
+    - makedirs: True
+
 strelkaunprocessed:
   file.directory:
     - name: /nsm/strelka/unprocessed
@@ -96,7 +103,7 @@ strelkaunprocessed:
 # Check to see if Strelka frontend port is available
 strelkaportavailable:
   cmd.run:
-    - name: netstat -utanp | grep ":57314" | grep -qv docker && PROCESS=$(netstat -utanp | grep ":57314" | uniq) && echo "Another process ($PROCESS) appears to be using port 57314. Please terminate this process, or reboot to ensure a clean state so that Strelka can start properly." && exit 1 || exit 0
+    - name: netstat -utanp | grep ":57314" | grep -qvE 'docker|TIME_WAIT' && PROCESS=$(netstat -utanp | grep ":57314" | uniq) && echo "Another process ($PROCESS) appears to be using port 57314. Please terminate this process, or reboot to ensure a clean state so that Strelka can start properly." && exit 1 || exit 0
 
 strelka_coordinator:
   docker_container.running:
@@ -213,4 +220,4 @@ strelka_zeek_extracted_sync:
   test.fail_without_changes:
     - name: {{sls}}_state_not_allowed
 
 {% endif %}
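The port-availability change above addresses Strelka port 57314 not being immediately relinquished on restart (#3457): sockets left in TIME_WAIT no longer count as the port being busy. A standalone sketch of the same check:

```
# Sketch of the updated preflight test: docker-proxy entries and sockets
# lingering in TIME_WAIT are filtered out before declaring the port busy,
# since neither prevents Strelka's frontend from binding again.
if netstat -utanp | grep ":57314" | grep -qvE 'docker|TIME_WAIT'; then
  echo "Port 57314 is held by a live process; aborting." >&2
  exit 1
fi
```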
@@ -179,6 +179,26 @@ disable_so-suricata_so-status.conf:
     - month: '*'
     - dayweek: '*'
 
+so-suricata-eve-clean:
+  file.managed:
+    - name: /usr/sbin/so-suricata-eve-clean
+    - user: root
+    - group: root
+    - mode: 755
+    - template: jinja
+    - source: salt://suricata/cron/so-suricata-eve-clean
+
+# Add eve clean cron
+clean_suricata_eve_files:
+  cron.present:
+    - name: /usr/sbin/so-suricata-eve-clean > /dev/null 2>&1
+    - user: root
+    - minute: '*/5'
+    - hour: '*'
+    - daymonth: '*'
+    - month: '*'
+    - dayweek: '*'
+
 {% else %}
 
 {{sls}}_state_not_allowed:

@@ -618,11 +618,8 @@
 # # Read stats from one or more Elasticsearch servers or clusters
 {% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
 [[inputs.elasticsearch]]
-
-  # ## specify a list of one or more Elasticsearch servers
-  # # you can add username and password to your url to use basic authentication:
-  # # servers = ["http://user:pass@localhost:9200"]
   servers = ["https://{{ MANAGER }}:9200"]
+  insecure_skip_verify = true
 {% elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
 [[inputs.elasticsearch]]
   servers = ["https://{{ NODEIP }}:9200"]

@@ -1,3 +1,4 @@
+{% set proxy = salt['pillar.get']('manager:proxy') -%}
 [main]
 cachedir=/var/cache/yum/$basearch/$releasever
 keepcache=0
@@ -11,7 +12,8 @@ installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }}
 bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
 distroverpkg=centos-release
 clean_requirements_on_remove=1
-{% if (grains['role'] not in ['so-eval','so-managersearch', 'so-manager', 'so-standalone']) and salt['pillar.get']('global:managerupdate', '0') %}
+{% if (grains['role'] not in ['so-eval','so-managersearch', 'so-manager', 'so-standalone']) and salt['pillar.get']('global:managerupdate', '0') -%}
 proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142
+{% elif proxy -%}
+proxy={{ proxy }}
 {% endif %}
setup/automation/standalone-net-centos-proxy (new file, 78 lines)
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+TESTING=true
+
+# address_type=DHCP
+ADMINUSER=onionuser
+ADMINPASS1=onionuser
+ADMINPASS2=onionuser
+ALLOW_CIDR=0.0.0.0/0
+ALLOW_ROLE=a
+BASICZEEK=2
+BASICSURI=2
+# BLOGS=
+BNICS=eth1
+ZEEKVERSION=ZEEK
+# CURCLOSEDAYS=
+# EVALADVANCED=BASIC
+GRAFANA=1
+# HELIXAPIKEY=
+HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
+HNSENSOR=inherit
+HOSTNAME=standalone
+install_type=STANDALONE
+# LSINPUTBATCHCOUNT=
+# LSINPUTTHREADS=
+# LSPIPELINEBATCH=
+# LSPIPELINEWORKERS=
+MANAGERADV=BASIC
+MANAGERUPDATES=1
+# MDNS=
+# MGATEWAY=
+# MIP=
+# MMASK=
+MNIC=eth0
+# MSEARCH=
+# MSRV=
+# MTU=
+NIDS=Suricata
+# NODE_ES_HEAP_SIZE=
+# NODE_LS_HEAP_SIZE=
+NODESETUP=NODEBASIC
+NSMSETUP=BASIC
+NODEUPDATES=MANAGER
+# OINKCODE=
+OSQUERY=1
+# PATCHSCHEDULEDAYS=
+# PATCHSCHEDULEHOURS=
+PATCHSCHEDULENAME=auto
+PLAYBOOK=1
+so_proxy=http://onionuser:0n10nus3r@10.66.166.30:3128
+# REDIRECTHOST=
+REDIRECTINFO=IP
+RULESETUP=ETOPEN
+# SHARDCOUNT=
+# SKIP_REBOOT=
+SOREMOTEPASS1=onionuser
+SOREMOTEPASS2=onionuser
+STRELKA=1
+THEHIVE=1
+WAZUH=1
+WEBUSER=onionuser@somewhere.invalid
+WEBPASSWD1=0n10nus3r
+WEBPASSWD2=0n10nus3r

@@ -1,2 +0,0 @@
-[Service]
-ExecStart=/usr/bin/dockerd /usr/bin/dockerd -H fd:// --registry-mirror "$proxy_addr"
@@ -535,6 +535,56 @@ collect_patch_schedule_name_import() {
   done
 }
 
+collect_proxy() {
+  [[ -n $TESTING ]] && return
+  collect_proxy_details
+  while ! proxy_validate; do
+    if whiptail_invalid_proxy; then
+      collect_proxy_details no_ask
+    else
+      so_proxy=""
+      break
+    fi
+  done
+}
+
+collect_proxy_details() {
+  local ask=${1:-true}
+  local use_proxy
+  if [[ $ask != true ]]; then
+    use_proxy=0
+  else
+    whiptail_proxy_ask
+    use_proxy=$?
+  fi
+
+  if [[ $use_proxy == 0 ]]; then
+    whiptail_proxy_addr "$proxy_addr"
+
+    while ! valid_proxy "$proxy_addr"; do
+      whiptail_invalid_input
+      whiptail_proxy_addr "$proxy_addr"
+    done
+
+    if whiptail_proxy_auth_ask; then
+      whiptail_proxy_auth_user "$proxy_user"
+      whiptail_proxy_auth_pass "$proxy_pass"
+
+      local url_prefixes=( 'http://' 'https://' )
+      for prefix in "${url_prefixes[@]}"; do
+        if echo "$proxy_addr" | grep -q "$prefix"; then
+          local proxy=${proxy_addr#"$prefix"}
+          so_proxy="${prefix}${proxy_user}:${proxy_pass}@${proxy}"
+          break
+        fi
+      done
+    else
+      so_proxy="$proxy_addr"
+    fi
+    export proxy
+  fi
+}
+
 collect_redirect_host() {
   whiptail_set_redirect_host "$HOSTNAME"
 
@@ -691,10 +741,10 @@ check_requirements() {
     else
       req_storage=100
     fi
-    if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then
+    if [[ $free_space_root -lt $req_storage ]]; then
       whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB"
     fi
-    if (( $(echo "$free_space_nsm < $req_storage" | bc -l) )); then
+    if [[ $free_space_nsm -lt $req_storage ]]; then
       whiptail_storage_requirements "/nsm" "${free_space_nsm} GB" "${req_storage} GB"
     fi
   else
@@ -703,7 +753,7 @@ check_requirements() {
     else
       req_storage=200
     fi
-    if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then
+    if [[ $free_space_root -lt $req_storage ]]; then
      whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB"
     fi
   fi
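These comparisons drop the `bc` dependency (its install is also removed from the prereq packages further down) in favor of bash integer tests. A small sketch showing the two forms agree for the whole-gigabyte values setup works with:

```
# free space values are whole gigabytes in the surrounding function
free_space_root=87
req_storage=100

# old form: forks bc for a floating-point comparison
(( $(echo "$free_space_root < $req_storage" | bc -l) )) && echo "too small (bc)"

# new form: built-in integer comparison, no external tool required
[[ $free_space_root -lt $req_storage ]] && echo "too small (bash)"
```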
@@ -743,12 +793,14 @@ check_sos_appliance() {
 compare_main_nic_ip() {
   if ! [[ $MNIC =~ ^(tun|wg|vpn).*$ ]]; then
     if [[ "$MAINIP" != "$MNIC_IP" ]]; then
+      error "[ERROR] Main gateway ($MAINIP) does not match ip address of managament NIC ($MNIC_IP)."
+
       read -r -d '' message <<- EOM
 				The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC).
 
 				This is not a supported configuration, please remediate and rerun setup.
 			EOM
-      whiptail --title "Security Onion Setup" --msgbox "$message" 10 75
+      [[ -n $TESTING ]] || whiptail --title "Security Onion Setup" --msgbox "$message" 10 75
       kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
     fi
   else
@@ -897,7 +949,7 @@ create_repo() {
 }
 
 detect_cloud() {
-  echo "Testing if setup is running on a cloud instance..." >> "$setup_log" 2>&1
+  echo "Testing if setup is running on a cloud instance..." | tee -a "$setup_log"
   if ( curl --fail -s -m 5 http://169.254.169.254/latest/meta-data/instance-id > /dev/null ) || ( dmidecode -s bios-vendor | grep -q Google > /dev/null); then export is_cloud="true"; fi
 }
 
@@ -939,36 +991,29 @@ detect_os() {
 
 }
 
-installer_prereq_packages() {
+installer_progress_loop() {
+  local i=0
+  while true; do
+    [[ $i -lt 98 ]] && ((i++))
+    set_progress_str "$i" 'Checking that all required packages are installed and enabled...' nolog
+    [[ $i -gt 0 ]] && sleep 5s
+  done
+}
+
+installer_prereq_packages() {
   if [ "$OS" == centos ]; then
-    # Print message to stdout so the user knows setup is doing something
-    echo "Installing required packages to run installer..."
-    # Install bind-utils so the host command exists
     if [[ ! $is_iso ]]; then
-      if ! command -v host > /dev/null 2>&1; then
-        yum -y install bind-utils >> "$setup_log" 2>&1
+      if ! yum versionlock > /dev/null 2>&1; then
+        yum -y install yum-plugin-versionlock >> "$setup_log" 2>&1
       fi
       if ! command -v nmcli > /dev/null 2>&1; then
-        {
-          yum -y install NetworkManager;
-          systemctl enable NetworkManager;
-          systemctl start NetworkManager;
-        } >> "$setup_log" 2<&1
-      fi
-      if ! command -v bc > /dev/null 2>&1; then
-        yum -y install bc >> "$setup_log" 2>&1
-      fi
-      if ! yum versionlock > /dev/null 2>&1; then
-        yum -y install yum-plugin-versionlock >> "$setup_log" 2>&1
+        yum -y install NetworkManager >> "$setup_log" 2>&1
       fi
-    else
-      logCmd "systemctl enable NetworkManager"
-      logCmd "systemctl start NetworkManager"
     fi
+    logCmd "systemctl enable NetworkManager"
+    logCmd "systemctl start NetworkManager"
   elif [ "$OS" == ubuntu ]; then
     # Print message to stdout so the user knows setup is doing something
-    echo "Installing required packages to run installer..."
     retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
     # Install network manager so we can do interface stuff
     if ! command -v nmcli > /dev/null 2>&1; then
@@ -978,7 +1023,7 @@ installer_prereq_packages() {
       systemctl start NetworkManager
     } >> "$setup_log" 2<&1
     fi
-    retry 50 10 "apt-get -y install bc curl" >> "$setup_log" 2>&1 || exit 1
+    retry 50 10 "apt-get -y install curl" >> "$setup_log" 2>&1 || exit 1
   fi
 }
 
@@ -1002,11 +1047,11 @@ disable_ipv6() {
     sysctl -w net.ipv6.conf.all.disable_ipv6=1
     sysctl -w net.ipv6.conf.default.disable_ipv6=1
   } >> "$setup_log" 2>&1
   {
     echo "net.ipv6.conf.all.disable_ipv6 = 1"
     echo "net.ipv6.conf.default.disable_ipv6 = 1"
     echo "net.ipv6.conf.lo.disable_ipv6 = 1"
   } >> /etc/sysctl.conf
 }
 
 #disable_misc_network_features() {
@@ -1044,10 +1089,11 @@ docker_install() {
       yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
     fi
     if [[ ! $is_iso ]]; then
-      yum -y install docker-ce-19.03.14-3.el7 containerd.io-1.2.13-3.2.el7;
+      yum -y install docker-ce-20.10.5-3.el7 containerd.io-1.4.4-3.1.el7;
     fi
-    yum versionlock docker-ce-19.03.14-3.el7;
-    yum versionlock containerd.io-1.2.13-3.2.el7
+    yum versionlock docker-ce-20.10.5-3.el7;
+    yum versionlock docker-ce-cli-20.10.5-3.el7;
+    yum versionlock containerd.io-1.4.4-3.1.el7
   } >> "$setup_log" 2>&1
 
 else
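A hedged way to confirm the pin took effect after this upgrade; `versionlock list` prints the locked package versions:

```
# Hypothetical post-install check: the Docker packages should be pinned so a
# routine `yum update` cannot move them past the tested versions.
yum versionlock list | grep -E 'docker-ce|containerd'
docker --version
```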
@@ -1201,8 +1247,13 @@ es_heapsize() {
     # https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html
     ES_HEAP_SIZE="25000m"
   else
-    # Set heap size to 25% of available memory
-    ES_HEAP_SIZE=$(( total_mem / 4 ))"m"
+    # Set heap size to 33% of available memory
+    ES_HEAP_SIZE=$(( total_mem / 3 ))
+    if [ "$ES_HEAP_SIZE" -ge 25001 ] ; then
+      ES_HEAP_SIZE="25000m"
+    else
+      ES_HEAP_SIZE=$ES_HEAP_SIZE"m"
+    fi
   fi
   export ES_HEAP_SIZE
 
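This is the reworked heap formula from #1686: a third of total memory instead of a quarter, now with an explicit 25000 MB ceiling (the Elastic guide referenced above recommends keeping heaps well below the compressed-oops cutoff). A worked example for a 48 GB host, where `total_mem` is expressed in megabytes by the surrounding function:

```
total_mem=49152                     # 48 GB host
ES_HEAP_SIZE=$(( total_mem / 3 ))   # 16384
if [ "$ES_HEAP_SIZE" -ge 25001 ]; then
  ES_HEAP_SIZE="25000m"             # ceiling applied on very large hosts
else
  ES_HEAP_SIZE=$ES_HEAP_SIZE"m"     # -> "16384m"
fi
echo "$ES_HEAP_SIZE"
```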
@@ -1385,6 +1436,8 @@ install_cleanup() {
     info "Removing so-setup permission entry from sudoers file"
     sed -i '/so-setup/d' /etc/sudoers
   fi
+
+  so-ssh-harden -q
 }
 
 import_registry_docker() {
@@ -1432,6 +1485,8 @@ manager_pillar() {
     "manager:"\
     "  mainip: '$MAINIP'"\
     "  mainint: '$MNIC'"\
+    "  proxy: '$so_proxy'"\
+    "  no_proxy: '$no_proxy_string'"\
     "  esheap: '$ES_HEAP_SIZE'"\
     "  esclustername: '{{ grains.host }}'"\
     "  freq: 0"\
@@ -1446,7 +1501,6 @@ manager_pillar() {
   printf '%s\n'\
     "  elastalert: 1"\
     "  es_port: $node_es_port"\
-    "  log_size_limit: $log_size_limit"\
     "  cur_close_days: $CURCLOSEDAYS"\
     "  grafana: $GRAFANA"\
     "  osquery: $OSQUERY"\
@@ -1512,7 +1566,6 @@ manager_global() {
     "  hnmanager: '$HNMANAGER'"\
     "  ntpserver: '$NTPSERVER'"\
     "  dockernet: '$DOCKERNET'"\
-    "  proxy: '$PROXY'"\
     "  mdengine: '$ZEEKVERSION'"\
     "  ids: '$NIDS'"\
     "  url_base: '$REDIRECTIT'"\
@@ -1642,8 +1695,8 @@ manager_global() {
     "  so-zeek:"\
     "    shards: 5"\
     "    warm: 7"\
-    "    close: 365"\
-    "    delete: 45"\
+    "    close: 45"\
+    "    delete: 365"\
     "minio:"\
     "  access_key: '$ACCESS_KEY'"\
     "  access_secret: '$ACCESS_SECRET'"\
@@ -1695,7 +1748,6 @@ network_init() {
 network_init_whiptail() {
   case "$setup_type" in
     'iso')
-      collect_hostname
       whiptail_management_nic
       whiptail_dhcp_or_static
 
@@ -1709,7 +1761,6 @@ network_init_whiptail() {
     'network')
       whiptail_network_notice
       whiptail_dhcp_warn
-      collect_hostname
       whiptail_management_nic
       ;;
   esac
@@ -1777,6 +1828,22 @@ print_salt_state_apply() {
   echo "Applying $state Salt state"
 }
 
+proxy_validate() {
+  local test_url="https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS"
+  proxy_test_err=$(curl -sS "$test_url" --proxy "$so_proxy" 2>&1)
+  local ret=$?
+
+  if [[ $ret != 0 ]]; then
+    error "Could not reach $test_url using proxy $so_proxy"
+    error "Received error: $proxy_test_err"
+    if [[ -n $TESTING ]]; then
+      error "Exiting setup"
+      kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
+    fi
+  fi
+  return $ret
+}
+
 reserve_group_ids() {
   # This is a hack to fix CentOS from taking group IDs that we need
   groupadd -g 928 kratos
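`proxy_validate` simply fetches the public signing-key URL through the candidate proxy. A self-contained sketch of the same probe, with an illustrative proxy value:

```
# Standalone version of the check: curl's exit status decides, and its
# stderr text is captured for the error report. Proxy value is an example.
so_proxy="http://user:pass@10.0.0.5:3128"
test_url="https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS"

if err=$(curl -sS "$test_url" --proxy "$so_proxy" -o /dev/null 2>&1); then
  echo "Proxy OK"
else
  echo "Proxy check failed: $err" >&2
fi
```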
@@ -1870,6 +1937,24 @@ reinstall_init() {
   } >> "$setup_log" 2>&1
 }
 
+reset_proxy() {
+  [[ -f /etc/profile.d/so-proxy.sh ]] && rm -f /etc/profile.d/so-proxy.sh
+
+  [[ -f /etc/systemd/system/docker.service.d/http-proxy.conf ]] && rm -f /etc/systemd/system/docker.service.d/http-proxy.conf
+  systemctl daemon-reload
+  command -v docker &> /dev/null && echo "Restarting Docker..." | tee -a "$setup_log" && systemctl restart docker
+
+  [[ -f /root/.docker/config.json ]] && rm -f /root/.docker/config.json
+
+  [[ -f /etc/gitconfig ]] && rm -f /etc/gitconfig
+
+  if [[ $OS == 'centos' ]]; then
+    sed -i "/proxy=/d" /etc/yum.conf
+  else
+    [[ -f /etc/apt/apt.conf.d/00-proxy.conf ]] && rm -f /etc/apt/apt.conf.d/00-proxy.conf
+  fi
+}
+
 backup_dir() {
   dir=$1
   backup_suffix=$2
@@ -1963,6 +2048,7 @@ saltify() {
       python36-dateutil\
       python36-m2crypto\
      python36-mysql\
+      python36-packaging\
      yum-utils\
      device-mapper-persistent-data\
      lvm2\
@@ -2051,9 +2137,9 @@ saltify() {
     retry 50 10 "apt-get -y install salt-minion=3002.5+ds-1 salt-common=3002.5+ds-1" >> "$setup_log" 2>&1 || exit 1
     retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
     if [[ $OSVER != 'xenial' ]]; then
-      retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb" >> "$setup_log" 2>&1 || exit 1
+      retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging" >> "$setup_log" 2>&1 || exit 1
     else
-      retry 50 10 "apt-get -y install python-pip python-dateutil python-m2crypto python-mysqldb" >> "$setup_log" 2>&1 || exit 1
+      retry 50 10 "apt-get -y install python-pip python-dateutil python-m2crypto python-mysqldb python-packaging" >> "$setup_log" 2>&1 || exit 1
     fi
   fi
 }
@@ -2195,7 +2281,70 @@ set_main_ip() {
 
 # Add /usr/sbin to everyone's path
 set_path() {
-  echo "complete -cf sudo" > /etc/profile.d/securityonion.sh
+  echo "complete -cf sudo" >> /etc/profile.d/securityonion.sh
+}
+
+set_proxy() {
+
+  # Don't proxy localhost, local ip, and management ip
+  no_proxy_string="localhost, 127.0.0.1, ${MAINIP}, ${HOSTNAME}"
+  if [[ -n $MSRV ]] && [[ -n $MSRVIP ]];then
+    no_proxy_string="${no_proxy_string}, ${MSRVIP}, ${MSRV}"
+  fi
+
+  # Set proxy environment variables used by curl, wget, docker, and others
+  {
+    echo "export use_proxy=on"
+    echo "export http_proxy=\"${so_proxy}\""
+    echo "export https_proxy=\"\$http_proxy\""
+    echo "export ftp_proxy=\"\$http_proxy\""
+    echo "export no_proxy=\"${no_proxy_string}\""
+  } > /etc/profile.d/so-proxy.sh
+
+  source /etc/profile.d/so-proxy.sh
+
+  [[ -d '/etc/systemd/system/docker.service.d' ]] || mkdir -p /etc/systemd/system/docker.service.d
+
+  # Create proxy config for dockerd
+  printf '%s\n'\
+    "[Service]"\
+    "Environment=\"HTTP_PROXY=${so_proxy}\""\
+    "Environment=\"HTTPS_PROXY=${so_proxy}\""\
+    "Environment=\"NO_PROXY=${no_proxy_string}\"" > /etc/systemd/system/docker.service.d/http-proxy.conf
+
+  systemctl daemon-reload
+  command -v docker &> /dev/null && systemctl restart docker
+
+  # Create config.json for docker containers
+  [[ -d /root/.docker ]] || mkdir /root/.docker
+  printf '%s\n'\
+    "{"\
+    " \"proxies\":"\
+    " {"\
+    "   \"default\":"\
+    "   {"\
+    "     \"httpProxy\":\"${so_proxy}\","\
+    "     \"httpsProxy\":\"${so_proxy}\","\
+    "     \"ftpProxy\":\"${so_proxy}\","\
+    "     \"noProxy\":\"${no_proxy_string}\""\
+    "   }"\
+    " }"\
+    "}" > /root/.docker/config.json
+
+  # Set proxy for package manager
+  if [ "$OS" = 'centos' ]; then
+    echo "proxy=$so_proxy" >> /etc/yum.conf
+  else
+    # Set it up so the updates roll through the manager
+    printf '%s\n'\
+      "Acquire::http::Proxy \"$so_proxy\";"\
+      "Acquire::https::Proxy \"$so_proxy\";" > /etc/apt/apt.conf.d/00-proxy.conf
+  fi
+
+  # Set global git proxy
+  printf '%s\n'\
+    "[http]"\
+    "  proxy = ${so_proxy}" > /etc/gitconfig
 }
 
 setup_salt_master_dirs() {
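One hedged way to verify the pieces `set_proxy` writes, assuming systemd and Docker are present on the host:

```
# The dockerd drop-in should surface in the unit's Environment after a
# daemon-reload, and shells pick the variables up from the profile script.
systemctl daemon-reload
systemctl show --property=Environment docker
source /etc/profile.d/so-proxy.sh && env | grep -i _proxy
```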
@@ -2226,6 +2375,7 @@ set_progress_str() {
   local percentage_input=$1
   progress_bar_text=$2
   export progress_bar_text
+  local nolog=$2
 
   if (( "$percentage_input" >= "$percentage" )); then
     percentage="$percentage_input"
@@ -2235,12 +2385,14 @@ set_progress_str() {
 
     echo -e "$percentage_str"
 
-    info "Progressing ($percentage%): $progress_bar_text"
+    if [[ -z $nolog ]]; then
+      info "Progressing ($percentage%): $progress_bar_text"
 
-    printf '%s\n' \
-      '----'\
-      "$percentage% - ${progress_bar_text^^}"\
-      "----" >> "$setup_log" 2>&1
+#    printf '%s\n' \
+#      '----'\
+#      "$percentage% - ${progress_bar_text^^}"\
+#      "----" >> "$setup_log" 2>&1
+    fi
 }
 
 set_ssh_cmds() {

100  setup/so-setup
@@ -27,6 +27,8 @@ original_args=("$@")
 
 cd "$(dirname "$0")" || exit 255
 
+echo "Getting started..."
+
 # Source the generic function libraries that are also used by the product after
 # setup. These functions are intended to be reusable outside of the setup process.
 source ../salt/common/tools/sbin/so-common
@@ -93,12 +95,23 @@ if ! [ -f $install_opt_file ]; then
   analyze_system
 fi
 
+# Set up handler for setup to exit early (use `kill -SIGUSR1 "$setup_proc"; exit 1` in child scripts)
+trap 'catch $LINENO' SIGUSR1
+setup_proc="$$"
+catch() {
+  info "Fatal error occurred at $1 in so-setup, failing setup."
+  grep --color=never "ERROR" "$setup_log" > "$error_log"
+  whiptail_setup_failed
+  exit 1
+}
+
 automated=no
-function progress() {
-  local title='Security Onion Install'
+progress() {
+  local title='Security Onion Setup'
+  local msg=${1:-'Please wait while installing...'}
+
   if [ $automated == no ]; then
-    whiptail --title "$title" --gauge 'Please wait while installing...' 6 60 0 # append to text
+    whiptail --title "$title" --gauge "$msg" 6 70 0 # append to text
   else
     cat >> $setup_log 2>&1
   fi
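The SIGUSR1 handler added above lets any child script abort the whole setup with a single signal. A toy reproduction of the pattern:

```
# Parent traps SIGUSR1 and exports its PID; a child signals it to fail fast.
trap 'echo "fatal error reported by a child, aborting"; exit 1' SIGUSR1
setup_proc="$$"
export setup_proc

bash -c 'kill -SIGUSR1 "$setup_proc"; exit 1'  # stand-in for a failing child
echo "never reached: the pending trap runs and exits first"
```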
@@ -154,12 +167,9 @@ set_ssh_cmds $automated
|
|||||||
local_sbin="$(pwd)/../salt/common/tools/sbin"
|
local_sbin="$(pwd)/../salt/common/tools/sbin"
|
||||||
export PATH=$PATH:$local_sbin
|
export PATH=$PATH:$local_sbin
|
||||||
|
|
||||||
installer_prereq_packages && detect_cloud
|
|
||||||
set_network_dev_status_list
|
set_network_dev_status_list
|
||||||
|
set_palette >> $setup_log 2>&1
|
||||||
|
|
||||||
if [ "$OS" == ubuntu ]; then
|
|
||||||
update-alternatives --set newt-palette /etc/newt/palette.original >> $setup_log 2>&1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Kernel messages can overwrite whiptail screen #812
|
# Kernel messages can overwrite whiptail screen #812
|
||||||
# https://github.com/Security-Onion-Solutions/securityonion/issues/812
|
# https://github.com/Security-Onion-Solutions/securityonion/issues/812
|
||||||
@@ -192,19 +202,24 @@ if ! [[ -f $install_opt_file ]]; then
   if [[ $setup_type == 'iso' ]] && [ "$automated" == no ]; then
     whiptail_first_menu_iso
     if [[ $option == "CONFIGURENETWORK" ]]; then
+      collect_hostname
       network_init_whiptail
       whiptail_management_interface_setup
       network_init
       printf '%s\n' \
         "MNIC=$MNIC" \
         "HOSTNAME=$HOSTNAME" > "$net_init_file"
+      set_main_ip >> $setup_log 2>&1
+      compare_main_nic_ip
+      reset_proxy
+      collect_proxy
+      [[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1
       whiptail_net_setup_complete
     else
-      whiptail_install_type
+      true
     fi
-  else
-    whiptail_install_type
   fi
+  whiptail_install_type
 else
   source $install_opt_file
 fi
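The `printf ... > "$net_init_file"` line above is a small answer-file pattern: network choices are saved as `KEY=value` lines that a later run can `source` instead of prompting again. A standalone illustration with made-up values:

```
# Illustrative values only; the real file is written from the whiptail dialogs.
net_init_file=/tmp/net_init_demo
printf '%s\n' \
  "MNIC=eth0" \
  "HOSTNAME=sensor1.example.com" > "$net_init_file"

# A later run restores the same variables in one step:
source "$net_init_file"
echo "Reusing management NIC $MNIC for $HOSTNAME"
```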
@@ -257,6 +272,10 @@ if [[ ( $is_manager || $is_import ) && $is_iso ]]; then
   fi
 fi
 
+if [[ $is_manager || $is_import ]]; then
+  check_elastic_license
+fi
+
 if ! [[ -f $install_opt_file ]]; then
   if [[ $is_manager && $is_sensor ]]; then
     check_requirements "standalone"
@@ -273,25 +292,31 @@ if ! [[ -f $install_opt_file ]]; then
   [[ -f $net_init_file ]] && whiptail_net_reinit && reinit_networking=true
 
   if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
+    collect_hostname
     network_init_whiptail
   else
     source "$net_init_file"
   fi
 
+  if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
+    network_init
+  fi
+
+  set_main_ip >> $setup_log 2>&1
+  compare_main_nic_ip
+
   if [[ $is_minion ]]; then
     collect_mngr_hostname
   fi
 
-  if [[ $is_minion ]] || [[ $reinit_networking ]] || [[ $is_iso ]] && ! [[ -f $net_init_file ]]; then
-    whiptail_management_interface_setup
+  reset_proxy
+  if [[ -z $is_airgap ]]; then
+    collect_proxy
+    [[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1
   fi
 
-  if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
-    network_init
-  fi
-
-  if [[ -n "$TURBO" ]]; then
-    use_turbo_proxy
+  if [[ $is_minion ]] || [[ $reinit_networking ]] || [[ $is_iso ]] && ! [[ -f $net_init_file ]]; then
+    whiptail_management_interface_setup
   fi
 
   if [[ $is_minion ]]; then
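Two small idioms do the proxy gating above: `[[ -z $is_airgap ]]` skips proxy collection entirely on airgap installs, and `[[ -n "$so_proxy" ]] && set_proxy` applies a proxy only when one was actually entered. A quick illustration of the short-circuit guard:

```
so_proxy=""
[[ -n "$so_proxy" ]] && echo "set_proxy would run"   # skipped: nothing collected
so_proxy="http://proxy.example.com:3128"
[[ -n "$so_proxy" ]] && echo "set_proxy would run"   # runs
```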
@@ -310,6 +335,7 @@ if ! [[ -f $install_opt_file ]]; then
       "HOSTNAME=$HOSTNAME" \
       "MSRV=$MSRV" \
       "MSRVIP=$MSRVIP" > "$install_opt_file"
+    [[ -n $so_proxy ]] && echo "so_proxy=$so_proxy" >> "$install_opt_file"
     download_repo_tarball
     exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}"
   fi
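The minion branch finishes by appending the collected proxy to the answer file and `exec`-ing the installer that was just downloaded, replaying the original arguments. A self-contained sketch of that hand-off, using demo paths rather than the real ones:

```
# Write a demo answer file, then exec a second script that sources it.
install_opt_demo=/tmp/install_opt_demo
printf '%s\n' \
  "MSRV=manager.example.com" \
  "MSRVIP=192.0.2.10" > "$install_opt_demo"

cat > /tmp/second_pass.sh <<'EOF'
#!/bin/bash
source /tmp/install_opt_demo
echo "resumed with MSRV=$MSRV MSRVIP=$MSRVIP"
EOF

exec bash /tmp/second_pass.sh "$@"   # exec replaces this process entirely
echo "never printed"
```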
@@ -323,6 +349,22 @@ else
   rm -rf $install_opt_file >> "$setup_log" 2>&1
 fi
 
+percentage=0
+{
+  installer_progress_loop & # Run progress bar to 98 in ~8 minutes while waiting for package installs
+  progress_bg_proc=$!
+  installer_prereq_packages
+  install_success=$?
+  kill -9 "$progress_bg_proc"
+  wait "$progress_bg_proc" &> /dev/null # Kill just sends signal, redirect output of wait to catch stdout
+  if [[ $install_success -gt 0 ]]; then
+    echo "Could not install packages required for setup, exiting now." >> "$setup_log" 2>&1
+    kill -SIGUSR1 "$setup_proc"; exit 1
+  fi
+} | progress '...'
+
+detect_cloud
+
 short_name=$(echo "$HOSTNAME" | awk -F. '{print $1}')
 
 MINION_ID=$(echo "${short_name}_${install_type}" | tr '[:upper:]' '[:lower:]')
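The new block drives the `progress` gauge from a background ticker while the real package installs run, then kills and reaps the ticker and raises SIGUSR1 on failure. The moving parts in a runnable miniature, where the ticker and `sleep 20` stand in for `installer_progress_loop` and `installer_prereq_packages`:

```
progress_loop_demo() {
  local pct=0
  while [ "$pct" -lt 98 ]; do
    echo "$pct"        # each bare integer on stdin moves the gauge
    pct=$((pct + 2))
    sleep 5
  done
}

{
  progress_loop_demo &
  ticker=$!
  sleep 20                      # stand-in for the real package installation
  kill -9 "$ticker"
  wait "$ticker" &> /dev/null   # reap; kill only sends the signal
  echo 100                      # snap the bar to completion
} | whiptail --title 'Security Onion Setup' --gauge 'Please wait while installing...' 6 70 0
```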
@@ -336,14 +378,14 @@ minion_type=$(get_minion_type)
 set_default_log_size >> $setup_log 2>&1
 
 if [[ $is_helix ]]; then
   RULESETUP=${RULESETUP:-ETOPEN}
   NSMSETUP=${NSMSETUP:-BASIC}
   HNSENSOR=${HNSENSOR:-inherit}
   MANAGERUPDATES=${MANAGERUPDATES:-0}
 fi
 
 if [[ $is_helix || ( $is_manager && $is_node ) ]]; then
   RULESETUP=${RULESETUP:-ETOPEN}
   NSMSETUP=${NSMSETUP:-BASIC}
 fi
 
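Every assignment in this hunk uses `${VAR:-default}`, which fills a value only when the variable is unset or empty, so answers pre-seeded from an answer file or the environment always win. For example:

```
RULESETUP=ETPRO               # pre-seeded (e.g. from an answer file)
RULESETUP=${RULESETUP:-ETOPEN}
echo "$RULESETUP"             # -> ETPRO, the seed survives

unset NSMSETUP
NSMSETUP=${NSMSETUP:-BASIC}
echo "$NSMSETUP"              # -> BASIC, the default applies
```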
@@ -363,7 +405,7 @@ fi
 if [[ $is_import ]]; then
   PATCHSCHEDULENAME=${PATCHSCHEDULENAME:-auto}
   MTU=${MTU:-1500}
   RULESETUP=${RULESETUP:-ETOPEN}
   NSMSETUP=${NSMSETUP:-BASIC}
   HNSENSOR=${HNSENSOR:-inherit}
   MANAGERUPDATES=${MANAGERUPDATES:-0}
@@ -527,21 +569,10 @@ whiptail_make_changes
 # From here on changes will be made.
 echo "1" > /root/accept_changes
 
-# Set up handler for setup to exit early (use `kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1` in child scripts)
-trap 'catch $LINENO' SIGUSR1
-
-catch() {
-  info "Fatal error occurred at $1 in so-setup, failing setup."
-  grep --color=never "ERROR" "$setup_log" > "$error_log"
-  whiptail_setup_failed
-  exit
-}
-
 # This block sets REDIRECTIT which is used by a function outside the below subshell
-set_main_ip >> $setup_log 2>&1
-compare_main_nic_ip
 set_redirect >> $setup_log 2>&1
 
+
 # Begin install
 {
   # Set initial percentage to 0
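The handler deleted here is the one re-added near the top of the script in an earlier hunk, now exiting with status 1 instead of a bare `exit`. The notable change is the target of the child's `kill`: the old comment had children discover the parent via `ps --pid $$ -oppid=`, while the new code records the PID once in `$setup_proc`. Both resolve to the same process:

```
# A child process can look up its parent at signal time...
parent=$(ps --pid $$ -o ppid=)
echo "my pid: $$, parent: ${parent//[[:space:]]/}"
# ...but recording the setup PID up front (setup_proc="$$" in the parent)
# avoids the extra ps call in every child script.
```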
@@ -768,6 +799,9 @@ set_redirect >> $setup_log 2>&1
 
     set_progress_str 70 "$(print_salt_state_apply 'kibana')"
     salt-call state.apply -l info kibana >> $setup_log 2>&1
+
+    set_progress_str 70 "Setting up default Space in Kibana"
+    so-kibana-space-defaults >> $setup_log 2>&1
   fi
 
   if [[ "$PLAYBOOK" = 1 ]]; then
setup/so-whiptail
@@ -588,8 +588,21 @@ whiptail_invalid_input() { # TODO: This should accept a list of arguments to spe
 
 }
 
+whiptail_invalid_proxy() {
+  [ -n "$TESTING" ] && return
+
+  local message
+  read -r -d '' message <<- EOM
+	Could not reach test url using proxy ${proxy_addr}.
+
+	Error was: ${proxy_test_err}
+	EOM
+
+  whiptail --title "Security Onion Setup" --yesno "$message" --yes-button "Enter Again" --no-button "Skip" 11 60
+}
+
 whiptail_invalid_string() {
   [ -n "$TESTING" ] && return
 
   whiptail --title "Security Onion Setup" --msgbox "Invalid input, please try again.\n\nThe $1 cannot contain spaces." 9 45
 
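`read -r -d '' message <<- EOM` is the idiom used here to load a multi-line dialog body into a variable: `-d ''` makes `read` consume input to end-of-file, `-r` keeps backslashes literal, and `<<-` strips leading tab indentation. One caveat worth knowing:

```
# read hits EOF before finding a NUL delimiter, so it returns nonzero even
# though $message is fully populated -- don't let `set -e` trip over it.
read -r -d '' message <<- EOM || true
	Could not reach test url using proxy example.
	Second line of the message.
EOM
printf '%s\n' "$message"
```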
@@ -1216,6 +1229,58 @@ whiptail_patch_schedule_select_hours() {
 
 }
 
+whiptail_proxy_ask() {
+  [ -n "$TESTING" ] && return
+
+  whiptail --title "Security Onion Setup" --yesno "Do you want to set a proxy server for this installation?" 7 60 --defaultno
+}
+
+whiptail_proxy_addr() {
+  [ -n "$TESTING" ] && return
+
+  local message
+  read -r -d '' message <<- EOM
+	Please input the proxy server you wish to use, including the URL prefix (ex: https://your.proxy.com:1234).
+
+	If your proxy requires a username and password do not include them in your input. Setup will ask for those values next.
+	EOM
+
+  proxy_addr=$(whiptail --title "Security Onion Setup" --inputbox "$message" 13 60 "$1" 3>&1 1>&2 2>&3)
+
+  local exitstatus=$?
+  whiptail_check_exitstatus $exitstatus
+}
+
+whiptail_proxy_auth_ask() {
+  [ -n "$TESTING" ] && return
+
+  whiptail --title "Security Onion Setup" --yesno "Does your proxy require authentication?" 7 60
+}
+
+whiptail_proxy_auth_user() {
+  [ -n "$TESTING" ] && return
+
+  proxy_user=$(whiptail --title "Security Onion Setup" --inputbox "Please input the proxy user:" 8 60 "$1" 3>&1 1>&2 2>&3)
+
+  local exitstatus=$?
+  whiptail_check_exitstatus $exitstatus
+}
+
+whiptail_proxy_auth_pass() {
+  local arg=$1
+
+  [ -n "$TESTING" ] && return
+
+  if [[ $arg != 'confirm' ]]; then
+    proxy_pass=$(whiptail --title "Security Onion Setup" --passwordbox "Please input the proxy password:" 8 60 3>&1 1>&2 2>&3)
+  else
+    proxy_pass_confirm=$(whiptail --title "Security Onion Setup" --passwordbox "Please confirm the proxy password:" 8 60 3>&1 1>&2 2>&3)
+  fi
+
+  local exitstatus=$?
+  whiptail_check_exitstatus $exitstatus
+}
+
 whiptail_requirements_error() {
 
   local requirement_needed=$1
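These dialogs are the building blocks for the `collect_proxy` flow referenced in the so-setup hunks above. `collect_proxy` itself is not part of this diff, so the loop below is an illustrative reconstruction of how the pieces could chain together, including the `proxy_test_err` capture that `whiptail_invalid_proxy` displays:

```
collect_proxy_sketch() {
  whiptail_proxy_ask || return 0              # --yesno: nonzero means no proxy
  while true; do
    whiptail_proxy_addr "$proxy_addr"
    if whiptail_proxy_auth_ask; then
      whiptail_proxy_auth_user "$proxy_user"
      whiptail_proxy_auth_pass
      whiptail_proxy_auth_pass 'confirm'
    fi
    # Probe a test URL through the proxy, keeping stderr for the failure dialog.
    if proxy_test_err=$(curl --silent --show-error --max-time 15 \
        --proxy "$proxy_addr" https://github.com 2>&1 > /dev/null); then
      so_proxy="$proxy_addr"
      return 0
    fi
    whiptail_invalid_proxy || return 1        # "Enter Again" loops, "Skip" bails
  done
}
```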
BIN sigs/securityonion-2.3.40.iso.sig (new file; binary file not shown)