mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 09:12:45 +01:00)
Merge remote-tracking branch 'remotes/origin/dev' into issue/1831
@@ -1,8 +0,0 @@
#!/bin/bash

if [ ! -f /opt/so/state/dockernet.state ]; then
    docker network create -d bridge so-elastic-net
    touch /opt/so/state/dockernet.state
else
    exit
fi
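The deleted script above gated network creation on a state file. For comparison, a minimal sketch (not part of this commit) that instead asks Docker itself whether the bridge already exists, so no state file can go stale:

#!/bin/bash
# 'docker network inspect' exits non-zero when the named network is absent
if ! docker network inspect so-elastic-net >/dev/null 2>&1; then
    docker network create -d bridge so-elastic-net
fi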
@@ -40,9 +40,11 @@ version_check() {

manager_check
version_check
operating_system

# Use the hostname
HOSTNAME=$(hostname)
# List all the containers
container_list
CURLTYPE=refresh
update_docker_containers
@@ -51,22 +51,61 @@ manager_check() {
}

manager_check

# Let's make sure we have the public key
curl -sSL https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS | gpg --import -

CONTAINER_REGISTRY=quay.io
SIGNPATH=/root/sosigs
rm -rf $SIGNPATH
mkdir -p $SIGNPATH
if [ -z "$BRANCH" ]; then
    BRANCH="master"
fi

VERSION=$(lookup_pillar soversion)
# Modify global.sls to enable Features
sed -i 's/features: False/features: True/' $local_salt_dir/pillar/global.sls
SUFFIX="-features"
TRUSTED_CONTAINERS=( \
    "so-elasticsearch:$VERSION$SUFFIX" \
    "so-filebeat:$VERSION$SUFFIX" \
    "so-kibana:$VERSION$SUFFIX" \
    "so-logstash:$VERSION$SUFFIX" )
    "so-elasticsearch" \
    "so-filebeat" \
    "so-kibana" \
    "so-logstash" )

for i in "${TRUSTED_CONTAINERS[@]}"
do
    # Pull down the trusted docker image
    echo "Downloading $i"
    docker pull --disable-content-trust=false docker.io/$IMAGEREPO/$i
    # Tag it with the new registry destination
    docker tag $IMAGEREPO/$i $HOSTNAME:5000/$IMAGEREPO/$i
    docker push $HOSTNAME:5000/$IMAGEREPO/$i
    docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION$SUFFIX

    # Get signature
    curl https://sigs.securityonion.net/$VERSION/$i:$VERSION$SUFFIX.sig --output $SIGNPATH/$i:$VERSION$SUFFIX.sig
    if [[ $? -ne 0 ]]; then
        echo "Unable to pull signature file for $i:$VERSION$SUFFIX"
        exit 1
    fi
    # Dump our hash values
    DOCKERINSPECT=$(docker inspect $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION$SUFFIX)

    echo "$DOCKERINSPECT" | jq ".[0].RepoDigests[] | select(. | contains(\"$CONTAINER_REGISTRY\"))" > $SIGNPATH/$i:$VERSION$SUFFIX.txt
    echo "$DOCKERINSPECT" | jq ".[0].Created, .[0].RootFS.Layers" >> $SIGNPATH/$i:$VERSION$SUFFIX.txt

    if [[ $? -ne 0 ]]; then
        echo "Unable to inspect $i:$VERSION$SUFFIX"
        exit 1
    fi
    GPGTEST=$(gpg --verify $SIGNPATH/$i:$VERSION$SUFFIX.sig $SIGNPATH/$i:$VERSION$SUFFIX.txt 2>&1)
    if [[ $? -eq 0 ]]; then
        # Tag it with the new registry destination
        docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION$SUFFIX $HOSTNAME:5000/$IMAGEREPO/$i:$VERSION$SUFFIX
        docker push $HOSTNAME:5000/$IMAGEREPO/$i:$VERSION$SUFFIX
    else
        echo "There is a problem downloading the $i:$VERSION$SUFFIX image. Details: "
        echo ""
        echo "$GPGTEST"
        exit 1
    fi

done
sed -i 's/features: False/features: True/' $local_salt_dir/pillar/global.sls
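For reference, the check this loop performs can be reproduced by hand for a single image. A minimal sketch, assuming the signing key from the KEYS file is already imported and using a hypothetical image tag purely for illustration:

IMG=so-elasticsearch:2.3.0-features   # hypothetical tag, for illustration only
docker inspect quay.io/$IMAGEREPO/$IMG > /tmp/inspect.json
# Rebuild the signed manifest: registry digest first, then creation time and layer hashes
jq '.[0].RepoDigests[] | select(. | contains("quay.io"))' /tmp/inspect.json > /tmp/$IMG.txt
jq '.[0].Created, .[0].RootFS.Layers' /tmp/inspect.json >> /tmp/$IMG.txt
# The detached signature only verifies if the locally generated file matches byte-for-byte
gpg --verify /root/sosigs/$IMG.sig /tmp/$IMG.txt && echo "signature OK for $IMG"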
@@ -87,6 +87,14 @@ container_list() {
    fi
}

operating_system() {
    if [ -f /etc/redhat-release ]; then
        OS=centos
    else
        OS=ubuntu
    fi
}

update_docker_containers() {
    # Let's make sure we have the public key
    curl -sSL https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS | gpg --import -
@@ -95,9 +103,7 @@ update_docker_containers() {
    SIGNPATH=/root/sosigs
    rm -rf $SIGNPATH
    mkdir -p $SIGNPATH
    if [ -z "$BRANCH" ]; then
        BRANCH="master"
    fi

    # Download the containers from the interwebs
    for i in "${TRUSTED_CONTAINERS[@]}"
    do
@@ -106,22 +112,28 @@ update_docker_containers() {
        docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION

        # Get signature
        curl https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/$BRANCH/sigs/images/$VERSION/$i.sig --output $SIGNPATH/$i.sig
        curl -A "$CURLTYPE/$OS/$(uname -r)" https://sigs.securityonion.net/$VERSION/$i:$VERSION.sig --output $SIGNPATH/$i:$VERSION.sig
        if [[ $? -ne 0 ]]; then
            echo "Unable to pull signature file for $i:$VERSION"
            exit 1
        fi
        # Dump our hash values
        docker inspect $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION | jq '.[0].Created, .[0].RepoDigests, .[0].RootFS.Layers' > $SIGNPATH/$i.txt
        DOCKERINSPECT=$(docker inspect $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION)

        echo "$DOCKERINSPECT" | jq ".[0].RepoDigests[] | select(. | contains(\"$CONTAINER_REGISTRY\"))" > $SIGNPATH/$i:$VERSION.txt
        echo "$DOCKERINSPECT" | jq ".[0].Created, .[0].RootFS.Layers" >> $SIGNPATH/$i:$VERSION.txt

        if [[ $? -ne 0 ]]; then
            echo "Unable to inspect $i:$VERSION"
            exit 1
        fi
        GPGTEST=$(gpg --verify $SIGNPATH/$i.sig $SIGNPATH/$i.txt 2>&1)
        GPGTEST=$(gpg --verify $SIGNPATH/$i:$VERSION.sig $SIGNPATH/$i:$VERSION.txt 2>&1)
        if [[ $? -eq 0 ]]; then
            # Tag it with the new registry destination
            docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION $HOSTNAME:5000/$IMAGEREPO/$i:$VERSION
            docker push $HOSTNAME:5000/$IMAGEREPO/$i:$VERSION
            if [[ -z "$SKIP_TAGPUSH" ]]; then
                # Tag it with the new registry destination
                docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION $HOSTNAME:5000/$IMAGEREPO/$i:$VERSION
                docker push $HOSTNAME:5000/$IMAGEREPO/$i:$VERSION
            fi
        else
            echo "There is a problem downloading the $i:$VERSION image. Details: "
            echo ""
@@ -130,4 +142,4 @@ update_docker_containers() {
        fi
    done

}
}
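The new SKIP_TAGPUSH guard makes the tag-and-push to the manager's local registry optional, so the download-and-verify pass can run on its own. A hedged usage sketch; the exact invocation is an assumption, not something this diff documents:

# Verify container images without re-tagging and pushing them to $HOSTNAME:5000
SKIP_TAGPUSH=1 soup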
salt/common/tools/sbin/so-playbook-reset (new file, 26 lines)
@@ -0,0 +1,26 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

salt-call state.apply playbook.db_init,playbook,playbook.automation_user_create

/usr/sbin/so-soctopus-restart

echo "Importing Plays - this will take some time...."
sleep 5
/usr/sbin/so-playbook-ruleupdate
salt/common/tools/sbin/so-ssh-harden (new file, 49 lines)
@@ -0,0 +1,49 @@
#!/bin/bash

. /usr/sbin/so-common

if [[ $1 =~ ^(q|--quiet) ]]; then
    quiet=true
fi

print_sshd_t() {
    local string=$1
    local state=$2
    echo "${state}:"
    sshd -T | grep "^${string}"
}

if ! [[ $quiet ]]; then print_sshd_t "ciphers" "Before"; fi
sshd -T | grep "^ciphers" | sed -e "s/\(3des-cbc\|aes128-cbc\|aes192-cbc\|aes256-cbc\|arcfour\|arcfour128\|arcfour256\|blowfish-cbc\|cast128-cbc\|rijndael-cbc@lysator.liu.se\)\,\?//g" >> /etc/ssh/sshd_config
if ! [[ $quiet ]]; then
    print_sshd_t "ciphers" "After"
    echo ""
fi

if ! [[ $quiet ]]; then print_sshd_t "kexalgorithms" "Before"; fi
sshd -T | grep "^kexalgorithms" | sed -e "s/\(diffie-hellman-group14-sha1\|ecdh-sha2-nistp256\|diffie-hellman-group-exchange-sha256\|diffie-hellman-group1-sha1\|diffie-hellman-group-exchange-sha1\|ecdh-sha2-nistp521\|ecdh-sha2-nistp384\)\,\?//g" >> /etc/ssh/sshd_config
if ! [[ $quiet ]]; then
    print_sshd_t "kexalgorithms" "After"
    echo ""
fi

if ! [[ $quiet ]]; then print_sshd_t "macs" "Before"; fi
sshd -T | grep "^macs" | sed -e "s/\(hmac-sha2-512,\|umac-128@openssh.com,\|hmac-sha2-256,\|umac-64@openssh.com,\|hmac-sha1,\|hmac-sha1-etm@openssh.com,\|umac-64-etm@openssh.com,\|hmac-sha1\)//g" >> /etc/ssh/sshd_config
if ! [[ $quiet ]]; then
    print_sshd_t "macs" "After"
    echo ""
fi

if ! [[ $quiet ]]; then print_sshd_t "hostkeyalgorithms" "Before"; fi
sshd -T | grep "^hostkeyalgorithms" | sed "s|ecdsa-sha2-nistp256,||g" | sed "s|ssh-rsa,||g" >> /etc/ssh/sshd_config
if ! [[ $quiet ]]; then
    print_sshd_t "hostkeyalgorithms" "After"
    echo ""
fi

{% if grains['os'] != 'CentOS' %}
echo "----"
echo "[ WARNING ] Any new ssh sessions will need to remove and reaccept the ECDSA key for this server before reconnecting."
echo "----"
{% endif %}
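After hardening, the result can be checked with standard OpenSSH tooling before restarting the daemon: sshd -t validates the rewritten config and sshd -T prints the effective settings.

# Fail fast if the appended override lines broke /etc/ssh/sshd_config
sshd -t && echo "sshd_config is syntactically valid"
# Confirm the weak algorithms are gone from the effective configuration
sshd -T | grep -E "^(ciphers|kexalgorithms|macs|hostkeyalgorithms)"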
@@ -16,13 +16,17 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common
. /usr/sbin/so-image-common
if [ -f /usr/sbin/so-image-common ]; then
    . /usr/sbin/so-image-common
fi
UPDATE_DIR=/tmp/sogh/securityonion
INSTALLEDVERSION=$(cat /etc/soversion)
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'})
DEFAULT_SALT_DIR=/opt/so/saltstack/default
BATCHSIZE=5
SOUP_LOG=/root/soup.log
CURLTYPE=soup

exec 3>&1 1>${SOUP_LOG} 2>&1

manager_check() {
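The exec line is what lets soup log everything while still talking to the operator: it saves the terminal's stdout as file descriptor 3, then points stdout and stderr at the log. A standalone sketch of the same pattern:

#!/bin/bash
LOG=/tmp/demo.log
exec 3>&1 1>${LOG} 2>&1   # fd 3 = original terminal; fd 1 and 2 now go to the log

echo "this line lands in $LOG"
echo "this line is shown on the terminal" >&3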
@@ -119,7 +123,6 @@ clean_dockers() {
}

clone_to_tmp() {
    # TODO Need to add an air gap option
    # Clean old files
    rm -rf /tmp/sogh
    # Make a temp location for the files
@@ -379,6 +382,7 @@ verify_latest_update_script() {
    else
        echo "You are not running the latest soup version. Updating soup."
        cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
        cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
        salt-call state.apply common queue=True
        echo ""
        echo "soup has been updated. Please run soup again."
@@ -1,28 +1,5 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

APP=closedeletedelete
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists, exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf
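The read-then-write lockfile above leaves a small race window between checking lastPID and writing $$. A common alternative sketch using flock(1), which acquires the lock atomically (file name is illustrative):

#!/bin/bash
lf=/tmp/closedeletedelete.lock
exec 200>"$lf"           # open the lock file on a spare descriptor
flock -n 200 || exit 0   # another instance holds the lock; bail out quietly
# ... critical section runs here while fd 200 stays open ...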
#!/bin/bash

{%- if grains['role'] in ['so-node', 'so-heavynode'] %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('elasticsearch:mainip', '') -%}
@@ -49,40 +26,36 @@ echo $$ > $lf
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

#. /usr/sbin/so-elastic-common
#. /etc/nsm/securityonion.conf

LOG="/opt/so/log/curator/so-curator-closed-delete.log"

overlimit() {
    [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}') -gt "{{LOG_SIZE_LIMIT}}" ]]
}

closedindices() {
    INDICES=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
    [ $? -eq 1 ] && return 1
    echo ${INDICES} | grep -q -E "(logstash-|so-)"
}

# Check for 2 conditions:
# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
# 2. Are there any closed indices that we can delete?
# If both conditions are true, keep on looping until one of the conditions is false.
while [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}') -gt "{{LOG_SIZE_LIMIT}}" ]] &&
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k -L https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed > /dev/null; do
{% else %}
curl -s -L {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed > /dev/null; do
{% endif %}
while overlimit && closedindices; do

    # We need to determine OLDEST_INDEX:
    # First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed.
    # Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
    # Finally, select the first entry in that sorted list.
    {% if grains['role'] in ['so-node','so-heavynode'] %}
    OLDEST_INDEX=$(curl -s -k -L https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | sort -t- -k3 | head -1)
    {% else %}
    OLDEST_INDEX=$(curl -s -L {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | sort -t- -k3 | head -1)
    {% endif %}
    OLDEST_INDEX=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)

    # Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
    {% if grains['role'] in ['so-node','so-heavynode'] %}
    curl -XDELETE -k -L https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
    {% else %}
    curl -XDELETE -L {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
    {% endif %}
    curl -XDELETE -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}

    # Finally, write a log entry that says we deleted it.
    echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG}

done
done
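Both halves of the loop condition can be checked by hand. A sketch assuming a manager node where Elasticsearch listens on localhost:9200 over plain HTTP:

# Disk used by Elasticsearch data, in GB (the value compared against LOG_SIZE_LIMIT)
du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}'

# The closed index that would be deleted next: filter, sort on the date segment, take the oldest
curl -s "localhost:9200/_cat/indices?h=index&expand_wildcards=closed" \
    | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1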
@@ -12,22 +12,23 @@
            "ignore_failure": true
        }
    },
    {
        "grok":
        {
            "field": "message",
            "patterns": [
                "^<%{INT:syslog.priority}>%{DATA:syslog.timestamp} %{WORD:source.application}: %{GREEDYDATA:real_message}$",
                "^%{SYSLOGTIMESTAMP:syslog.timestamp} %{SYSLOGHOST:syslog.host} %{SYSLOGPROG:syslog.program}: CEF:0\\|%{DATA:vendor}\\|%{DATA:product}\\|%{GREEDYDATA:message2}$"
    {
        "grok":
        {
            "field": "message",
            "patterns": [
                "^<%{INT:syslog.priority}>%{DATA:syslog.timestamp} %{WORD:source.application}: %{GREEDYDATA:real_message}$",
                "^%{SYSLOGTIMESTAMP:syslog.timestamp} %{SYSLOGHOST:syslog.host} %{SYSLOGPROG:syslog.program}: CEF:0\\|%{DATA:vendor}\\|%{DATA:product}\\|%{GREEDYDATA:message2}$"
            ],
            "ignore_failure": true
        }
            "ignore_failure": true
        }
    },
    { "set": { "if": "ctx.source?.application == 'filterlog'", "field": "dataset", "value": "firewall", "ignore_failure": true } },
    { "set": { "if": "ctx.vendor != null", "field": "module", "value": "{{ vendor }}", "ignore_failure": true } },
    { "set": { "if": "ctx.product != null", "field": "dataset", "value": "{{ product }}", "ignore_failure": true } },
    { "set": { "field": "ingest.timestamp", "value": "{{ @timestamp }}" } },
    { "date": { "if": "ctx.syslog?.timestamp != null", "field": "syslog.timestamp", "target_field": "@timestamp", "formats": ["MMM d HH:mm:ss", "MMM dd HH:mm:ss", "ISO8601", "UNIX"], "ignore_failure": true } },
    { "remove": { "field": ["pid", "program"], "ignore_missing": true, "ignore_failure": true } },
    { "pipeline": { "if": "ctx.vendor != null && ctx.product != null", "name": "{{ vendor }}.{{ product }}", "ignore_failure": true } },
    { "pipeline": { "if": "ctx.dataset == 'firewall'", "name": "filterlog", "ignore_failure": true } },
    { "pipeline": { "name": "common" } }
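The grok patterns above can be exercised without shipping real logs by using Elasticsearch's _ingest/pipeline/_simulate API. A sketch with an inline one-processor pipeline and a fabricated CEF-over-syslog sample; the endpoint and message are assumptions:

curl -s -X POST "localhost:9200/_ingest/pipeline/_simulate" \
  -H 'Content-Type: application/json' -d'
{
  "pipeline": {
    "processors": [
      { "grok": {
          "field": "message",
          "patterns": [ "^%{SYSLOGTIMESTAMP:syslog.timestamp} %{SYSLOGHOST:syslog.host} %{SYSLOGPROG:syslog.program}: CEF:0\\|%{DATA:vendor}\\|%{DATA:product}\\|%{GREEDYDATA:message2}$" ],
          "ignore_failure": true } }
    ]
  },
  "docs": [ { "_source": { "message": "Oct  9 14:07:55 fw01 cefagent: CEF:0|SomeVendor|SomeProduct|remaining CEF payload" } } ]
}'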
@@ -3,6 +3,7 @@
"changes": [
    { "summary": "Updated salt to 3002.1 to address CVE-2020-16846, CVE-2020-17490, CVE-2020-25592." },
    { "summary": "Cheat sheet is now available for airgap installs." },
    { "summary": "Updated Go to correct DST/Timezone issue in SOC." },
    { "summary": "Known Issues <ul><li>It is still possible to update your grid from any release candidate to 2.3. However, if you have a true production deployment, then we recommend a fresh image and install for best results.</li><li>In 2.3.0 we made some changes to data types in the elastic index templates. This will cause some errors in Kibana around field conflicts. You can address this in 2 ways:<ol><li>Delete all the data on the ES nodes, preserving all of your other settings such as BPFs, by running sudo so-elastic-clear on all the search nodes</li><li>Re-index the data. This is not a quick process but you can find more information at <a href='https://docs.securityonion.net/en/2.3/elasticsearch.html#re-indexing' target='so-help'>https://docs.securityonion.net/en/2.3/elasticsearch.html#re-indexing</a></li></ol><li>Please be patient as we update our documentation. We have made a concerted effort to update as much as possible but some things still may be incorrect or omitted. If you have questions or feedback, please start a discussion at <a href='https://securityonion.net/discuss' target='so-discuss'>https://securityonion.net/discuss</a>.</li><li>Once you update your grid to 2.3.0, any new nodes that join the grid must be 2.3.0. For example, if you try to join a new RC1 node it will fail. For best results, use the latest ISO (or the 2.3.0 installer from GitHub) when joining to a 2.3.0 grid.</li><li>Shipping Windows Eventlogs with Osquery will fail intermittently with utf8 errors logged in the Application log. This is scheduled to be fixed in Osquery 4.5.</li><li>When running soup to upgrade from RC1/RC2/RC3 to 2.3.0, there is a Salt error that occurs during the final highstate. This error is related to the patch_os_schedule and can be ignored, as it will not occur again in subsequent highstates.</li><li>When Search Nodes are upgraded from RC1 to 2.3.0, there is a chance of a race condition where certificates are missing. This will show errors in the manager log to the remote node. To fix this, run the following on the search node that is having the issue:<ol><li>Stop elasticsearch - <i>sudo so-elasticsearch-stop</i></li><li>Run the SSL state - <i>sudo salt-call state.apply ssl</i></li><li>Restart elasticsearch - <i>sudo so-elasticsearch-restart</i></li></ol></li><li>If you are upgrading from RC1 you might see errors around registry:2 missing. This error does not break the actual upgrade. To fix, run the following on the manager:<ol><li>Stop the Docker registry - sudo docker stop so-dockerregistry</li><li>Remove the container - sudo docker rm so-dockerregistry</li><li>Run the registry state - sudo salt-call state.apply registry</li></ol></li></ul>" }
]
}
@@ -949,21 +949,25 @@ docker_seed_registry() {
    set_progress_str "$percent" "Downloading $i:$VERSION"
    {
        echo "Downloading $i"
        docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION
        docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION

        # Get signature
        curl https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/$BRANCH/sigs/images/$VERSION/$i.sig --output $SIGNPATH/$i.sig
        curl -A "netinstall/$OS/$(uname -r)" https://sigs.securityonion.net/$VERSION/$i:$VERSION.sig --output $SIGNPATH/$i:$VERSION.sig
        if [[ $? -ne 0 ]]; then
            echo "Unable to pull signature file for $i:$VERSION"
            exit 1
        fi
        # Dump our hash values
        docker inspect $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION | jq '.[0].Created, .[0].RepoDigests, .[0].RootFS.Layers' > $SIGNPATH/$i.txt
        if [[ $? -ne 0 ]]; then
        DOCKERINSPECT=$(docker inspect $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION)

        echo "$DOCKERINSPECT" | jq ".[0].RepoDigests[] | select(. | contains(\"$CONTAINER_REGISTRY\"))" > $SIGNPATH/$i:$VERSION.txt
        echo "$DOCKERINSPECT" | jq ".[0].Created, .[0].RootFS.Layers" >> $SIGNPATH/$i:$VERSION.txt

        if [[ $? -ne 0 ]]; then
            echo "Unable to inspect $i"
            exit 1
        fi
        GPGTEST=$(gpg --verify $SIGNPATH/$i.sig $SIGNPATH/$i.txt 2>&1)
        GPGTEST=$(gpg --verify $SIGNPATH/$i:$VERSION.sig $SIGNPATH/$i:$VERSION.txt 2>&1)
        if [[ $? -eq 0 ]]; then
            # Tag it with the new registry destination
            docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$i:$VERSION $HOSTNAME:5000/$IMAGEREPO/$i:$VERSION
Binary files not shown.