Merge branch 'dev' of https://github.com/Security-Onion-Solutions/securityonion into dev
@@ -18,6 +18,7 @@
/opt/so/log/filebeat/*.log
/opt/so/log/telegraf/*.log
/opt/so/log/redis/*.log
/opt/so/log/stenographer/*.log
/opt/so/log/salt/so-salt-minion-check
/opt/so/log/salt/minion
/opt/so/log/salt/master

@@ -16,7 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# NOTE: This script depends on so-common
IMAGEREPO=securityonion
IMAGEREPO=security-onion-solutions

container_list() {
MANAGERCHECK=$1
@@ -103,7 +103,7 @@ update_docker_containers() {
local PROGRESS_CALLBACK=$3
local LOG_FILE=$4

local CONTAINER_REGISTRY=quay.io
local CONTAINER_REGISTRY=ghcr.io
local SIGNPATH=/root/sosigs

if [ -z "$CURLTYPE" ]; then

@@ -7,6 +7,7 @@
{%- else %}
{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:esclustername') %}
{%- endif %}
{%- set NODE_ROLES = salt['pillar.get']('elasticsearch:node_roles', ['data', 'ingest']) %}
cluster.name: "{{ ESCLUSTERNAME }}"
network.host: 0.0.0.0

@@ -24,24 +25,24 @@ cluster.routing.allocation.disk.threshold_enabled: true
cluster.routing.allocation.disk.watermark.low: 95%
cluster.routing.allocation.disk.watermark.high: 98%
cluster.routing.allocation.disk.watermark.flood_stage: 98%
{%- if FEATURES is sameas true %}
#xpack.security.enabled: false
#xpack.security.http.ssl.enabled: false
#xpack.security.transport.ssl.enabled: false
#xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key
#xpack.security.http.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt
#xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt
#xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key
#xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt
#xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt
#xpack.security.transport.ssl.verification_mode: none
#xpack.security.http.ssl.client_authentication: none
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: none
xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key
xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt
xpack.security.transport.ssl.certificate_authorities: [ "/usr/share/elasticsearch/config/ca.crt" ]
{%- if grains['role'] in ['so-node','so-heavynode'] %}
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.client_authentication: none
xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key
xpack.security.http.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt
xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt
{%- endif %}
#xpack.security.authc:
# anonymous:
# username: anonymous_user
# roles: superuser
# authz_exception: true
{%- endif %}
node.name: {{ grains.host }}
script.max_compilations_rate: 1000/1m
{%- if TRUECLUSTER is sameas true %}
@@ -55,7 +56,7 @@ discovery.seed_hosts:
{%- endfor %}
{%- endif %}
{%- else %}
node.roles: [ data, ingest ]
node.roles: {{ NODE_ROLES }}
node.attr.box_type: {{ NODE_ROUTE_TYPE }}
discovery.seed_hosts:
- {{ grains.master }}

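Note: the new NODE_ROLES lookup above lets node roles be driven from pillar instead of the hard-coded [ data, ingest ] list. A minimal pillar sketch of how that key could be set on a search node (values shown are illustrative, not part of this commit):

elasticsearch:
  esclustername: securityonion
  node_roles:
    - data
    - ingest

If elasticsearch:node_roles is absent, the pillar.get default keeps the previous behavior.
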
@@ -23,12 +23,6 @@
{% set TRUECLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
{% set MANAGERIP = salt['pillar.get']('global:managerip') %}

{% if FEATURES is sameas true %}
{% set FEATUREZ = "-features" %}
{% else %}
{% set FEATUREZ = '' %}
{% endif %}

{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-import'] %}
{% set esclustername = salt['pillar.get']('manager:esclustername') %}
{% set esheap = salt['pillar.get']('manager:esheap') %}
@@ -186,7 +180,7 @@ eslogdir:

so-elasticsearch:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}{{ FEATUREZ }}
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}
- hostname: elasticsearch
- name: so-elasticsearch
- user: elasticsearch
@@ -220,7 +214,13 @@ so-elasticsearch:
- /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
- /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
{% if ismanager %}
- /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% else %}
- /etc/ssl/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% endif %}
- /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro
- /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro
- /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro
- /opt/so/conf/elasticsearch/sotls.yml:/usr/share/elasticsearch/config/sotls.yml:ro
- watch:

@@ -11,6 +11,10 @@
{%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %}
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%}
{%- set FBMEMEVENTS = salt['pillar.get']('filebeat:mem_events', 2048) -%}
{%- set FBMEMFLUSHMINEVENTS = salt['pillar.get']('filebeat:mem_flush_min_events', 2048) -%}
{%- set FBLSWORKERS = salt['pillar.get']('filebeat:ls_workers', 1) -%}
{%- set FBLSBULKMAXSIZE = salt['pillar.get']('filebeat:ls_bulk_max_size', 2048) -%}

name: {{ HOSTNAME }}

@@ -290,7 +294,10 @@ output.logstash:
hosts: ["{{ MANAGER }}:5644"]

# Number of workers per Logstash host.
#worker: 1
worker: {{ FBLSWORKERS }}

# Number of records to send to Logstash input at a time
bulk_max_size: {{ FBLSBULKMAXSIZE }}

# Set gzip compression level.
#compression_level: 3
@@ -491,3 +498,6 @@ setup.template.enabled: false
#http.host: localhost

# Port on which the HTTP endpoint will bind. Default is 5066.

queue.mem.events: {{ FBMEMEVENTS }}
queue.mem.flush.min_events: {{ FBMEMFLUSHMINEVENTS }}

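Note: the four new pillar lookups above (filebeat:mem_events, filebeat:mem_flush_min_events, filebeat:ls_workers, filebeat:ls_bulk_max_size) expose the memory-queue and Logstash-output tuning that used to be fixed in the template. A minimal pillar sketch, with illustrative values only:

filebeat:
  mem_events: 4096
  mem_flush_min_events: 4096
  ls_workers: 2
  ls_bulk_max_size: 4096

Omitting any of these keys falls back to the defaults baked into the pillar.get calls (2048 events/records and 1 worker).
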
@@ -64,7 +64,7 @@ filebeatconfsync:
OUTPUT: {{ salt['pillar.get']('filebeat:config:output', {}) }}
so-filebeat:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-filebeat:{{ VERSION }}{{ FEATURES }}
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-filebeat:{{ VERSION }}
- hostname: so-filebeat
- user: root
- extra_hosts: {{ MANAGER }}:{{ MANAGERIP }},{{ LOCALHOSTNAME }}:{{ LOCALHOSTIP }}

@@ -73,7 +73,7 @@ kibanabin:
# Start the kibana docker
so-kibana:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-kibana:{{ VERSION }}{{ FEATURES }}
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-kibana:{{ VERSION }}
- hostname: kibana
- user: kibana
- environment:
@@ -104,7 +104,7 @@ wait_for_kibana:
module.run:
- http.wait_for_successful_query:
- url: "http://{{MANAGER}}:5601/api/saved_objects/_find?type=config"
- wait_for: 180
- wait_for: 900
- onchanges:
- file: kibanadashtemplate

@@ -146,7 +146,7 @@ lslogdir:

so-logstash:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logstash:{{ VERSION }}{{ FEATURES }}
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logstash:{{ VERSION }}
- hostname: so-logstash
- name: so-logstash
- user: logstash

@@ -674,6 +674,8 @@
"/scripts/influxdbsize.sh"
]
data_format = "influx"
## Timeout for each command to complete.
timeout = "15s"
{% elif grains['role'] in ['so-sensor', 'so-heavynode'] %}
[[inputs.exec]]
commands = [
@@ -685,6 +687,7 @@
"/scripts/oldpcap.sh"
]
data_format = "influx"
timeout = "15s"
{% elif grains['role'] == 'so-standalone' %}
[[inputs.exec]]
commands = [
@@ -698,6 +701,7 @@
"/scripts/oldpcap.sh"
]
data_format = "influx"
timeout = "15s"
{% elif grains['role'] == 'so-eval' %}
[[inputs.exec]]
commands = [
@@ -711,6 +715,7 @@
"/scripts/influxdbsize.sh"
]
data_format = "influx"
timeout = "15s"
{% elif grains['role'] == 'so-helix' %}
[[inputs.exec]]
commands = [
@@ -723,19 +728,16 @@
"/scripts/helixeps.sh"
]
data_format = "influx"
timeout = "15s"
{% endif %}

#
# ## Timeout for each command to complete.
# timeout = "5s"
#
# ## measurement name suffix (for separating different commands)
# name_suffix = "_mycollector"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
## measurement name suffix (for separating different commands)
# name_suffix = "_mycollector"

## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md

@@ -48,6 +48,7 @@ so-telegraf:
- HOST_MOUNT_PREFIX=/host
- GODEBUG=x509ignoreCN=0
- network_mode: host
- init: True
- binds:
- /opt/so/log/telegraf:/var/log/telegraf:rw
- /opt/so/conf/telegraf/etc/telegraf.conf:/etc/telegraf/telegraf.conf:ro

@@ -97,7 +97,7 @@ wazuhmgrwhitelist:
# Check to see if Wazuh API port is available
wazuhportavailable:
cmd.run:
-name: netstat -utanp | grep ":55000" | grep -qv docker && PROCESS=$(netstat -utanp | grep ":55000" | uniq) && echo "Another process ($PROCESS) appears to be using port 55000. Please terminate this process, or reboot to ensure a clean state so that the Wazuh API can start properly." && exit 1 || exit 0
- name: netstat -utanp | grep ":55000" | grep -qv docker && PROCESS=$(netstat -utanp | grep ":55000" | uniq) && echo "Another process ($PROCESS) appears to be using port 55000. Please terminate this process, or reboot to ensure a clean state so that the Wazuh API can start properly." && exit 1 || exit 0

so-wazuh:
docker_container.running:

setup/automation/distributed-net-centos-manager (new file, 77 lines)
@@ -0,0 +1,77 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

TESTING=true

address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
ALLOW_CIDR=0.0.0.0/0
ALLOW_ROLE=a
BASICZEEK=7
BASICSURI=7
# BLOGS=
#BNICS=eth1
ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-manager
install_type=MANAGER
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=eth0
# MSEARCH=
# MSRV=
# MTU=
NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
# REDIRECTHOST=
REDIRECTINFO=IP
RULESETUP=ETOPEN
# SHARDCOUNT=
# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1
THEHIVE=1
WAZUH=1
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=0n10nus3r
WEBPASSWD2=0n10nus3r

setup/automation/distributed-net-centos-search (new file, 78 lines)
@@ -0,0 +1,78 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

TESTING=true

address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
# ALLOW_CIDR=0.0.0.0/0
# ALLOW_ROLE=a
# BASICZEEK=7
# BASICSURI=7
# BLOGS=
# BNICS=eth1
# ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-search
install_type=SEARCHNODE
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
# MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=eth0
# MSEARCH=
MSRV=distributed-manager
MSRVIP=10.66.166.42
# MTU=
# NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
# OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
# PLAYBOOK=1
# REDIRECTHOST=
# REDIRECTINFO=IP
# RULESETUP=ETOPEN
# SHARDCOUNT=
# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
# STRELKA=1
# THEHIVE=1
# WAZUH=1
# WEBUSER=onionuser@somewhere.invalid
# WEBPASSWD1=0n10nus3r
# WEBPASSWD2=0n10nus3r

setup/automation/distributed-net-centos-sensor (new file, 78 lines)
@@ -0,0 +1,78 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

TESTING=true

address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
# ALLOW_CIDR=0.0.0.0/0
# ALLOW_ROLE=a
BASICZEEK=2
BASICSURI=2
# BLOGS=
BNICS=eth1
ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-sensor
install_type=SENSOR
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
# MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=eth0
# MSEARCH=
MSRV=distributed-manager
MSRVIP=10.66.166.42
# MTU=
# NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
# NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
# OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
# PLAYBOOK=1
# REDIRECTHOST=
# REDIRECTINFO=IP
# RULESETUP=ETOPEN
# SHARDCOUNT=
# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
# STRELKA=1
# THEHIVE=1
# WAZUH=1
# WEBUSER=onionuser@somewhere.invalid
# WEBPASSWD1=0n10nus3r
# WEBPASSWD2=0n10nus3r

setup/automation/distributed-net-ubuntu-manager (new file, 77 lines)
@@ -0,0 +1,77 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

TESTING=true

address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
ALLOW_CIDR=0.0.0.0/0
ALLOW_ROLE=a
BASICZEEK=7
BASICSURI=7
# BLOGS=
#BNICS=eth1
ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-manager
install_type=MANAGER
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=ens18
# MSEARCH=
# MSRV=
# MTU=
NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
# REDIRECTHOST=
REDIRECTINFO=IP
RULESETUP=ETOPEN
# SHARDCOUNT=
# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1
THEHIVE=1
WAZUH=1
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=0n10nus3r
WEBPASSWD2=0n10nus3r

setup/automation/distributed-net-ubuntu-search (new file, 78 lines)
@@ -0,0 +1,78 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

TESTING=true

address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
# ALLOW_CIDR=0.0.0.0/0
# ALLOW_ROLE=a
# BASICZEEK=7
# BASICSURI=7
# BLOGS=
# BNICS=eth1
# ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-search
install_type=SEARCHNODE
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
# MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=ens18
# MSEARCH=
MSRV=distributed-manager
MSRVIP=10.66.166.42
# MTU=
# NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
# OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
# PLAYBOOK=1
# REDIRECTHOST=
# REDIRECTINFO=IP
# RULESETUP=ETOPEN
# SHARDCOUNT=
# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
# STRELKA=1
# THEHIVE=1
# WAZUH=1
# WEBUSER=onionuser@somewhere.invalid
# WEBPASSWD1=0n10nus3r
# WEBPASSWD2=0n10nus3r

setup/automation/distributed-net-ubuntu-sensor (new file, 78 lines)
@@ -0,0 +1,78 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

TESTING=true

address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
# ALLOW_CIDR=0.0.0.0/0
# ALLOW_ROLE=a
BASICZEEK=2
BASICSURI=2
# BLOGS=
BNICS=ens19
ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-sensor
install_type=SENSOR
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
# MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=ens18
# MSEARCH=
MSRV=distributed-manager
MSRVIP=10.66.166.42
# MTU=
# NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
# NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
# OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
# PLAYBOOK=1
# REDIRECTHOST=
# REDIRECTINFO=IP
# RULESETUP=ETOPEN
# SHARDCOUNT=
# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
# STRELKA=1
# THEHIVE=1
# WAZUH=1
# WEBUSER=onionuser@somewhere.invalid
# WEBPASSWD1=0n10nus3r
# WEBPASSWD2=0n10nus3r

setup/automation/import-iso (new file, 77 lines)
@@ -0,0 +1,77 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

TESTING=true

address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
ALLOW_CIDR=0.0.0.0/0
ALLOW_ROLE=a
BASICZEEK=2
BASICSURI=2
# BLOGS=
#BNICS=eth1
ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=import
install_type=IMPORT
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=eth0
# MSEARCH=
# MSRV=
# MTU=
NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
# REDIRECTHOST=
REDIRECTINFO=IP
RULESETUP=ETOPEN
# SHARDCOUNT=
# SKIP_REBOOT=
# SOREMOTEPASS1=onionuser
# SOREMOTEPASS2=onionuser
STRELKA=1
THEHIVE=1
WAZUH=1
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=0n10nus3r
WEBPASSWD2=0n10nus3r

@@ -17,6 +17,7 @@

# README - DO NOT DEFINE GLOBAL VARIABLES IN THIS FILE. Instead use so-variables.

### Begin Logging Section ###
log() {
msg=$1
level=${2:-I}
@@ -41,51 +42,7 @@ logCmd() {
info "Executing command: $cmd"
$cmd >> "$setup_log" 2>&1
}

filter_unused_nics() {

if [[ $MNIC ]]; then local grep_string="$MNIC\|bond0"; else local grep_string="bond0"; fi

# If we call this function and NICs have already been assigned to the bond interface then add them to the grep search string
if [[ $BNICS ]]; then
grep_string="$grep_string"
for BONDNIC in "${BNICS[@]}"; do
grep_string="$grep_string\|$BONDNIC"
done
fi

# Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use)
filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g')
readarray -t filtered_nics <<< "$filtered_nics"

nic_list=()
for nic in "${filtered_nics[@]}"; do
case $(cat "/sys/class/net/${nic}/carrier" 2>/dev/null) in
1)
nic_list+=("$nic" "Link UP " "OFF")
;;
0)
nic_list+=("$nic" "Link DOWN " "OFF")
;;
*)
nic_list+=("$nic" "Link UNKNOWN " "OFF")
;;
esac
done

export nic_list
}

calculate_useable_cores() {

# Calculate reasonable core usage
local cores_for_zeek=$(( (num_cpu_cores/2) - 1 ))
local lb_procs_round
lb_procs_round=$(printf "%.0f\n" $cores_for_zeek)

if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
export lb_procs
}
### End Logging Section ###

airgap_rules() {
# Copy the rules for suricata if using Airgap
@@ -99,16 +56,6 @@ airgap_rules() {
cp -Rv /root/SecurityOnion/agrules/strelka /nsm/repo/rules/
}

analyze_system() {
title "System Characteristics"
logCmd "uptime"
logCmd "uname -a"
logCmd "free -h"
logCmd "lscpu"
logCmd "df -h"
logCmd "ip a"
}

accept_salt_key_remote() {
systemctl restart salt-minion

@@ -146,24 +93,6 @@ addtotab_generate_templates() {

}

# $5 => (optional) password variable
so_add_user() {
local username=$1
local uid=$2
local gid=$3
local home_dir=$4
if [ "$5" ]; then local pass=$5; fi

echo "Add $username user" >> "$setup_log" 2>&1
groupadd --gid "$gid" "$username"
useradd -m --uid "$uid" --gid "$gid" --home-dir "$home_dir" "$username"

# If a password has been passed in, set the password
if [ "$pass" ]; then
echo "$username":"$pass" | chpasswd --crypt-method=SHA512
fi
}

add_socore_user_manager() {
so_add_user "socore" "939" "939" "/opt/so" >> "$setup_log" 2>&1
}
@@ -172,29 +101,6 @@ add_soremote_user_manager() {
so_add_user "soremote" "947" "947" "/home/soremote" "$SOREMOTEPASS1" >> "$setup_log" 2>&1
}

wait_for_file() {
local filename=$1
local max_attempts=$2 # this is multiplied by the wait interval, so make sure it isn't too large
local cur_attempts=0
local wait_interval=$3
local total_time=$(( max_attempts * wait_interval ))
local date
date=$(date)

while [[ $cur_attempts -lt $max_attempts ]]; do
if [ -f "$filename" ]; then
echo "File $filename found at $date" >> "$setup_log" 2>&1
return 0
else
((cur_attempts++))
echo "File $filename does not exist; waiting ${wait_interval}s then checking again ($cur_attempts/$max_attempts)..." >> "$setup_log" 2>&1
sleep "$wait_interval"
fi
done
echo "Could not find $filename after waiting ${total_time}s" >> "$setup_log" 2>&1
return 1
}

add_web_user() {
wait_for_file /opt/so/conf/kratos/db/db.sqlite 30 5
{
@@ -204,22 +110,25 @@ add_web_user() {
} >> "/root/so-user-add.log" 2>&1
}

# Create a secrets pillar so that passwords survive re-install
secrets_pillar(){
if [ ! -f $local_salt_dir/pillar/secrets.sls ]; then
echo "Creating Secrets Pillar" >> "$setup_log" 2>&1
mkdir -p $local_salt_dir/pillar
printf '%s\n'\
"secrets:"\
"  mysql: $MYSQLPASS"\
"  playbook_db: $PLAYBOOKDBPASS"\
"  playbook_admin: $PLAYBOOKADMINPASS"\
"  playbook_automation: $PLAYBOOKAUTOMATIONPASS"\
"  grafana_admin: $GRAFANAPASS"\
"  fleet: $FLEETPASS"\
"  fleet_jwt: $FLEETJWT"\
"  fleet_enroll-secret: False" > $local_salt_dir/pillar/secrets.sls
fi
analyze_system() {
title "System Characteristics"
logCmd "uptime"
logCmd "uname -a"
logCmd "free -h"
logCmd "lscpu"
logCmd "df -h"
logCmd "ip a"
}

calculate_useable_cores() {

# Calculate reasonable core usage
local cores_for_zeek=$(( (num_cpu_cores/2) - 1 ))
local lb_procs_round
lb_procs_round=$(printf "%.0f\n" $cores_for_zeek)

if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
export lb_procs
}

check_admin_pass() {

@@ -825,6 +734,27 @@ check_requirements() {
fi
}

check_sos_appliance() {
# Let's see if this is a SOS Appliance
if [ -f "/etc/SOSMODEL" ]; then
local MODEL=$(cat /etc/SOSMODEL)
echo "Found SOS Model $MODEL"
echo "sosmodel: $MODEL" >> /etc/salt/grains
fi
}

compare_main_nic_ip() {
if [[ "$MAINIP" != "$MNIC_IP" ]]; then
read -r -d '' message <<- EOM
The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC).

This is not a supported configuration, please remediate and rerun setup.
EOM
whiptail --title "Security Onion Setup" --msgbox "$message" 10 75
kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
fi
}

compare_versions() {
manager_ver=$($sshcmd -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion)

@@ -1219,6 +1149,95 @@ download_repo_tarball() {
} >> "$setup_log" 2>&1
}

elasticsearch_pillar() {

local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls

# Create the node pillar
printf '%s\n'\
"elasticsearch:"\
"  mainip: '$MAINIP'"\
"  mainint: '$MNIC'"\
"  esheap: '$NODE_ES_HEAP_SIZE'" >> "$pillar_file"
if [ -n "$ESCLUSTERNAME" ]; then
printf '%s\n'\
"  esclustername: $ESCLUSTERNAME" >> "$pillar_file"
else
printf '%s\n'\
"  esclustername: {{ grains.host }}" >> "$pillar_file"
fi
printf '%s\n'\
"  node_type: '$NODETYPE'"\
"  es_port: $node_es_port"\
"  log_size_limit: $log_size_limit"\
"  node_route_type: 'hot'"\
"" >> "$pillar_file"

printf '%s\n'\
"logstash_settings:"\
"  ls_pipeline_batch_size: $LSPIPELINEBATCH"\
"  ls_input_threads: $LSINPUTTHREADS"\
"  lsheap: $NODE_LS_HEAP_SIZE"\
"  ls_pipeline_workers: $num_cpu_cores"\
"" >> "$pillar_file"

}

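Note: for reference, the minion pillar written by elasticsearch_pillar above has roughly the following shape; the concrete values are placeholders standing in for the shell variables, not output captured from a real install:

elasticsearch:
  mainip: '192.168.1.20'
  mainint: 'eth0'
  esheap: '4000m'
  esclustername: {{ grains.host }}
  node_type: 'search'
  es_port: 9200
  log_size_limit: 300
  node_route_type: 'hot'

logstash_settings:
  ls_pipeline_batch_size: 125
  ls_input_threads: 1
  lsheap: 1000m
  ls_pipeline_workers: 8
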
es_heapsize() {

# Determine ES Heap Size
if [ "$total_mem" -lt 8000 ] ; then
ES_HEAP_SIZE="600m"
elif [ "$total_mem" -ge 100000 ]; then
# Set a max of 25GB for heap size
# https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html
ES_HEAP_SIZE="25000m"
else
# Set heap size to 25% of available memory
ES_HEAP_SIZE=$(( total_mem / 4 ))"m"
fi
export ES_HEAP_SIZE

if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE|IMPORT)$ ]]; then
NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE
export NODE_ES_HEAP_SIZE
fi
}

filter_unused_nics() {

if [[ $MNIC ]]; then local grep_string="$MNIC\|bond0"; else local grep_string="bond0"; fi

# If we call this function and NICs have already been assigned to the bond interface then add them to the grep search string
if [[ $BNICS ]]; then
grep_string="$grep_string"
for BONDNIC in "${BNICS[@]}"; do
grep_string="$grep_string\|$BONDNIC"
done
fi

# Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use)
filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g')
readarray -t filtered_nics <<< "$filtered_nics"

nic_list=()
for nic in "${filtered_nics[@]}"; do
case $(cat "/sys/class/net/${nic}/carrier" 2>/dev/null) in
1)
nic_list+=("$nic" "Link UP " "OFF")
;;
0)
nic_list+=("$nic" "Link DOWN " "OFF")
;;
*)
nic_list+=("$nic" "Link UNKNOWN " "OFF")
;;
esac
done

export nic_list
}

fireeye_pillar() {

local fireeye_pillar_path=$local_salt_dir/pillar/fireeye

@@ -1369,6 +1388,33 @@ import_registry_docker() {
fi
}

# Set Logstash heap size based on total memory
ls_heapsize() {

if [ "$total_mem" -ge 32000 ]; then
LS_HEAP_SIZE='1000m'
return
fi

case "$install_type" in
'MANAGERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE')
LS_HEAP_SIZE='1000m'
;;
'EVAL')
LS_HEAP_SIZE='700m'
;;
*)
LS_HEAP_SIZE='500m'
;;
esac
export LS_HEAP_SIZE

if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then
NODE_LS_HEAP_SIZE=$LS_HEAP_SIZE
export NODE_LS_HEAP_SIZE
fi
}

manager_pillar() {

local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls

@@ -1608,6 +1654,11 @@ manager_global() {
printf '%s\n' '----' >> "$setup_log" 2>&1
}

mark_version() {
# Drop a file with the current version
echo "$SOVERSION" > /etc/soversion
}

minio_generate_keys() {

local charSet="[:graph:]"
@@ -1669,40 +1720,6 @@ network_setup() {
} >> "$setup_log" 2>&1
}

elasticsearch_pillar() {

local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls

# Create the node pillar
printf '%s\n'\
"elasticsearch:"\
"  mainip: '$MAINIP'"\
"  mainint: '$MNIC'"\
"  esheap: '$NODE_ES_HEAP_SIZE'" >> "$pillar_file"
if [ -n "$ESCLUSTERNAME" ]; then
printf '%s\n'\
"  esclustername: $ESCLUSTERNAME" >> "$pillar_file"
else
printf '%s\n'\
"  esclustername: {{ grains.host }}" >> "$pillar_file"
fi
printf '%s\n'\
"  node_type: '$NODETYPE'"\
"  es_port: $node_es_port"\
"  log_size_limit: $log_size_limit"\
"  node_route_type: 'hot'"\
"" >> "$pillar_file"

printf '%s\n'\
"logstash_settings:"\
"  ls_pipeline_batch_size: $LSPIPELINEBATCH"\
"  ls_input_threads: $LSINPUTTHREADS"\
"  lsheap: $NODE_LS_HEAP_SIZE"\
"  ls_pipeline_workers: $num_cpu_cores"\
"" >> "$pillar_file"

}

parse_install_username() {
# parse out the install username so things copy correctly
INSTALLUSERNAME=${SUDO_USER:-${USER}}

@@ -2140,6 +2157,24 @@ salt_firstcheckin() {
salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the output
}

# Create a secrets pillar so that passwords survive re-install
secrets_pillar(){
if [ ! -f $local_salt_dir/pillar/secrets.sls ]; then
echo "Creating Secrets Pillar" >> "$setup_log" 2>&1
mkdir -p $local_salt_dir/pillar
printf '%s\n'\
"secrets:"\
"  mysql: $MYSQLPASS"\
"  playbook_db: $PLAYBOOKDBPASS"\
"  playbook_admin: $PLAYBOOKADMINPASS"\
"  playbook_automation: $PLAYBOOKAUTOMATIONPASS"\
"  grafana_admin: $GRAFANAPASS"\
"  fleet: $FLEETPASS"\
"  fleet_jwt: $FLEETJWT"\
"  fleet_enroll-secret: False" > $local_salt_dir/pillar/secrets.sls
fi
}

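Note: the secrets.sls written by secrets_pillar has one key per generated credential; a sketch of its shape with placeholder values (a real install writes randomly generated passwords here):

secrets:
  mysql: <generated password>
  playbook_db: <generated password>
  playbook_admin: <generated password>
  playbook_automation: <generated password>
  grafana_admin: <generated password>
  fleet: <generated password>
  fleet_jwt: <generated token>
  fleet_enroll-secret: False
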
set_base_heapsizes() {
es_heapsize
ls_heapsize
@@ -2155,18 +2190,6 @@ set_main_ip() {
MNIC_IP=$(ip a s "$MNIC" | grep -oE 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2)
}

compare_main_nic_ip() {
if [[ "$MAINIP" != "$MNIC_IP" ]]; then
read -r -d '' message <<- EOM
The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC).

This is not a supported configuration, please remediate and rerun setup.
EOM
whiptail --title "Security Onion Setup" --msgbox "$message" 10 75
kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
fi
}

# Add /usr/sbin to everyone's path
set_path() {
echo "complete -cf sudo" > /etc/profile.d/securityonion.sh

@@ -2440,6 +2463,24 @@ set_updates() {
fi
}

# $5 => (optional) password variable
so_add_user() {
local username=$1
local uid=$2
local gid=$3
local home_dir=$4
if [ "$5" ]; then local pass=$5; fi

echo "Add $username user" >> "$setup_log" 2>&1
groupadd --gid "$gid" "$username"
useradd -m --uid "$uid" --gid "$gid" --home-dir "$home_dir" "$username"

# If a password has been passed in, set the password
if [ "$pass" ]; then
echo "$username":"$pass" | chpasswd --crypt-method=SHA512
fi
}

steno_pillar() {

local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
@@ -2451,11 +2492,6 @@ steno_pillar() {

}

mark_version() {
# Drop a file with the current version
echo "$SOVERSION" > /etc/soversion
}

update_sudoers_for_testing() {
if [ -n "$TESTING" ]; then
info "Ensuring $INSTALLUSERNAME has password-less sudo access for automated testing purposes."

@@ -2507,56 +2543,29 @@ use_turbo_proxy() {
fi
}

# Set Logstash heap size based on total memory
ls_heapsize() {
wait_for_file() {
local filename=$1
local max_attempts=$2 # this is multiplied by the wait interval, so make sure it isn't too large
local cur_attempts=0
local wait_interval=$3
local total_time=$(( max_attempts * wait_interval ))
local date
date=$(date)

if [ "$total_mem" -ge 32000 ]; then
LS_HEAP_SIZE='1000m'
return
fi

case "$install_type" in
'MANAGERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE')
LS_HEAP_SIZE='1000m'
;;
'EVAL')
LS_HEAP_SIZE='700m'
;;
*)
LS_HEAP_SIZE='500m'
;;
esac
export LS_HEAP_SIZE

if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then
NODE_LS_HEAP_SIZE=LS_HEAP_SIZE
export NODE_LS_HEAP_SIZE
fi
}

es_heapsize() {

# Determine ES Heap Size
if [ "$total_mem" -lt 8000 ] ; then
ES_HEAP_SIZE="600m"
elif [ "$total_mem" -ge 100000 ]; then
# Set a max of 25GB for heap size
# https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html
ES_HEAP_SIZE="25000m"
while [[ $cur_attempts -lt $max_attempts ]]; do
if [ -f "$filename" ]; then
echo "File $filename found at $date" >> "$setup_log" 2>&1
return 0
else
# Set heap size to 25% of available memory
ES_HEAP_SIZE=$(( total_mem / 4 ))"m"
fi
export ES_HEAP_SIZE

if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE|IMPORT)$ ]]; then
NODE_ES_HEAP_SIZE=ES_HEAP_SIZE
export NODE_ES_HEAP_SIZE
((cur_attempts++))
echo "File $filename does not exist; waiting ${wait_interval}s then checking again ($cur_attempts/$max_attempts)..." >> "$setup_log" 2>&1
sleep "$wait_interval"
fi
done
echo "Could not find $filename after waiting ${total_time}s" >> "$setup_log" 2>&1
return 1
}

whiptail_prog_new_message() {
local message=$1
set_progress_str "$percentage" "$message"

@@ -604,6 +604,7 @@ set_redirect >> $setup_log 2>&1

set_progress_str 8 'Initializing Salt minion'
configure_minion "$minion_type" >> $setup_log 2>&1
check_sos_appliance >> $setup_log 2>&1

update_sudoers_for_testing >> $setup_log 2>&1