diff --git a/salt/common/tools/brostatus.sh b/salt/common/tools/brostatus.sh deleted file mode 100644 index e69de29bb..000000000 diff --git a/salt/firewall/init.sls b/salt/firewall/init.sls index c2fb74e77..0d5fd174d 100644 --- a/salt/firewall/init.sls +++ b/salt/firewall/init.sls @@ -173,6 +173,28 @@ enable_masternode_ES_9300_{{ip}}: - position: 1 - save: True +enable_masternode_ES_9400_{{ip}}: + iptables.insert: + - table: filter + - chain: DOCKER-USER + - jump: ACCEPT + - proto: tcp + - source: {{ ip }} + - dport: 9400 + - position: 1 + - save: True + +enable_masternode_ES_9500_{{ip}}: + iptables.insert: + - table: filter + - chain: DOCKER-USER + - jump: ACCEPT + - proto: tcp + - source: {{ ip }} + - dport: 9500 + - position: 1 + - save: True + enable_masternode_influxdb_8086_{{ip}}: iptables.insert: - table: filter @@ -367,6 +389,28 @@ enable_standard_analyst_3000_{{ip}}: - position: 1 - save: True +enable_standard_analyst_9000_{{ip}}: + iptables.insert: + - table: filter + - chain: DOCKER-USER + - jump: ACCEPT + - proto: tcp + - source: {{ ip }} + - dport: 9000 + - position: 1 + - save: True + +enable_standard_analyst_9001_{{ip}}: + iptables.insert: + - table: filter + - chain: DOCKER-USER + - jump: ACCEPT + - proto: tcp + - source: {{ ip }} + - dport: 9001 + - position: 1 + - save: True + {% endfor %} # Rules for storage nodes connecting to master diff --git a/salt/hive/init.sls b/salt/hive/init.sls new file mode 100644 index 000000000..958ecf540 --- /dev/null +++ b/salt/hive/init.sls @@ -0,0 +1,54 @@ +# Install Elasticsearch + +# Made directory for ES data to live in +hiveesdata: + file.directory: + - name: /nsm/hive/esdata + - makedirs: True + +so-thehive-es: + docker_container.running: + - image: docker.elastic.co/elasticsearch/elasticsearch:5.6.0 + - hostname: so-thehive-es + - name: so-thehive-es + - interactive: True + - tty: True + - binds: + - /nsm/hive/esdata:/usr/share/elasticsearch/data:rw + - environment: + - http.host=0.0.0.0 + - http.port=9400 + 
- transport.tcp.port=9500 + - transport.host=0.0.0.0 + - xpack.security.enabled=false + - cluster.name=hive + - script.inline=true + - thread_pool.index.queue_size=100000 + - thread_pool.search.queue_size=100000 + - thread_pool.bulk.queue_size=100000 + +# Install Cortex + +so-cortex: + docker_container.running: + - image: thehiveproject/cortex:latest + - hostname: so-cortex + - name: so-cortex + +# Install Hive +hiveconfdir: + file.directory: + - name: /opt/so/conf/hive/etc + - makedirs: True + +hiveconf: + file.managed: + - name: /opt/so/conf/hive/etc/application.conf + - source: salt://hive/thehive/etc/application.conf + - template: jinja + +so-thehive: + docker_container.running: + - image: thehiveproject/thehive:latest + - hostname: so-thehive + - name: so-thehive diff --git a/salt/hive/thehive/etc/application.conf b/salt/hive/thehive/etc/application.conf new file mode 100644 index 000000000..5a0a96b9b --- /dev/null +++ b/salt/hive/thehive/etc/application.conf @@ -0,0 +1,210 @@ +{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %} + +# Secret Key +# The secret key is used to secure cryptographic functions. +# WARNING: If you deploy your application on several servers, make sure to use the same key. +play.http.secret.key="letsdewdis" + +# Elasticsearch +search { + ## Basic configuration + # Index name. + index = the_hive + # ElasticSearch cluster name. + cluster = hive + # ElasticSearch instance address. + host = ["{{ MASTERIP }}:9500"] + + ## Advanced configuration + # Scroll keepalive. + #keepalive = 1m + # Scroll page size. 
+ #pagesize = 50 + # Number of shards + #nbshards = 5 + # Number of replicas + #nbreplicas = 1 + # Arbitrary settings + #settings { + # # Maximum number of nested fields + # mapping.nested_fields.limit = 100 + #} + + ### XPack SSL configuration + # Username for XPack authentication + #search.username = "" + # Password for XPack authentication + #search.password = "" + # Enable SSL to connect to ElasticSearch + search.ssl.enabled = false + # Path to certificate authority file + #search.ssl.ca = "" + # Path to certificate file + #search.ssl.certificate = "" + # Path to key file + #search.ssl.key = "" + + ### SearchGuard configuration + # Path to JKS file containing client certificate + #search.guard.keyStore.path = "" + # Password of the keystore + #search.guard.keyStore.password = "" + # Path to JKS file containing certificate authorities + #search.guard.trustStore.path = "" + ## Password of the truststore + #search.guard.trustStore.password = "" + # Enforce hostname verification + #search.guard.hostVerification = false + # If hostname verification is enabled specify if hostname should be resolved + #search.guard.hostVerificationResolveHostname = false +} + +# Authentication +auth { + # "provider" parameter contains authentication provider. It can be multi-valued (useful for migration) + # available auth types are: + # services.LocalAuthSrv : passwords are stored in user entity (in Elasticsearch). No configuration is required. + # ad : use ActiveDirectory to authenticate users. Configuration is under "auth.ad" key + # ldap : use LDAP to authenticate users. Configuration is under "auth.ldap" key + provider = [local] + + # By default, basic authentication is disabled. You can enable it by setting "method.basic" to true. + #method.basic = true + + + ad { + # The Windows domain name in DNS format. This parameter is required if you do not use + # 'serverNames' below. 
+ #domainFQDN = "mydomain.local" + + # Optionally you can specify the host names of the domain controllers instead of using 'domainFQDN + # above. If this parameter is not set, TheHive uses 'domainFQDN'. + #serverNames = [ad1.mydomain.local, ad2.mydomain.local] + + # The Windows domain name using short format. This parameter is required. + #domainName = "MYDOMAIN" + + # If 'true', use SSL to connect to the domain controller. + #useSSL = true + } + + ldap { + # The LDAP server name or address. The port can be specified using the 'host:port' + # syntax. This parameter is required if you don't use 'serverNames' below. + #serverName = "ldap.mydomain.local:389" + + # If you have multiple LDAP servers, use the multi-valued setting 'serverNames' instead. + #serverNames = [ldap1.mydomain.local, ldap2.mydomain.local] + + # Account to use to bind to the LDAP server. This parameter is required. + #bindDN = "cn=thehive,ou=services,dc=mydomain,dc=local" + + # Password of the binding account. This parameter is required. + #bindPW = "***secret*password***" + + # Base DN to search users. This parameter is required. + #baseDN = "ou=users,dc=mydomain,dc=local" + + # Filter to search user in the directory server. Please note that {0} is replaced + # by the actual user name. This parameter is required. + #filter = "(cn={0})" + + # If 'true', use SSL to connect to the LDAP directory server. + #useSSL = true + } +} + +# Maximum time between two requests without requesting authentication +session { + warning = 5m + inactivity = 1h +} + +# Max textual content length +play.http.parser.maxMemoryBuffer= 1M +# Max file size +play.http.parser.maxDiskBuffer = 1G + +# Cortex +# TheHive can connect to one or multiple Cortex instances. Give each +# Cortex instance a name and specify the associated URL. 
+# +# In order to use Cortex, first you need to enable the Cortex module by uncommenting the next line + +#play.modules.enabled += connectors.cortex.CortexConnector + +cortex { + #"CORTEX-SERVER-ID" { + # url = "" + # key = "" + # # HTTP client configuration (SSL and proxy) + # ws {} + #} +} + +# MISP +# TheHive can connect to one or multiple MISP instances. Give each MISP +# instance a name and specify the associated Authkey that must be used +# to poll events, the case template that should be used by default when +# importing events as well as the tags that must be added to cases upon +# import. + +# Prior to configuring the integration with a MISP instance, you must +# enable the MISP connector. This will allow you to import events to +# and/or export cases to the MISP instance(s). + +#play.modules.enabled += connectors.misp.MispConnector + +misp { + # Interval between consecutive MISP event imports in hours (h) or + # minutes (m). + interval = 1h + + #"MISP-SERVER-ID" { + # # MISP connection configuration requires at least an url and a key. The key must + # # be linked with a sync account on MISP. + # url = "" + # key = "" + # + # # Name of the case template in TheHive that shall be used to import + # # MISP events as cases by default. + # caseTemplate = "" + # + # # Optional tags to add to each observable imported from an event + # # available on this instance. + # tags = ["misp-server-id"] + # + # ## MISP event filters + # # MISP filters is used to exclude events from the import. 
+ # # Filter criteria are: + # # The number of attribute + # max-attributes = 1000 + # # The size of its JSON representation + # max-size = 1 MiB + # # The age of the last publish date + # max-age = 7 days + # # Organization and tags + # exclusion { + # organisation = ["bad organisation", "other organisations"] + # tags = ["tag1", "tag2"] + # } + # + # ## HTTP client configuration (SSL and proxy) + # # Truststore to use to validate the X.509 certificate of the MISP + # # instance if the default truststore is not sufficient. + # # Proxy can also be used + # ws { + # ssl.trustManager.stores = [ { + # path = /path/to/truststore.jks + # } ] + # proxy { + # host = proxy.mydomain.org + # port = 3128 + # } + # } + # + # # MISP purpose defines if this instance can be used to import events (ImportOnly), export cases (ExportOnly) or both (ImportAndExport) + # # Default is ImportAndExport + # purpose = ImportAndExport + #} ## <-- Uncomment to complete the configuration +} diff --git a/salt/suricata/files/suricata.yaml b/salt/suricata/files/suricata.yaml index eb1888571..1acce5b96 100644 --- a/salt/suricata/files/suricata.yaml +++ b/salt/suricata/files/suricata.yaml @@ -919,7 +919,7 @@ host-mode: auto # Runmode the engine should use. Please check --list-runmodes to get the available # runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned # load balancing). -#runmode: autofp +runmode: workers # Specifies the kind of flow load balancer used by the flow pinned autofp mode. # diff --git a/salt/suricata/files/suricataMETA.yaml b/salt/suricata/files/suricataMETA.yaml index 2f58ad2f2..99a59c719 100644 --- a/salt/suricata/files/suricataMETA.yaml +++ b/salt/suricata/files/suricataMETA.yaml @@ -1074,7 +1074,7 @@ host-mode: auto # Runmode the engine should use. Please check --list-runmodes to get the available # runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned # load balancing). 
-#runmode: autofp +runmode: workers # Specifies the kind of flow load balancer used by the flow pinned autofp mode. # diff --git a/so-setup-network.sh b/so-setup-network.sh index ea37b89c5..e0a2899de 100644 --- a/so-setup-network.sh +++ b/so-setup-network.sh @@ -482,7 +482,7 @@ install_master() { # Install the salt master package if [ $OS == 'centos' ]; then - yum -y install salt-master wget + yum -y install wget salt-common salt-master # Create a place for the keys for Ubuntu minions mkdir -p /opt/so/gpg @@ -491,7 +491,9 @@ install_master() { wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH else - apt-get install -y salt-master + apt-get install -y salt-common=2018.3.4+ds-1 salt-master=2018.3.4+ds-1 salt-minion=2018.3.4+ds-1 python-m2crypto + apt-mark hold salt-common salt-master salt-minion + apt-get install -y python-m2crypto fi copy_master_config @@ -604,6 +606,8 @@ saltify() { if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then yum -y install https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm + cp /etc/yum.repos.d/salt-latest.repo /etc/yum.repos.d/salt-2018-3.repo + sed -i 's/latest/2018.3/g' /etc/yum.repos.d/salt-2018-3.repo cat > /etc/yum.repos.d/wazuh.repo <<\EOF [wazuh_repo] gpgcheck=1 @@ -716,6 +720,15 @@ EOF echo "gpgcheck=1" >> /etc/yum.repos.d/salt-latest.repo echo "gpgkey=file:///etc/pki/rpm-gpg/saltstack-signing-key" >> /etc/yum.repos.d/salt-latest.repo + # Proxy is hating on me.. 
Lets just set it manually + echo "[salt-2018.3]" > /etc/yum.repos.d/salt-2018-3.repo + echo "name=SaltStack Latest Release Channel for RHEL/Centos \$releasever" >> /etc/yum.repos.d/salt-2018-3.repo + echo "baseurl=https://repo.saltstack.com/yum/redhat/7/\$basearch/2018.3" >> /etc/yum.repos.d/salt-2018-3.repo + echo "failovermethod=priority" >> /etc/yum.repos.d/salt-2018-3.repo + echo "enabled=1" >> /etc/yum.repos.d/salt-2018-3.repo + echo "gpgcheck=1" >> /etc/yum.repos.d/salt-2018-3.repo + echo "gpgkey=file:///etc/pki/rpm-gpg/saltstack-signing-key" >> /etc/yum.repos.d/salt-2018-3.repo + cat > /etc/yum.repos.d/wazuh.repo <<\EOF [wazuh_repo] gpgcheck=1 @@ -727,6 +740,8 @@ protect=1 EOF else yum -y install https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm + cp /etc/yum.repos.d/salt-latest.repo /etc/yum.repos.d/salt-2018-3.repo + sed -i 's/latest/2018.3/g' /etc/yum.repos.d/salt-2018-3.repo cat > /etc/yum.repos.d/wazuh.repo <<\EOF [wazuh_repo] gpgcheck=1 @@ -740,18 +755,18 @@ EOF fi yum clean expire-cache - yum -y install salt-minion yum-utils device-mapper-persistent-data lvm2 openssl - yum -y update + yum -y install salt-minion-2018.3.4 yum-utils device-mapper-persistent-data lvm2 openssl + yum -y update --exclude=salt* systemctl enable salt-minion # Nasty hack but required for now if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then - yum -y install salt-master python-m2crypto salt-minion m2crypto + yum -y install salt-master-2018.3.4 python-m2crypto salt-minion-2018.3.4 m2crypto systemctl enable salt-master else - yum -y install salt-minion python-m2m2crypto m2crypto + yum -y install salt-minion-2018.3.4 python-m2crypto m2crypto fi - + echo "exclude=salt*" >> /etc/yum.conf else ADDUSER=useradd @@ -768,7 +783,9 @@ EOF # Install the repo for salt wget --inet4-only -O - https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add - + wget --inet4-only -O - 
https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/2018.3/SALTSTACK-GPG-KEY.pub | apt-key add - echo "deb http://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list + echo "deb http://repo.saltstack.com/apt/ubuntu/$UVER/amd64/2018.3 xenial main" > /etc/apt/sources.list.d/saltstack2018.list # Lets get the docker repo added curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - @@ -787,7 +804,8 @@ EOF # Initialize the new repos apt-get update >>~/sosetup.log 2>&1 - apt-get -y install salt-minion python-m2crypto >>~/sosetup.log 2>&1 + apt-get -y install salt-minion=2018.3.4+ds-1 salt-common=2018.3.4+ds-1 python-m2crypto >>~/sosetup.log 2>&1 + apt-mark hold salt-minion salt-common else @@ -800,7 +818,8 @@ EOF echo "deb https://packages.wazuh.com/3.x/apt/ stable main" | tee /etc/apt/sources.list.d/wazuh.list # Initialize the new repos apt-get update >>~/sosetup.log 2>&1 - apt-get -y install salt-minion python-m2crypto >>~/sosetup.log 2>&1 + apt-get -y install salt-minion=2018.3.4+ds-1 salt-common=2018.3.4+ds-1 python-m2crypto >>~/sosetup.log 2>&1 + apt-mark hold salt-minion salt-common fi