diff --git a/pillar/firewall/ports.sls b/pillar/firewall/ports.sls
index 4f7c06bec..1e0be460b 100644
--- a/pillar/firewall/ports.sls
+++ b/pillar/firewall/ports.sls
@@ -33,6 +33,8 @@ firewall:
- 9300
- 9400
- 9500
+ - 9595
+ - 9696
udp:
- 1514
minions:
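
Note: 9696 lines up with the Redis TLS listener configured later in this diff (redis.conf tls-port and the redis portgroup); 9595 is opened here without a matching hunk. As a rough sanity check on the manager after a highstate, assuming ss is available on the host:

    # Confirm the new TCP ports are actually listening (hypothetical check)
    ss -lntp | grep -E ':(9595|9696)[[:space:]]'
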
diff --git a/pillar/logstash/manager.sls b/pillar/logstash/manager.sls
index dcf222ae4..6f3ba495b 100644
--- a/pillar/logstash/manager.sls
+++ b/pillar/logstash/manager.sls
@@ -1,12 +1,9 @@
-{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'minio') %}
+{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'redis') %}
logstash:
pipelines:
manager:
config:
- so/0009_input_beats.conf
- so/0010_input_hhbeats.conf
- {%- if PIPELINE == "minio"%}
- - so/9998_output_minio.conf.jinja
- {%- else %}
- so/9999_output_redis.conf.jinja
- {%- endif %}
\ No newline at end of file
+
\ No newline at end of file
diff --git a/pillar/logstash/search.sls b/pillar/logstash/search.sls
index 22f73c5d4..7a5aeec39 100644
--- a/pillar/logstash/search.sls
+++ b/pillar/logstash/search.sls
@@ -3,11 +3,7 @@ logstash:
pipelines:
search:
config:
- {%- if PIPELINE == "minio"%}
- - so/0899_input_minio.conf.jinja
- {%- else %}
- so/0900_input_redis.conf.jinja
- {%- endif %}
- so/9000_output_zeek.conf.jinja
- so/9002_output_import.conf.jinja
- so/9034_output_syslog.conf.jinja
diff --git a/salt/ca/init.sls b/salt/ca/init.sls
index dcec40d9a..62b89d351 100644
--- a/salt/ca/init.sls
+++ b/salt/ca/init.sls
@@ -36,6 +36,7 @@ pki_private_key:
- days_valid: 3650
- days_remaining: 0
- backup: True
+ - replace: False
- require:
- file: /etc/pki
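
Note: replace: False should keep Salt from regenerating the CA private key on every highstate, which would silently invalidate certificates already issued from it. Assuming the key sits next to the cert as /etc/pki/ca.key (path not shown in this hunk), a quick match check is:

    # Cert and key moduli should hash identically if the key was preserved
    openssl x509 -noout -modulus -in /etc/pki/ca.crt | openssl md5
    openssl rsa -noout -modulus -in /etc/pki/ca.key | openssl md5
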
diff --git a/salt/common/tools/sbin/so-playbook-sync b/salt/common/tools/sbin/so-playbook-sync
index 8b2817eaa..f4c2c456e 100755
--- a/salt/common/tools/sbin/so-playbook-sync
+++ b/salt/common/tools/sbin/so-playbook-sync
@@ -17,4 +17,4 @@
. /usr/sbin/so-common
-docker exec so-soctopus python3 playbook_play-sync.py >> /opt/so/log/soctopus/so-playbook-sync.log 2>&1
+docker exec so-soctopus python3 playbook_play-sync.py
diff --git a/salt/elasticsearch/files/ingest/sysmon b/salt/elasticsearch/files/ingest/sysmon
index feb96720d..5fe46b3a5 100644
--- a/salt/elasticsearch/files/ingest/sysmon
+++ b/salt/elasticsearch/files/ingest/sysmon
@@ -7,6 +7,9 @@
{ "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } },
{ "set": { "if": "ctx.event?.code == '3'", "field": "event.category", "value": "host,process,network", "override": true } },
{ "set": { "if": "ctx.event?.code == '1'", "field": "event.category", "value": "host,process", "override": true } },
+ { "set": { "if": "ctx.event?.code == '5'", "field": "event.category", "value": "host,process", "override": true } },
+ { "set": { "if": "ctx.event?.code == '6'", "field": "event.category", "value": "host,driver", "override": true } },
+ { "set": { "if": "ctx.event?.code == '22'", "field": "event.category", "value": "network", "override": true } },
{ "set": { "if": "ctx.event?.code == '1'", "field": "event.dataset", "value": "process_creation", "override": true } },
{ "set": { "if": "ctx.event?.code == '2'", "field": "event.dataset", "value": "process_changed_file", "override": true } },
{ "set": { "if": "ctx.event?.code == '3'", "field": "event.dataset", "value": "network_connection", "override": true } },
@@ -34,6 +37,7 @@
{ "rename": { "field": "winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.Company", "target_field": "process.pe.company", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } },
diff --git a/salt/elasticsearch/files/scripts/so-catrust b/salt/elasticsearch/files/scripts/so-catrust
new file mode 100644
index 000000000..02ea12726
--- /dev/null
+++ b/salt/elasticsearch/files/scripts/so-catrust
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+{%- set VERSION = salt['pillar.get']('global:soversion', '') %}
+{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
+{%- set MANAGER = salt['grains.get']('master') %}
+. /usr/sbin/so-common
+# Check to see if we have extracted the ca cert.
+if [ ! -f /opt/so/saltstack/local/salt/common/cacerts ]; then
+ docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-elasticsearchca --user root --entrypoint keytool {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logstash:{{ VERSION }} -keystore /etc/pki/ca-trust/extracted/java/cacerts -alias SOSCA -import -file /etc/pki/ca.crt -storepass changeit -noprompt
+ docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/java/cacerts /opt/so/saltstack/local/salt/common/cacerts
+ docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem
+ docker rm so-elasticsearchca
+ echo "" >> /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem
+ echo "sosca" >> /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem
+ cat /etc/pki/ca.crt >> /opt/so/saltstack/local/salt/common/tls-ca-bundle.pem
+else
+ exit 0
+fi
\ No newline at end of file
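
Note: so-catrust runs keytool from the so-logstash image to import the manager CA into a copy of the JRE truststore, then exports both the java cacerts and a PEM bundle into the local salt file root for distribution. One way to confirm the import took, wherever keytool is available:

    # The SOSCA alias should appear in the exported truststore (default JRE storepass)
    keytool -list -keystore /opt/so/saltstack/local/salt/common/cacerts \
      -alias SOSCA -storepass changeit
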
diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls
index 0b2090591..0f92a5d9c 100644
--- a/salt/elasticsearch/init.sls
+++ b/salt/elasticsearch/init.sls
@@ -26,9 +26,11 @@
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-importpcap'] %}
{% set esclustername = salt['pillar.get']('manager:esclustername', '') %}
{% set esheap = salt['pillar.get']('manager:esheap', '') %}
+ {% set ismanager = True %}
{% elif grains['role'] in ['so-node','so-heavynode'] %}
{% set esclustername = salt['pillar.get']('elasticsearch:esclustername', '') %}
{% set esheap = salt['pillar.get']('elasticsearch:esheap', '') %}
+ {% set ismanager = False %}
{% endif %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
@@ -37,6 +39,46 @@ vm.max_map_count:
sysctl.present:
- value: 262144
+{% if ismanager %}
+# We have to add the Manager CA to the CA list
+cascriptsync:
+ file.managed:
+ - name: /usr/sbin/so-catrust
+ - source: salt://elasticsearch/files/scripts/so-catrust
+ - user: 939
+ - group: 939
+ - mode: 750
+ - template: jinja
+
+# Run the CA magic
+cascriptfun:
+ cmd.run:
+ - name: /usr/sbin/so-catrust
+
+{% endif %}
+
+# Move our new CA over so Elastic and Logstash can use SSL with the internal CA
+catrustdir:
+ file.directory:
+ - name: /opt/so/conf/ca
+ - user: 939
+ - group: 939
+ - makedirs: True
+
+cacertz:
+ file.managed:
+ - name: /opt/so/conf/ca/cacerts
+ - source: salt://common/cacerts
+ - user: 939
+ - group: 939
+
+capemz:
+ file.managed:
+ - name: /opt/so/conf/ca/tls-ca-bundle.pem
+ - source: salt://common/tls-ca-bundle.pem
+ - user: 939
+ - group: 939
+
# Add ES Group
elasticsearchgroup:
group.present:
@@ -149,6 +191,9 @@ so-elasticsearch:
- /opt/so/conf/elasticsearch/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
- /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
+ - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
+ - watch:
+ - file: cacertz
so-elasticsearch-pipelines-file:
file.managed:
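
Note: the CA distribution flow is: so-catrust (manager only) publishes cacerts and tls-ca-bundle.pem under salt://common/, cacertz/capemz copy them to /opt/so/conf/ca on each minion, and the container mounts the java truststore read-only, with the watch on cacertz restarting so-elasticsearch when the bundle changes. A small consistency check on the manager, purely as a sketch:

    # The distributed truststore should match what so-catrust produced
    sha256sum /opt/so/saltstack/local/salt/common/cacerts /opt/so/conf/ca/cacerts
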
diff --git a/salt/firewall/portgroups.yaml b/salt/firewall/portgroups.yaml
index 5dee48755..8771df8ef 100644
--- a/salt/firewall/portgroups.yaml
+++ b/salt/firewall/portgroups.yaml
@@ -64,6 +64,7 @@ firewall:
redis:
tcp:
- 6379
+ - 9696
salt_manager:
tcp:
- 4505
diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls
index 1fa5b0e86..9f9a5c51b 100644
--- a/salt/logstash/init.sls
+++ b/salt/logstash/init.sls
@@ -148,7 +148,6 @@ so-logstash:
- user: logstash
- environment:
- LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }}
- - SSL_CERT_FILE=/etc/ssl/certs/ca.crt
- port_bindings:
{% for BINDING in DOCKER_OPTIONS.port_bindings %}
- {{ BINDING }}
@@ -167,7 +166,8 @@ so-logstash:
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
- - /etc/ssl/certs/intca.crt:/etc/ssl/certs/ca.crt:ro
+ - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
+ - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
{%- if grains['role'] == 'so-eval' %}
- /nsm/zeek:/nsm/zeek:ro
- /nsm/suricata:/suricata:ro
diff --git a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
index 6e736f22f..c98a2a388 100644
--- a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
+++ b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
@@ -1,13 +1,11 @@
-{%- if grains.role == 'so-heavynode' %}
-{%- set MANAGER = salt['pillar.get']('elasticsearch:mainip', '') %}
-{%- else %}
-{%- set MANAGER = salt['pillar.get']('global:managerip', '') %}
-{% endif -%}
+{%- set MANAGER = salt['grains.get']('master') %}
{%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %}
input {
redis {
host => '{{ MANAGER }}'
+ port => 9696
+ ssl => true
data_type => 'list'
key => 'logstash:unparsed'
type => 'redis-input'
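
Note: with ssl => true this input validates the manager's certificate against the JVM truststore mounted in the logstash hunk above, so the TLS handshake is worth probing on its own. From a search node (substitute your manager hostname for MANAGER):

    # Expect "Verify return code: 0 (ok)" if the internal CA signed the redis cert
    openssl s_client -connect MANAGER:9696 -CAfile /etc/pki/ca.crt </dev/null
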
diff --git a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja
index e953c3521..a38d2cd44 100644
--- a/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9998_output_minio.conf.jinja
@@ -17,6 +17,7 @@ output {
encoding => {{ ENCODING }}
upload_queue_size => {{ UPLOAD_QUEUE_SIZE }}
temporary_directory => "/usr/share/logstash/data/tmp"
+ validate_credentials_on_root_bucket => false
additional_settings => {
"force_path_style" => true
}
diff --git a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
index 239ca8cb6..626ed62c3 100644
--- a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
@@ -1,8 +1,9 @@
-{% set MANAGER = salt['pillar.get']('global:managerip', '') %}
+{%- set MANAGER = salt['grains.get']('master') %}
{% set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) %}
output {
redis {
host => '{{ MANAGER }}'
+ port => 6379
data_type => 'list'
key => 'logstash:unparsed'
congestion_interval => 1
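
Note: the output side pins the plaintext port 6379 (a local hop into Redis on the manager), while the search-side input in the earlier hunk pulls over TLS on 9696. Queue depth is a quick health signal for this handoff:

    # How many unparsed events are waiting in the Redis list (run on the manager)
    redis-cli -p 6379 llen logstash:unparsed
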
diff --git a/salt/nginx/files/navigator_config.json b/salt/nginx/files/navigator_config.json
index d54f13265..b0866d742 100644
--- a/salt/nginx/files/navigator_config.json
+++ b/salt/nginx/files/navigator_config.json
@@ -1,4 +1,4 @@
-{%- set ip = salt['pillar.get']('global:managerip', '') %}
+{%- set URL_BASE = salt['pillar.get']('manager:url_base', '') %}
{
"enterprise_attack_url": "https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json",
@@ -16,7 +16,7 @@
"domain": "mitre-enterprise",
- "custom_context_menu_items": [ {"label": "view related plays","url": " https://{{ip}}/playbook/projects/detection-playbooks/issues?utf8=%E2%9C%93&set_filter=1&sort=id%3Adesc&f%5B%5D=cf_15&op%5Bcf_15%5D=%3D&f%5B%5D=&c%5B%5D=status&c%5B%5D=cf_10&c%5B%5D=cf_13&c%5B%5D=cf_18&c%5B%5D=cf_19&c%5B%5D=cf_1&c%5B%5D=updated_on&v%5Bcf_15%5D%5B%5D=~Technique_ID~"}],
+ "custom_context_menu_items": [ {"label": "view related plays","url": " https://{{URL_BASE}}/playbook/projects/detection-playbooks/issues?utf8=%E2%9C%93&set_filter=1&sort=id%3Adesc&f%5B%5D=cf_15&op%5Bcf_15%5D=%3D&f%5B%5D=&c%5B%5D=status&c%5B%5D=cf_10&c%5B%5D=cf_13&c%5B%5D=cf_18&c%5B%5D=cf_19&c%5B%5D=cf_1&c%5B%5D=updated_on&v%5Bcf_15%5D%5B%5D=~Technique_ID~"}],
"default_layers": {
"enabled": true,
diff --git a/salt/redis/etc/redis.conf b/salt/redis/etc/redis.conf
index d5f39da99..cf43bc04c 100644
--- a/salt/redis/etc/redis.conf
+++ b/salt/redis/etc/redis.conf
@@ -59,7 +59,7 @@
# internet, binding to all the interfaces is dangerous and will expose the
# instance to everybody on the internet. So by default we uncomment the
# following bind directive, that will force Redis to listen only into
-# the IPv4 lookback interface address (this means Redis will be able to
+# the IPv4 loopback interface address (this means Redis will be able to
# accept connections only from clients running into the same computer it
# is running).
#
@@ -86,6 +86,11 @@ bind 0.0.0.0
# even if no authentication is configured, nor a specific set of interfaces
# are explicitly listed using the "bind" directive.
protected-mode no
+tls-cert-file /certs/redis.crt
+tls-key-file /certs/redis.key
+tls-ca-cert-file /certs/ca.crt
+tls-port 9696
+tls-auth-clients no
# Accept connections on the specified port, default is 6379 (IANA #815344).
# If port 0 is specified Redis will not listen on a TCP socket.
@@ -129,6 +134,92 @@ timeout 0
# Redis default starting with Redis 3.2.1.
tcp-keepalive 300
+################################# TLS/SSL #####################################
+
+# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration
+# directive can be used to define TLS-listening ports. To enable TLS on the
+# default port, use:
+#
+# port 0
+# tls-port 6379
+
+# Configure a X.509 certificate and private key to use for authenticating the
+# server to connected clients, masters or cluster peers. These files should be
+# PEM formatted.
+#
+# tls-cert-file redis.crt
+# tls-key-file redis.key
+
+# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange:
+#
+# tls-dh-params-file redis.dh
+
+# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
+# clients and peers. Redis requires an explicit configuration of at least one
+# of these, and will not implicitly use the system wide configuration.
+#
+# tls-ca-cert-file ca.crt
+# tls-ca-cert-dir /etc/ssl/certs
+
+# By default, clients (including replica servers) on a TLS port are required
+# to authenticate using valid client side certificates.
+#
+# It is possible to disable authentication using this directive.
+#
+# tls-auth-clients no
+
+# By default, a Redis replica does not attempt to establish a TLS connection
+# with its master.
+#
+# Use the following directive to enable TLS on replication links.
+#
+# tls-replication yes
+
+# By default, the Redis Cluster bus uses a plain TCP connection. To enable
+# TLS for the bus protocol, use the following directive:
+#
+# tls-cluster yes
+
+# Explicitly specify TLS versions to support. Allowed values are case insensitive
+# and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) or
+# any combination. To enable only TLSv1.2 and TLSv1.3, use:
+#
+# tls-protocols "TLSv1.2 TLSv1.3"
+
+# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information
+# about the syntax of this string.
+#
+# Note: this configuration applies only to <= TLSv1.2.
+#
+# tls-ciphers DEFAULT:!MEDIUM
+
+# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more
+# information about the syntax of this string, and specifically for TLSv1.3
+# ciphersuites.
+#
+# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
+
+# When choosing a cipher, use the server's preference instead of the client
+# preference. By default, the server follows the client's preference.
+#
+# tls-prefer-server-ciphers yes
+
+# By default, TLS session caching is enabled to allow faster and less expensive
+# reconnections by clients that support it. Use the following directive to disable
+# caching.
+#
+# tls-session-caching no
+
+# Change the default number of TLS sessions cached. A zero value sets the cache
+# to unlimited size. The default size is 20480.
+#
+# tls-session-cache-size 5000
+
+# Change the default timeout of cached TLS sessions. The default timeout is 300
+# seconds.
+#
+# tls-session-cache-timeout 60
+
################################# GENERAL #####################################
# By default Redis does not run as a daemon. Use 'yes' if you need it.
@@ -252,6 +343,19 @@ rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
+# Remove RDB files used by replication in instances without persistence
+# enabled. By default this option is disabled, however there are environments
+# where for regulations or other security concerns, RDB files persisted on
+# disk by masters in order to feed replicas, or stored on disk by replicas
+# in order to load them for the initial synchronization, should be deleted
+# ASAP. Note that this option ONLY WORKS in instances that have both AOF
+# and RDB persistence disabled, otherwise is completely ignored.
+#
+# An alternative (and sometimes better) way to obtain the same effect is
+# to use diskless replication on both master and replicas instances. However
+# in the case of replicas, diskless is not always an option.
+rdb-del-sync-files no
+
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
@@ -260,88 +364,104 @@ dbfilename dump.rdb
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
-dir /redis
+dir ./
################################# REPLICATION #################################
-# Master-Slave replication. Use slaveof to make a Redis instance a copy of
+# Master-Replica replication. Use replicaof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
#
+# +------------------+ +---------------+
+# | Master | ---> | Replica |
+# | (receive writes) | | (exact copy) |
+# +------------------+ +---------------+
+#
# 1) Redis replication is asynchronous, but you can configure a master to
# stop accepting writes if it appears to be not connected with at least
-# a given number of slaves.
-# 2) Redis slaves are able to perform a partial resynchronization with the
+# a given number of replicas.
+# 2) Redis replicas are able to perform a partial resynchronization with the
# master if the replication link is lost for a relatively small amount of
# time. You may want to configure the replication backlog size (see the next
# sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
-# network partition slaves automatically try to reconnect to masters
+# network partition replicas automatically try to reconnect to masters
# and resynchronize with them.
#
-# slaveof <masterip> <masterport>
+# replicaof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
-# directive below) it is possible to tell the slave to authenticate before
+# directive below) it is possible to tell the replica to authenticate before
# starting the replication synchronization process, otherwise the master will
-# refuse the slave request.
+# refuse the replica request.
#
# masterauth <master-password>
-
-# When a slave loses its connection with the master, or when the replication
-# is still in progress, the slave can act in two different ways:
#
-# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+# However this is not enough if you are using Redis ACLs (for Redis version
+# 6 or greater), and the default user is not capable of running the PSYNC
+# command and/or other commands needed for replication. In this case it's
+# better to configure a special user to use with replication, and specify the
+# masteruser configuration as such:
+#
+# masteruser <username>
+#
+# When masteruser is specified, the replica will authenticate against its
+# master using the new AUTH form: AUTH <username> <password>.
+
+# When a replica loses its connection with the master, or when the replication
+# is still in progress, the replica can act in two different ways:
+#
+# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
-# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+# 2) if replica-serve-stale-data is set to 'no' the replica will reply with
# an error "SYNC with master in progress" to all the kind of commands
-# but to INFO and SLAVEOF.
+# but to INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG,
+# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB,
+# COMMAND, POST, HOST: and LATENCY.
#
-slave-serve-stale-data yes
+replica-serve-stale-data yes
-# You can configure a slave instance to accept writes or not. Writing against
-# a slave instance may be useful to store some ephemeral data (because data
-# written on a slave will be easily deleted after resync with the master) but
+# You can configure a replica instance to accept writes or not. Writing against
+# a replica instance may be useful to store some ephemeral data (because data
+# written on a replica will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
-# Since Redis 2.6 by default slaves are read-only.
+# Since Redis 2.6 by default replicas are read-only.
#
-# Note: read only slaves are not designed to be exposed to untrusted clients
+# Note: read only replicas are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
-# Still a read only slave exports by default all the administrative commands
+# Still a read only replica exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
-# security of read only slaves using 'rename-command' to shadow all the
+# security of read only replicas using 'rename-command' to shadow all the
# administrative / dangerous commands.
-slave-read-only yes
+replica-read-only yes
# Replication SYNC strategy: disk or socket.
#
-# -------------------------------------------------------
-# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
-# -------------------------------------------------------
+# New replicas and reconnecting replicas that are not able to continue the
+# replication process just receiving differences, need to do what is called a
+# "full synchronization". An RDB file is transmitted from the master to the
+# replicas.
#
-# New slaves and reconnecting slaves that are not able to continue the replication
-# process just receiving differences, need to do what is called a "full
-# synchronization". An RDB file is transmitted from the master to the slaves.
# The transmission can happen in two different ways:
#
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
# file on disk. Later the file is transferred by the parent
-# process to the slaves incrementally.
+# process to the replicas incrementally.
# 2) Diskless: The Redis master creates a new process that directly writes the
-# RDB file to slave sockets, without touching the disk at all.
+# RDB file to replica sockets, without touching the disk at all.
#
-# With disk-backed replication, while the RDB file is generated, more slaves
-# can be queued and served with the RDB file as soon as the current child producing
-# the RDB file finishes its work. With diskless replication instead once
-# the transfer starts, new slaves arriving will be queued and a new transfer
-# will start when the current one terminates.
+# With disk-backed replication, while the RDB file is generated, more replicas
+# can be queued and served with the RDB file as soon as the current child
+# producing the RDB file finishes its work. With diskless replication instead
+# once the transfer starts, new replicas arriving will be queued and a new
+# transfer will start when the current one terminates.
#
# When diskless replication is used, the master waits a configurable amount of
-# time (in seconds) before starting the transfer in the hope that multiple slaves
-# will arrive and the transfer can be parallelized.
+# time (in seconds) before starting the transfer in the hope that multiple
+# replicas will arrive and the transfer can be parallelized.
#
# With slow disks and fast (large bandwidth) networks, diskless replication
# works better.
@@ -349,157 +469,334 @@ repl-diskless-sync no
# When diskless replication is enabled, it is possible to configure the delay
# the server waits in order to spawn the child that transfers the RDB via socket
-# to the slaves.
+# to the replicas.
#
# This is important since once the transfer starts, it is not possible to serve
-# new slaves arriving, that will be queued for the next RDB transfer, so the server
-# waits a delay in order to let more slaves arrive.
+# new replicas arriving, that will be queued for the next RDB transfer, so the
+# server waits a delay in order to let more replicas arrive.
#
# The delay is specified in seconds, and by default is 5 seconds. To disable
# it entirely just set it to 0 seconds and the transfer will start ASAP.
repl-diskless-sync-delay 5
-# Slaves send PINGs to server in a predefined interval. It's possible to change
-# this interval with the repl_ping_slave_period option. The default value is 10
-# seconds.
+# -----------------------------------------------------------------------------
+# WARNING: RDB diskless load is experimental. Since in this setup the replica
+# does not immediately store an RDB on disk, it may cause data loss during
+# failovers. RDB diskless load + Redis modules not handling I/O reads may also
+# cause Redis to abort in case of I/O errors during the initial synchronization
+# stage with the master. Use only if you know what you are doing.
+# -----------------------------------------------------------------------------
#
-# repl-ping-slave-period 10
+# Replica can load the RDB it reads from the replication link directly from the
+# socket, or store the RDB to a file and read that file after it was completely
+# received from the master.
+#
+# In many cases the disk is slower than the network, and storing and loading
+# the RDB file may increase replication time (and even increase the master's
+# Copy on Write memory and replica buffers).
+# However, parsing the RDB file directly from the socket may mean that we have
+# to flush the contents of the current database before the full rdb was
+# received. For this reason we have the following options:
+#
+# "disabled" - Don't use diskless load (store the rdb file to the disk first)
+# "on-empty-db" - Use diskless load only when it is completely safe.
+# "swapdb" - Keep a copy of the current db contents in RAM while parsing
+# the data directly from the socket. note that this requires
+# sufficient memory, if you don't have it, you risk an OOM kill.
+repl-diskless-load disabled
+
+# Replicas send PINGs to server in a predefined interval. It's possible to
+# change this interval with the repl_ping_replica_period option. The default
+# value is 10 seconds.
+#
+# repl-ping-replica-period 10
# The following option sets the replication timeout for:
#
-# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
-# 2) Master timeout from the point of view of slaves (data, pings).
-# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
+# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
+# 2) Master timeout from the point of view of replicas (data, pings).
+# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
-# specified for repl-ping-slave-period otherwise a timeout will be detected
-# every time there is low traffic between the master and the slave.
+# specified for repl-ping-replica-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the replica.
#
# repl-timeout 60
-# Disable TCP_NODELAY on the slave socket after SYNC?
+# Disable TCP_NODELAY on the replica socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
-# less bandwidth to send data to slaves. But this can add a delay for
-# the data to appear on the slave side, up to 40 milliseconds with
+# less bandwidth to send data to replicas. But this can add a delay for
+# the data to appear on the replica side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
-# If you select "no" the delay for data to appear on the slave side will
+# If you select "no" the delay for data to appear on the replica side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
-# or when the master and slaves are many hops away, turning this to "yes" may
+# or when the master and replicas are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# Set the replication backlog size. The backlog is a buffer that accumulates
-# slave data when slaves are disconnected for some time, so that when a slave
-# wants to reconnect again, often a full resync is not needed, but a partial
-# resync is enough, just passing the portion of data the slave missed while
-# disconnected.
+# replica data when replicas are disconnected for some time, so that when a
+# replica wants to reconnect again, often a full resync is not needed, but a
+# partial resync is enough, just passing the portion of data the replica
+# missed while disconnected.
#
-# The bigger the replication backlog, the longer the time the slave can be
+# The bigger the replication backlog, the longer the time the replica can be
# disconnected and later be able to perform a partial resynchronization.
#
-# The backlog is only allocated once there is at least a slave connected.
+# The backlog is only allocated once there is at least a replica connected.
#
# repl-backlog-size 1mb
-# After a master has no longer connected slaves for some time, the backlog
+# After a master has no longer connected replicas for some time, the backlog
# will be freed. The following option configures the amount of seconds that
-# need to elapse, starting from the time the last slave disconnected, for
+# need to elapse, starting from the time the last replica disconnected, for
# the backlog buffer to be freed.
#
-# Note that slaves never free the backlog for timeout, since they may be
+# Note that replicas never free the backlog for timeout, since they may be
# promoted to masters later, and should be able to correctly "partially
-# resynchronize" with the slaves: hence they should always accumulate backlog.
+# resynchronize" with the replicas: hence they should always accumulate backlog.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600
-# The slave priority is an integer number published by Redis in the INFO output.
-# It is used by Redis Sentinel in order to select a slave to promote into a
-# master if the master is no longer working correctly.
+# The replica priority is an integer number published by Redis in the INFO
+# output. It is used by Redis Sentinel in order to select a replica to promote
+# into a master if the master is no longer working correctly.
#
-# A slave with a low priority number is considered better for promotion, so
-# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
-# pick the one with priority 10, that is the lowest.
+# A replica with a low priority number is considered better for promotion, so
+# for instance if there are three replicas with priority 10, 100, 25 Sentinel
+# will pick the one with priority 10, that is the lowest.
#
-# However a special priority of 0 marks the slave as not able to perform the
-# role of master, so a slave with priority of 0 will never be selected by
+# However a special priority of 0 marks the replica as not able to perform the
+# role of master, so a replica with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
-slave-priority 100
+replica-priority 100
# It is possible for a master to stop accepting writes if there are less than
-# N slaves connected, having a lag less or equal than M seconds.
+# N replicas connected, having a lag less or equal than M seconds.
#
-# The N slaves need to be in "online" state.
+# The N replicas need to be in "online" state.
#
# The lag in seconds, that must be <= the specified value, is calculated from
-# the last ping received from the slave, that is usually sent every second.
+# the last ping received from the replica, that is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
-# will limit the window of exposure for lost writes in case not enough slaves
+# will limit the window of exposure for lost writes in case not enough replicas
# are available, to the specified number of seconds.
#
-# For example to require at least 3 slaves with a lag <= 10 seconds use:
+# For example to require at least 3 replicas with a lag <= 10 seconds use:
#
-# min-slaves-to-write 3
-# min-slaves-max-lag 10
+# min-replicas-to-write 3
+# min-replicas-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
-# By default min-slaves-to-write is set to 0 (feature disabled) and
-# min-slaves-max-lag is set to 10.
+# By default min-replicas-to-write is set to 0 (feature disabled) and
+# min-replicas-max-lag is set to 10.
# A Redis master is able to list the address and port of the attached
-# slaves in different ways. For example the "INFO replication" section
+# replicas in different ways. For example the "INFO replication" section
# offers this information, which is used, among other tools, by
-# Redis Sentinel in order to discover slave instances.
+# Redis Sentinel in order to discover replica instances.
# Another place where this info is available is in the output of the
# "ROLE" command of a master.
#
-# The listed IP and address normally reported by a slave is obtained
+# The listed IP and address normally reported by a replica is obtained
# in the following way:
#
# IP: The address is auto detected by checking the peer address
-# of the socket used by the slave to connect with the master.
+# of the socket used by the replica to connect with the master.
#
-# Port: The port is communicated by the slave during the replication
-# handshake, and is normally the port that the slave is using to
-# list for connections.
+# Port: The port is communicated by the replica during the replication
+# handshake, and is normally the port that the replica is using to
+# listen for connections.
#
# However when port forwarding or Network Address Translation (NAT) is
-# used, the slave may be actually reachable via different IP and port
-# pairs. The following two options can be used by a slave in order to
+# used, the replica may be actually reachable via different IP and port
+# pairs. The following two options can be used by a replica in order to
# report to its master a specific set of IP and port, so that both INFO
# and ROLE will report those values.
#
# There is no need to use both the options if you need to override just
# the port or the IP address.
#
-# slave-announce-ip 5.5.5.5
-# slave-announce-port 1234
+# replica-announce-ip 5.5.5.5
+# replica-announce-port 1234
+
+############################### KEYS TRACKING #################################
+
+# Redis implements server assisted support for client side caching of values.
+# This is implemented using an invalidation table that remembers, using
+# 16 millions of slots, what clients may have certain subsets of keys. In turn
+# this is used in order to send invalidation messages to clients. Please
+# to understand more about the feature check this page:
+#
+# https://redis.io/topics/client-side-caching
+#
+# When tracking is enabled for a client, all the read only queries are assumed
+# to be cached: this will force Redis to store information in the invalidation
+# table. When keys are modified, such information is flushed away, and
+# invalidation messages are sent to the clients. However if the workload is
+# heavily dominated by reads, Redis could use more and more memory in order
+# to track the keys fetched by many clients.
+#
+# For this reason it is possible to configure a maximum fill value for the
+# invalidation table. By default it is set to 1M of keys, and once this limit
+# is reached, Redis will start to evict keys in the invalidation table
+# even if they were not modified, just to reclaim memory: this will in turn
+# force the clients to invalidate the cached values. Basically the table
+# maximum size is a trade off between the memory you want to spend server
+# side to track information about who cached what, and the ability of clients
+# to retain cached objects in memory.
+#
+# If you set the value to 0, it means there are no limits, and Redis will
+# retain as many keys as needed in the invalidation table.
+# In the "stats" INFO section, you can find information about the number of
+# keys in the invalidation table at every given moment.
+#
+# Note: when key tracking is used in broadcasting mode, no memory is used
+# on the server side, so this setting is useless.
+#
+# tracking-table-max-keys 1000000
################################## SECURITY ###################################
-# Require clients to issue AUTH before processing any other
-# commands. This might be useful in environments in which you do not trust
-# others with access to the host running redis-server.
-#
-# This should stay commented out for backward compatibility and because most
-# people do not need auth (e.g. they run their own servers).
-#
# Warning: since Redis is pretty fast an outside user can try up to
-# 150k passwords per second against a good box. This means that you should
-# use a very strong password otherwise it will be very easy to break.
+# 1 million passwords per second against a modern box. This means that you
+# should use very strong passwords, otherwise they will be very easy to break.
+# Note that because the password is really a shared secret between the client
+# and the server, and should not be memorized by any human, the password
+# can be easily a long string from /dev/urandom or whatever, so by using a
+# long and unguessable password no brute force attack will be possible.
+
+# Redis ACL users are defined in the following format:
+#
+# user <username> ... acl rules ...
+#
+# For example:
+#
+# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
+#
+# The special username "default" is used for new connections. If this user
+# has the "nopass" rule, then new connections will be immediately authenticated
+# as the "default" user without the need of any password provided via the
+# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
+# the connections will start in not authenticated state, and will require
+# AUTH (or the HELLO command AUTH option) in order to be authenticated and
+# start to work.
+#
+# The ACL rules that describe what a user can do are the following:
+#
+# on Enable the user: it is possible to authenticate as this user.
+# off Disable the user: it's no longer possible to authenticate
+# with this user, however the already authenticated connections
+# will still work.
+# +<command> Allow the execution of that command
+# -<command> Disallow the execution of that command
+# +@<category> Allow the execution of all the commands in such category
+# with valid categories are like @admin, @set, @sortedset, ...
+# and so forth, see the full list in the server.c file where
+# the Redis command table is described and defined.
+# The special category @all means all the commands, both those currently
+# present in the server and those that will be loaded in the future
+# via modules.
+# +<command>|subcommand Allow a specific subcommand of an otherwise
+# disabled command. Note that this form is not
+# allowed as negative like -DEBUG|SEGFAULT, but
+# only additive starting with "+".
+# allcommands Alias for +@all. Note that it implies the ability to execute
+# all the future commands loaded via the modules system.
+# nocommands Alias for -@all.
+# ~<pattern> Add a pattern of keys that can be mentioned as part of
+# commands. For instance ~* allows all the keys. The pattern
+# is a glob-style pattern like the one of KEYS.
+# It is possible to specify multiple patterns.
+# allkeys Alias for ~*
+# resetkeys Flush the list of allowed keys patterns.
+# ><password> Add this password to the list of valid passwords for the user.
+# For example >mypass will add "mypass" to the list.
+# This directive clears the "nopass" flag (see later).
+# <<password> Remove this password from the list of valid passwords.
+# nopass All the set passwords of the user are removed, and the user
+# is flagged as requiring no password: it means that every
+# password will work against this user. If this directive is
+# used for the default user, every new connection will be
+# immediately authenticated with the default user without
+# any explicit AUTH command required. Note that the "resetpass"
+# directive will clear this condition.
+# resetpass Flush the list of allowed passwords. Moreover removes the
+# "nopass" status. After "resetpass" the user has no associated
+# passwords and there is no way to authenticate without adding
+# some password (or setting it as "nopass" later).
+# reset Performs the following actions: resetpass, resetkeys, off,
+# -@all. The user returns to the same state it has immediately
+# after its creation.
+#
+# ACL rules can be specified in any order: for instance you can start with
+# passwords, then flags, or key patterns. However note that the additive
+# and subtractive rules will CHANGE MEANING depending on the ordering.
+# For instance see the following example:
+#
+# user alice on +@all -DEBUG ~* >somepassword
+#
+# This will allow "alice" to use all the commands with the exception of the
+# DEBUG command, since +@all added all the commands to the set of the commands
+# alice can use, and later DEBUG was removed. However if we invert the order
+# of two ACL rules the result will be different:
+#
+# user alice on -DEBUG +@all ~* >somepassword
+#
+# Now DEBUG was removed when alice still had no commands in the set of allowed
+# commands, later all the commands are added, so the user will be able to
+# execute everything.
+#
+# Basically ACL rules are processed left-to-right.
+#
+# For more information about ACL configuration please refer to
+# the Redis web site at https://redis.io/topics/acl
+
+# ACL LOG
+#
+# The ACL Log tracks failed commands and authentication events associated
+# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
+# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
+# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
+acllog-max-len 128
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside redis.conf to describe users.
+#
+# aclfile /etc/redis/users.acl
+
+# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
+# layer on top of the new ACL system. The option effect will be just setting
+# the password for the default user. Clients will still authenticate using
+# AUTH <password> as usually, or more explicitly with AUTH default <password>
+# if they follow the new protocol: both will work.
#
# requirepass foobared
-# Command renaming.
+# Command renaming (DEPRECATED).
+#
+# ------------------------------------------------------------------------
+# WARNING: avoid using this option if possible. Instead use ACLs to remove
+# commands from the default user, and put them only in some admin user you
+# create for administrative purposes.
+# ------------------------------------------------------------------------
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
@@ -516,7 +813,7 @@ slave-priority 100
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
-# AOF file or transmitted to slaves may cause problems.
+# AOF file or transmitted to replicas may cause problems.
################################### CLIENTS ####################################
@@ -529,6 +826,11 @@ slave-priority 100
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
+# IMPORTANT: When Redis Cluster is used, the max number of connections is also
+# shared with the cluster bus: every node in the cluster will use two
+# connections, one incoming and another outgoing. It is important to size the
+# limit accordingly in case of very large clusters.
+#
# maxclients 10000
############################## MEMORY MANAGEMENT ################################
@@ -545,27 +847,27 @@ slave-priority 100
# This option is usually useful when using Redis as an LRU or LFU cache, or to
# set a hard memory limit for an instance (using the 'noeviction' policy).
#
-# WARNING: If you have slaves attached to an instance with maxmemory on,
-# the size of the output buffers needed to feed the slaves are subtracted
+# WARNING: If you have replicas attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the replicas are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
-# buffer of slaves is full with DELs of keys evicted triggering the deletion
+# buffer of replicas is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
-# In short... if you have slaves attached it is suggested that you set a lower
-# limit for maxmemory so that there is some free RAM on the system for slave
+# In short... if you have replicas attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for replica
# output buffers (but this is not needed if the policy is 'noeviction').
#
-maxmemory 817m
+# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
-# is reached. You can select among five behaviors:
+# is reached. You can select one from the following behaviors:
#
-# volatile-lru -> Evict using approximated LRU among the keys with an expire set.
+# volatile-lru -> Evict using approximated LRU, only keys with an expire set.
# allkeys-lru -> Evict any key using approximated LRU.
-# volatile-lfu -> Evict using approximated LFU among the keys with an expire set.
+# volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
# allkeys-lfu -> Evict any key using approximated LFU.
-# volatile-random -> Remove a random key among the ones with an expire set.
+# volatile-random -> Remove a random key having an expire set.
# allkeys-random -> Remove a random key, any key.
# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
# noeviction -> Don't evict anything, just return an error on write operations.
@@ -587,7 +889,7 @@ maxmemory 817m
#
# The default is:
#
-maxmemory-policy noeviction
+# maxmemory-policy noeviction
# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can tune it for speed or
@@ -600,6 +902,43 @@ maxmemory-policy noeviction
#
# maxmemory-samples 5
+# Starting from Redis 5, by default a replica will ignore its maxmemory setting
+# (unless it is promoted to master after a failover or manually). It means
+# that the eviction of keys will be just handled by the master, sending the
+# DEL commands to the replica as keys evict in the master side.
+#
+# This behavior ensures that masters and replicas stay consistent, and is usually
+# what you want, however if your replica is writable, or you want the replica
+# to have a different memory setting, and you are sure all the writes performed
+# to the replica are idempotent, then you may change this default (but be sure
+# to understand what you are doing).
+#
+# Note that since the replica by default does not evict, it may end up using more
+# memory than the one set via maxmemory (there are certain buffers that may
+# be larger on the replica, or data structures may sometimes take more memory
+# and so forth). So make sure you monitor your replicas and make sure they
+# have enough memory to never hit a real out-of-memory condition before the
+# master hits the configured maxmemory setting.
+#
+# replica-ignore-maxmemory yes
+
+# Redis reclaims expired keys in two ways: upon access when those keys are
+# found to be expired, and also in background, in what is called the
+# "active expire key". The key space is slowly and interactively scanned
+# looking for expired keys to reclaim, so that it is possible to free memory
+# of keys that are expired and will never be accessed again in a short time.
+#
+# The default effort of the expire cycle will try to avoid having more than
+# ten percent of expired keys still in memory, and will try to avoid consuming
+# more than 25% of total memory and to add latency to the system. However
+# it is possible to increase the expire "effort" that is normally set to
+# "1", to a greater value, up to the value "10". At its maximum value the
+# system will use more CPU, longer cycles (and technically may introduce
+# more latency), and will tolerate fewer already expired keys still present
+# in the system. It's a tradeoff between memory, CPU and latency.
+#
+# active-expire-effort 1
+
############################# LAZY FREEING ####################################
# Redis has two primitives to delete keys. One is called DEL and is a blocking
@@ -635,19 +974,72 @@ maxmemory-policy noeviction
# or SORT with STORE option may delete existing keys. The SET command
# itself removes any old content of the specified key in order to replace
# it with the specified string.
-# 4) During replication, when a slave performs a full resynchronization with
+# 4) During replication, when a replica performs a full resynchronization with
# its master, the content of the whole database is removed in order to
-# load the RDB file just transfered.
+# load the RDB file just transferred.
#
# In all the above cases the default is to delete objects in a blocking way,
# like if DEL was called. However you can configure each case specifically
# in order to instead release memory in a non-blocking way like if UNLINK
-# was called, using the following configuration directives:
+# was called, using the following configuration directives.
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
-slave-lazy-flush no
+replica-lazy-flush no
+
+# It is also possible, for the case when to replace the user code DEL calls
+# with UNLINK calls is not easy, to modify the default behavior of the DEL
+# command to act exactly like UNLINK, using the following configuration
+# directive:
+
+lazyfree-lazy-user-del no
+
+################################ THREADED I/O #################################
+
+# Redis is mostly single threaded, however there are certain threaded
+# operations such as UNLINK, slow I/O accesses and other things that are
+# performed on side threads.
+#
+# Now it is also possible to handle Redis clients socket reads and writes
+# in different I/O threads. Since especially writing is so slow, normally
+# Redis users use pipelining in order to speed up Redis performance per
+# core, and spawn multiple instances in order to scale more. Using I/O
+# threads it is possible to easily speed up Redis twofold without resorting
+# to pipelining or sharding of the instance.
+#
+# By default threading is disabled; we suggest enabling it only on machines
+# that have at least 4 cores, leaving at least one spare core.
+# Using more than 8 threads is unlikely to help much. We also recommend using
+# threaded I/O only if you actually have performance problems, with Redis
+# instances being able to use a quite big percentage of CPU time, otherwise
+# there is no point in using this feature.
+#
+# So for instance if you have a four core box, try to use 2 or 3 I/O
+# threads, if you have 8 cores, try to use 6 threads. In order to
+# enable I/O threads use the following configuration directive:
+#
+# io-threads 4
+#
+# Setting io-threads to 1 will just use the main thread as usual.
+# When I/O threads are enabled, we only use threads for writes, that is
+# to thread the write(2) syscall and transfer the client buffers to the
+# socket. However it is also possible to enable threading of reads and
+# protocol parsing using the following configuration directive, by setting
+# it to yes:
+#
+# io-threads-do-reads no
+#
+# Usually threading reads doesn't help much.
+#
+# NOTE 1: This configuration directive cannot be changed at runtime via
+# CONFIG SET. Also, this feature currently does not work when SSL is
+# enabled.
+#
+# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
+# sure you also run the benchmark itself in threaded mode, using the
+# --threads option to match the number of Redis threads, otherwise you'll not
+# be able to notice the improvements.
############################## APPEND ONLY MODE ###############################
@@ -776,10 +1168,7 @@ aof-load-truncated yes
# When loading Redis recognizes that the AOF file starts with the "REDIS"
# string and loads the prefixed RDB file, and continues loading the AOF
# tail.
-#
-# This is currently turned off by default in order to avoid the surprise
-# of a format change, but will at some point be used as the default.
-aof-use-rdb-preamble no
+aof-use-rdb-preamble yes
################################ LUA SCRIPTING ###############################
@@ -800,13 +1189,7 @@ aof-use-rdb-preamble no
lua-time-limit 5000
################################ REDIS CLUSTER ###############################
-#
-# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
-# in order to mark it as "mature" we need to wait for a non trivial percentage
-# of users to deploy it in production.
-# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-#
+
# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
# started as cluster nodes can. In order to start a Redis instance as a
# cluster node enable the cluster support uncommenting the following:
@@ -827,42 +1210,42 @@ lua-time-limit 5000
#
# cluster-node-timeout 15000
-# A slave of a failing master will avoid to start a failover if its data
+# A replica of a failing master will avoid to start a failover if its data
# looks too old.
#
-# There is no simple way for a slave to actually have an exact measure of
+# There is no simple way for a replica to actually have an exact measure of
# its "data age", so the following two checks are performed:
#
-# 1) If there are multiple slaves able to failover, they exchange messages
-# in order to try to give an advantage to the slave with the best
+# 1) If there are multiple replicas able to failover, they exchange messages
+# in order to try to give an advantage to the replica with the best
# replication offset (more data from the master processed).
-# Slaves will try to get their rank by offset, and apply to the start
+# Replicas will try to get their rank by offset, and apply to the start
# of the failover a delay proportional to their rank.
#
-# 2) Every single slave computes the time of the last interaction with
+# 2) Every single replica computes the time of the last interaction with
# its master. This can be the last ping or command received (if the master
# is still in the "connected" state), or the time that elapsed since the
# disconnection with the master (if the replication link is currently down).
-# If the last interaction is too old, the slave will not try to failover
+# If the last interaction is too old, the replica will not try to failover
# at all.
#
-# The point "2" can be tuned by user. Specifically a slave will not perform
+# The point "2" can be tuned by user. Specifically a replica will not perform
# the failover if, since the last interaction with the master, the time
# elapsed is greater than:
#
-# (node-timeout * slave-validity-factor) + repl-ping-slave-period
+# (node-timeout * replica-validity-factor) + repl-ping-replica-period
#
-# So for example if node-timeout is 30 seconds, and the slave-validity-factor
-# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
-# slave will not try to failover if it was not able to talk with the master
+# So for example if node-timeout is 30 seconds, and the replica-validity-factor
+# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
+# replica will not try to failover if it was not able to talk with the master
# for longer than 310 seconds.
#
-# A large slave-validity-factor may allow slaves with too old data to failover
+# A large replica-validity-factor may allow replicas with too old data to failover
# a master, while a too small value may prevent the cluster from being able to
-# elect a slave at all.
+# elect a replica at all.
#
-# For maximum availability, it is possible to set the slave-validity-factor
-# to a value of 0, which means, that slaves will always try to failover the
+# For maximum availability, it is possible to set the replica-validity-factor
+# to a value of 0, which means that replicas will always try to failover the
# master regardless of the last time they interacted with the master.
# (However they'll always try to apply a delay proportional to their
# offset rank).
@@ -870,22 +1253,22 @@ lua-time-limit 5000
# Zero is the only value able to guarantee that when all the partitions heal
# the cluster will always be able to continue.
#
-# cluster-slave-validity-factor 10
+# cluster-replica-validity-factor 10
-# Cluster slaves are able to migrate to orphaned masters, that are masters
-# that are left without working slaves. This improves the cluster ability
+# Cluster replicas are able to migrate to orphaned masters, that is, masters
+# left without working replicas. This improves the cluster ability
# to resist to failures as otherwise an orphaned master can't be failed over
-# in case of failure if it has no working slaves.
+# in case of failure if it has no working replicas.
#
-# Slaves migrate to orphaned masters only if there are still at least a
-# given number of other working slaves for their old master. This number
-# is the "migration barrier". A migration barrier of 1 means that a slave
-# will migrate only if there is at least 1 other working slave for its master
-# and so forth. It usually reflects the number of slaves you want for every
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
# master in your cluster.
#
-# Default is 1 (slaves migrate only if their masters remain with at least
-# one slave). To disable migration just set it to a very large value.
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value.
# A value of 0 can be set but is useful only for debugging and dangerous
# in production.
#
@@ -904,7 +1287,7 @@ lua-time-limit 5000
#
# cluster-require-full-coverage yes
-# This option, when set to yes, prevents slaves from trying to failover its
+# This option, when set to yes, prevents replicas from trying to failover their
# master during master failures. However the master can still perform a
# manual failover, if forced to do so.
#
@@ -912,7 +1295,23 @@ lua-time-limit 5000
# data center operations, where we want one side to never be promoted if not
# in the case of a total DC failure.
#
-# cluster-slave-no-failover no
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while the
+# cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases. The first case is for when an application
+# doesn't require consistency of data during node failures or network partitions.
+# One example of this is a cache, where as long as the node has the data it
+# should be able to serve it.
+#
+# The second use case is for configurations that don't meet the recommended
+# three shards but want to enable cluster mode and scale later. A
+# master outage in a 1 or 2 shard configuration causes a read/write outage
+# for the entire cluster without this option set; with it set, there is only
+# a write outage. Without a quorum of masters, slot ownership will not
+# change automatically.
+#
+# cluster-allow-reads-when-down no
# In order to setup your cluster make sure to read the documentation
# available at http://redis.io web site.
@@ -1020,7 +1419,11 @@ latency-monitor-threshold 0
# z Sorted set commands
# x Expired events (events generated every time a key expires)
# e Evicted events (events generated when a key is evicted for maxmemory)
-# A Alias for g$lshzxe, so that the "AKE" string means all the events.
+# t Stream commands
+# m Key-miss events (Note: It is not included in the 'A' class)
+# A Alias for g$lshzxet, so that the "AKE" string means all the events
+# (except key-miss events, which are excluded from 'A' due to their
+# unique nature).
#
# The "notify-keyspace-events" takes as argument a string that is composed
# of zero or multiple characters. The empty string means that notifications
@@ -1041,6 +1444,61 @@ latency-monitor-threshold 0
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""
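+
+# For example, to enable keyevent notifications for expired keys only, one
+# could set (an illustrative value, not the setting used here):
+#
+# notify-keyspace-events "Ex"
+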
+############################### GOPHER SERVER #################################
+
+# Redis contains an implementation of the Gopher protocol, as specified in
+# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
+#
+# The Gopher protocol was very popular in the late '90s. It is an alternative
+# to the web, and the implementation, both server and client side, is so
+# simple that the Redis server needs just 100 lines of code to implement
+# this support.
+#
+# What do you do with Gopher nowadays? Well, Gopher never *really* died, and
+# lately there is a movement to resurrect Gopher's more hierarchical content,
+# composed of just plain text documents. Some want a simpler internet, others
+# believe that the mainstream internet became too controlled, and it's cool
+# to create an alternative space for people who want a bit of fresh air.
+#
+# Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
+# as a gift.
+#
+# --- HOW IT WORKS ---
+#
+# The Redis Gopher support uses the inline protocol of Redis, and specifically
+# two kinds of inline requests that were otherwise illegal: an empty request
+# or any request that starts with "/" (there are no Redis commands starting
+# with such a slash). Normal RESP2/RESP3 requests are completely out of the
+# path of the Gopher protocol implementation and are served as usual.
+#
+# If you open a connection to Redis when Gopher is enabled and send it
+# a string like "/foo", if there is a key named "/foo" it is served via the
+# Gopher protocol.
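+#
+# A hypothetical probe from a shell (assuming gopher-enabled is set to yes
+# and a key named "/foo" exists; nc is netcat, shown only as a sketch):
+#
+# printf '/foo\r\n' | nc localhost 6379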
+#
+# In order to create a real Gopher "hole" (the name of a Gopher site in
+# Gopher parlance), you likely need a script like the following:
+#
+# https://github.com/antirez/gopher2redis
+#
+# --- SECURITY WARNING ---
+#
+# If you plan to put Redis on the internet in a publicly accessible address
+# to serve Gopher pages, MAKE SURE TO SET A PASSWORD for the instance.
+# Once a password is set:
+#
+# 1. The Gopher server (when enabled, not by default) will still serve
+# content via Gopher.
+# 2. However, other commands cannot be called before the client
+# authenticates.
+#
+# So use the 'requirepass' option to protect your instance.
+#
+# To enable Gopher support uncomment the following line and set
+# the option from no (the default) to yes.
+#
+# gopher-enabled no
+
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
@@ -1107,6 +1565,17 @@ zset-max-ziplist-value 64
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entires limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
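+
+# For example, to cap stream nodes by entry count alone, one could set (an
+# illustrative combination, not the values above):
+#
+# stream-node-max-bytes 0
+# stream-node-max-entries 5000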
+
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
@@ -1135,7 +1604,7 @@ activerehashing yes
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
-# slave -> slave clients
+# replica -> replica clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
@@ -1156,12 +1625,12 @@ activerehashing yes
# asynchronous clients may create a scenario where data is requested faster
# than it can read.
#
-# Instead there is a default limit for pubsub and slave clients, since
-# subscribers and slaves receive data in a push fashion.
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
-client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Client query buffers accumulate new commands. They are limited to a fixed
@@ -1195,12 +1664,34 @@ client-output-buffer-limit pubsub 32mb 8mb 60
# 100 only in environments where very low latency is required.
hz 10
+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This helps, for instance, to avoid
+# processing too many clients for each background task invocation,
+# preventing latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis offers,
+# and enables by default, the ability to use an adaptive HZ value which
+# will temporarily rise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used
+# as a baseline, but multiples of the configured HZ value will actually
+# be used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes
+
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
+# When Redis saves an RDB file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
# idea to start with the default settings and only change them after investigating
# how to improve the performances and how the keys LFU change over time, which
@@ -1255,10 +1746,6 @@ aof-rewrite-incremental-fsync yes
########################### ACTIVE DEFRAGMENTATION #######################
#
-# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested
-# even in production and manually tested by multiple engineers for some
-# time.
-#
# What is active defragmentation?
# -------------------------------
#
@@ -1298,7 +1785,7 @@ aof-rewrite-incremental-fsync yes
# a good idea to leave the defaults untouched.
# Enabled active defragmentation
-# activedefrag yes
+# activedefrag no
# Minimum amount of fragmentation waste to start active defrag
# active-defrag-ignore-bytes 100mb
@@ -1309,8 +1796,42 @@ aof-rewrite-incremental-fsync yes
# Maximum percentage of fragmentation at which we use maximum effort
# active-defrag-threshold-upper 100
-# Minimal effort for defrag in CPU percentage
-# active-defrag-cycle-min 25
+# Minimal effort for defrag in CPU percentage, to be used when the lower
+# threshold is reached
+# active-defrag-cycle-min 1
-# Maximal effort for defrag in CPU percentage
-# active-defrag-cycle-max 75
+# Maximal effort for defrag in CPU percentage, to be used when the upper
+# threshold is reached
+# active-defrag-cycle-max 25
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
+
+# Jemalloc background thread for purging will be enabled by default
+jemalloc-bg-thread yes
+
+# It is possible to pin different threads and processes of Redis to specific
+# CPUs in your system, in order to maximize the performance of the server.
+# This is useful both to pin different Redis threads to different CPUs,
+# and to make sure that multiple Redis instances running on the same host
+# will be pinned to different CPUs.
+#
+# Normally you can do this using the "taskset" command, however it is also
+# possible to do this via Redis configuration directly, both on Linux and
+# FreeBSD.
+#
+# You can pin the server/IO threads, bio threads, aof rewrite child process, and
+# the bgsave child process. The syntax to specify the cpu list is the same as
+# the taskset command:
+#
+# Set redis server/io threads to cpu affinity 0,2,4,6:
+# server_cpulist 0-7:2
+#
+# Set bio threads to cpu affinity 1,3:
+# bio_cpulist 1,3
+#
+# Set aof rewrite child process to cpu affinity 8,9,10,11:
+# aof_rewrite_cpulist 8-11
+#
+# Set bgsave child process to cpu affinity 1,10,11:
+# bgsave_cpulist 1,10-11
\ No newline at end of file
diff --git a/salt/redis/init.sls b/salt/redis/init.sls
index 4864fc8a2..3f24ba079 100644
--- a/salt/redis/init.sls
+++ b/salt/redis/init.sls
@@ -53,10 +53,14 @@ so-redis:
- user: socore
- port_bindings:
- 0.0.0.0:6379:6379
+ - 0.0.0.0:9696:9696
- binds:
- /opt/so/log/redis:/var/log/redis:rw
- /opt/so/conf/redis/etc/redis.conf:/usr/local/etc/redis/redis.conf:ro
- /opt/so/conf/redis/working:/redis:rw
+ - /etc/pki/redis.crt:/certs/redis.crt:ro
+ - /etc/pki/redis.key:/certs/redis.key:ro
+ - /etc/pki/ca.crt:/certs/ca.crt:ro
- entrypoint: "redis-server /usr/local/etc/redis/redis.conf"
- watch:
- file: /opt/so/conf/redis/etc
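+    # Note: the certs mounted above suggest a TLS listener on 9696. A
+    # plausible matching stanza in the mounted redis.conf (an assumption;
+    # the rendered config lives at /opt/so/conf/redis/etc/redis.conf):
+    #   tls-port 9696
+    #   tls-cert-file /certs/redis.crt
+    #   tls-key-file /certs/redis.key
+    #   tls-ca-cert-file /certs/ca.crt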
diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml
index c366ae6ce..8694ffbc7 100644
--- a/salt/salt/master.defaults.yaml
+++ b/salt/salt/master.defaults.yaml
@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
master:
- version: 3001
\ No newline at end of file
+ version: 3001.1
\ No newline at end of file
diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml
index cd061237b..31c313df6 100644
--- a/salt/salt/minion.defaults.yaml
+++ b/salt/salt/minion.defaults.yaml
@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
minion:
- version: 3001
\ No newline at end of file
+ version: 3001.1
\ No newline at end of file
diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls
index fdb40a0bf..af9495e59 100644
--- a/salt/ssl/init.sls
+++ b/salt/ssl/init.sls
@@ -216,6 +216,41 @@ miniokeyperms:
- mode: 640
- group: 939
+/etc/pki/redis.key:
+ x509.private_key_managed:
+ - CN: {{ manager }}
+ - bits: 4096
+ - days_remaining: 0
+ - days_valid: 820
+ - backup: True
+ - new: True
+ {% if salt['file.file_exists']('/etc/pki/redis.key') -%}
+ - prereq:
+ - x509: /etc/pki/redis.crt
+ {%- endif %}
+
+# Create a cert for Redis, signed with the registry signing policy
+/etc/pki/redis.crt:
+ x509.certificate_managed:
+ - ca_server: {{ ca_server }}
+ - signing_policy: registry
+ - public_key: /etc/pki/redis.key
+ - CN: {{ manager }}
+ - days_remaining: 0
+ - days_valid: 820
+ - backup: True
+ - unless:
+ # https://github.com/saltstack/salt/issues/52167
+ # Will trigger 5 days (432000 sec) from cert expiration
+ - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/redis.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
+
+rediskeyperms:
+ file.managed:
+ - replace: False
+ - name: /etc/pki/redis.key
+ - mode: 640
+ - group: 939
+
/etc/pki/managerssl.key:
x509.private_key_managed:
- CN: {{ manager }}
diff --git a/setup/so-functions b/setup/so-functions
index d9f8826ee..5ae4b7716 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -1003,7 +1003,7 @@ manager_global() {
" wazuh: $WAZUH"\
" managerupdate: $MANAGERUPDATES"\
" imagerepo: $IMAGEREPO"\
- " pipeline: minio"\
+ " pipeline: redis"\
"pcap:"\
" sensor_checkin_interval_ms: $SENSOR_CHECKIN_INTERVAL_MS"\
"strelka:"\
@@ -1075,8 +1075,8 @@ manager_global() {
" close: 365"\
" delete: 45"\
"minio:"\
- " access_key: $ACCESS_KEY"\
- " access_secret: $ACCESS_SECRET"\
+ " access_key: '$ACCESS_KEY'"\
+ " access_secret: '$ACCESS_SECRET'"\
"s3_settings:"\
" size_file: 2048"\
" time_file: 1"\
@@ -1223,7 +1223,7 @@ saltify() {
if [ $OS = 'centos' ]; then
set_progress_str 5 'Installing Salt repo'
{
- sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/3001/SALTSTACK-GPG-KEY.pub;
+ sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001.1/SALTSTACK-GPG-KEY.pub;
cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo;
} >> "$setup_log" 2>&1
set_progress_str 6 'Installing various dependencies'
@@ -1235,12 +1235,12 @@ saltify() {
yum -y install sqlite argon2 curl mariadb-devel >> "$setup_log" 2>&1
# Download Ubuntu Keys in case manager updates = 1
mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
- wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3001/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
+ wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1
cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo >> "$setup_log" 2>&1
set_progress_str 7 'Installing salt-master'
- yum -y install salt-master-3001 >> "$setup_log" 2>&1
+ yum -y install salt-master-3001.1 >> "$setup_log" 2>&1
systemctl enable salt-master >> "$setup_log" 2>&1
;;
*)
@@ -1260,7 +1260,7 @@ saltify() {
set_progress_str 8 'Installing salt-minion & python modules'
{
yum -y install epel-release
- yum -y install salt-minion-3001\
+ yum -y install salt-minion-3001.1\
python3\
python36-docker\
python36-dateutil\
@@ -1271,7 +1271,7 @@ saltify() {
lvm2\
openssl\
jq;
- yum -y update exclude=salt*;
+ yum -y update --exclude=salt*;
systemctl enable salt-minion;
} >> "$setup_log" 2>&1
yum versionlock salt*
@@ -1303,8 +1303,8 @@ saltify() {
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORTPCAP') # TODO: should this also be HELIXSENSOR?
# Add saltstack repo(s)
- wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3001/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
- echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
+ wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
+ echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001.1 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
# Add Docker repo
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1
@@ -1312,7 +1312,7 @@ saltify() {
# Get gpg keys
mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
- wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3001/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
+ wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1
@@ -1325,7 +1325,7 @@ saltify() {
set_progress_str 6 'Installing various dependencies'
apt-get -y install sqlite3 argon2 libssl-dev >> "$setup_log" 2>&1
set_progress_str 7 'Installing salt-master'
- apt-get -y install salt-master=3001+ds-1 >> "$setup_log" 2>&1
+ apt-get -y install salt-master=3001.1+ds-1 >> "$setup_log" 2>&1
apt-mark hold salt-master >> "$setup_log" 2>&1
;;
*)
@@ -1336,14 +1336,14 @@ saltify() {
echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1
- echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
+ echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3001.1/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
;;
esac
apt-get update >> "$setup_log" 2>&1
set_progress_str 8 'Installing salt-minion & python modules'
- apt-get -y install salt-minion=3001+ds-1\
- salt-common=3001+ds-1 >> "$setup_log" 2>&1
+ apt-get -y install salt-minion=3001.1+ds-1\
+ salt-common=3001.1+ds-1 >> "$setup_log" 2>&1
apt-mark hold salt-minion salt-common >> "$setup_log" 2>&1
if [ "$OSVER" != 'xenial' ]; then
apt-get -y install python3-dateutil python3-m2crypto python3-mysqldb >> "$setup_log" 2>&1
diff --git a/setup/yum_repos/saltstack.repo b/setup/yum_repos/saltstack.repo
index f04f02be0..2e1b425fb 100644
--- a/setup/yum_repos/saltstack.repo
+++ b/setup/yum_repos/saltstack.repo
@@ -1,6 +1,6 @@
-[saltstack-repo]
+[saltstack]
name=SaltStack repo for RHEL/CentOS $releasever PY3
-baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001/
+baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001.1/
enabled=1
gpgcheck=1
-gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001/SALTSTACK-GPG-KEY.pub
+gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3001.1/SALTSTACK-GPG-KEY.pub
\ No newline at end of file