Merge pull request #3001 from Security-Onion-Solutions/dev

Dev
Josh Brower
2021-02-17 16:36:49 -05:00
committed by GitHub
24 changed files with 267 additions and 161 deletions

View File

@@ -42,6 +42,9 @@ pki_private_key:
- replace: False
- require:
- file: /etc/pki
- timeout: 30
- retry: 5
- interval: 30
x509_pem_entries:
module.run:

View File

@@ -6,5 +6,17 @@
nocompress
create
sharedscripts
endscript
}
/nsm/strelka/log/strelka.log
{
daily
rotate 14
missingok
copytruncate
compress
create
extension .log
dateext
dateyesterday
}

View File

@@ -231,6 +231,15 @@ commonlogrotateconf:
- dayweek: '*'
{% if role in ['eval', 'manager', 'managersearch', 'standalone'] %}
# Lock permissions on the backup directory
backupdir:
file.directory:
- name: /nsm/backup
- user: 0
- group: 0
- makedirs: True
- mode: 700
# Add config backup
/usr/sbin/so-config-backup > /dev/null 2>&1:
cron.present:
@@ -286,4 +295,4 @@ dockerreserveports:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}
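The hunk above locks /nsm/backup down to root with mode 700 and schedules /usr/sbin/so-config-backup through cron.present on manager-type nodes. A quick way to confirm the result after the next highstate (these check commands are illustrative and not part of the commit):

  ls -ld /nsm/backup                           # expect drwx------ owned by root:root
  crontab -l -u root | grep so-config-backup   # the Salt-managed cron entry should be listed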

View File

@@ -33,12 +33,15 @@ if [ ! -f $BACKUPFILE ]; then
{%- for LOCATION in BACKUPLOCATIONS %}
tar -rf $BACKUPFILE {{ LOCATION }}
{%- endfor %}
tar -rf $BACKUPFILE /etc/pki
tar -rf $BACKUPFILE /etc/salt
fi
# Find oldest backup file and remove it
# Find oldest backup files and remove them
NUMBACKUPS=$(find /nsm/backup/ -type f -name "so-config-backup*" | wc -l)
OLDESTBACKUP=$(find /nsm/backup/ -type f -name "so-config-backup*" | ls -1t | tail -1)
if [ "$NUMBACKUPS" -gt "$MAXBACKUPS" ]; then
rm -f /nsm/backup/$OLDESTBACKUP
fi
while [ "$NUMBACKUPS" -gt "$MAXBACKUPS" ]; do
OLDESTBACKUP=$(find /nsm/backup/ -type f -name "so-config-backup*" -type f -printf '%T+ %p\n' | sort | head -n 1 | awk -F" " '{print $2}')
rm -f $OLDESTBACKUP
NUMBACKUPS=$(find /nsm/backup/ -type f -name "so-config-backup*" | wc -l)
done
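The replacement retention loop deletes the oldest so-config-backup* archive (oldest by modification time, via GNU find -printf '%T+ %p' piped through sort) until the count drops to MAXBACKUPS, instead of removing at most one file per run. A self-contained sketch of the same pruning logic against a throwaway directory (directory, file names, and MAXBACKUPS value are made up for illustration):

  BACKUPDIR=/tmp/backup-demo   # throwaway test directory, not the real /nsm/backup
  MAXBACKUPS=2
  mkdir -p "$BACKUPDIR"
  for i in 1 2 3 4; do touch -d "$i days ago" "$BACKUPDIR/so-config-backup-$i.tar"; done
  NUMBACKUPS=$(find "$BACKUPDIR" -type f -name "so-config-backup*" | wc -l)
  while [ "$NUMBACKUPS" -gt "$MAXBACKUPS" ]; do
    # %T+ sorts lexically by mtime, so head -n 1 yields the oldest archive
    OLDESTBACKUP=$(find "$BACKUPDIR" -type f -name "so-config-backup*" -printf '%T+ %p\n' | sort | head -n 1 | awk '{print $2}')
    rm -f "$OLDESTBACKUP"
    NUMBACKUPS=$(find "$BACKUPDIR" -type f -name "so-config-backup*" | wc -l)
  done
  ls "$BACKUPDIR"   # only the two newest archives should remain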

View File

@@ -15,10 +15,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
import sys
import time
import yaml
lockFile = "/tmp/so-firewall.lock"
hostgroupsFilename = "/opt/so/saltstack/local/salt/firewall/hostgroups.local.yaml"
portgroupsFilename = "/opt/so/saltstack/local/salt/firewall/portgroups.local.yaml"
defaultPortgroupsFilename = "/opt/so/saltstack/default/salt/firewall/portgroups.yaml"
@@ -329,7 +332,7 @@ def apply():
proc = subprocess.run(['salt-call', 'state.apply', 'firewall', 'queue=True'])
return proc.returncode
def main():
options = []
args = sys.argv[1:]
for option in args:
@@ -356,8 +359,30 @@ def main():
"addportgroup": addportgroup
}
cmd = commands.get(args[0], showUsage)
code = cmd(options, args[1:])
code=1
try:
lockAttempts = 0
maxAttempts = 30
while lockAttempts < maxAttempts:
lockAttempts = lockAttempts + 1
try:
f = open(lockFile, "x")
f.close()
break
except:
time.sleep(2)
if lockAttempts == maxAttempts:
print("Lock file (" + lockFile + ") could not be created; proceeding without lock.")
cmd = commands.get(args[0], showUsage)
code = cmd(options, args[1:])
finally:
try:
os.remove(lockFile)
except:
print("Lock file (" + lockFile + ") already removed")
sys.exit(code)
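so-firewall now serializes concurrent invocations with a lock file: it tries to create /tmp/so-firewall.lock exclusively (open(..., "x")), retries every 2 seconds for up to 30 attempts, warns and proceeds without the lock if it never succeeds, and always removes the file in the finally block. The same create-exclusively-then-retry pattern, sketched in shell with noclobber (hypothetical lock path, illustration only, not part of the commit):

  LOCKFILE=/tmp/so-firewall-demo.lock   # hypothetical path for this demo
  got_lock=0
  for attempt in $(seq 1 30); do
    # with noclobber, the redirection fails if the file already exists, giving an atomic create-or-fail
    if ( set -o noclobber; : > "$LOCKFILE" ) 2>/dev/null; then got_lock=1; break; fi
    sleep 2
  done
  [ "$got_lock" -eq 1 ] || echo "Lock file ($LOCKFILE) could not be created; proceeding without lock."
  # ... serialized work happens here ...
  rm -f "$LOCKFILE"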

View File

@@ -26,9 +26,9 @@ if [[ $# -lt 1 || $# -gt 2 ]]; then
echo " update: Updates a user's password; requires 'email' parameter"
echo " enable: Enables a user; requires 'email' parameter"
echo " disable: Disables a user; requires 'email' parameter"
echo " validate: Validates that the given email address and password are acceptable for defining a new user; requires 'email' parameter"
echo " valemail: Validates that the given email address is acceptable for defining a new user; requires 'email' parameter"
echo " valpass: Validates that a password is acceptable for defining a new user"
echo " validate: Validates that the given email address and password are acceptable; requires 'email' parameter"
echo " valemail: Validates that the given email address is acceptable; requires 'email' parameter"
echo " valpass: Validates that a password is acceptable"
echo ""
echo " Note that the password can be piped into STDIN to avoid prompting for it"
exit 1
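The reworded help text still notes that the password can be piped in on STDIN, which keeps the validators usable from scripts. For example, a non-interactive password check could look like this (the exact invocation is an assumption based on the usage text above, not taken from the script body):

  echo 'CandidateP@ssw0rd1' | so-user valpass && echo "password accepted"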

View File

@@ -0,0 +1,23 @@
#actions:
# 1:
# action: allocation
# description: "Apply shard allocation filtering rules to the specified indices"
# options:
# key: box_type
# value: warm
# allocation_type: require
# wait_for_completion: true
# timeout_override:
# continue_if_exception: false
# disable_action: false
# filters:
# - filtertype: pattern
# kind: prefix
# value: so-
# - filtertype: age
# source: name
# direction: older
# timestring: '%Y.%m.%d'
# unit: days
# unit_count: 3

View File

@@ -0,0 +1,48 @@
elastalert:
config:
rules_folder: /opt/elastalert/rules/
scan_subdirectories: true
disable_rules_on_error: false
run_every:
minutes: 3
buffer_time:
minutes: 10
old_query_limit:
minutes: 5
es_host: {{salt['pillar.get']('manager:mainip', '')}}
es_port: {{salt['pillar.get']('manager:es_port', '')}}
es_conn_timeout: 55
max_query_size: 5000
#aws_region: us-east-1
#profile: test
#es_url_prefix: elasticsearch
#use_ssl: True
#verify_certs: True
#es_send_get_body_as: GET
#es_username: someusername
#es_password: somepassword
writeback_index: elastalert_status
alert_time_limit:
days: 2
index_settings:
shards: 1
replicas: 0
logging:
version: 1
incremental: false
disable_existing_loggers: false
formatters:
logline:
format: '%(asctime)s %(levelname)+8s %(name)+20s %(message)s'
handlers:
file:
class: logging.FileHandler
formatter: logline
level: INFO
filename: /var/log/elastalert/elastalert.log
loggers:
'':
level: INFO
handlers:
- file
propagate: false

View File

@@ -0,0 +1,4 @@
{% import_yaml 'elastalert/defaults.yaml' as elastalert_defaults with context %}
{% set elastalert_pillar = salt['pillar.get']('elastalert:config', {}) %}
{% do salt['defaults.merge'](elastalert_defaults.elastalert.config, elastalert_pillar, in_place=True) %}

View File

@@ -1,110 +0,0 @@
{% set esip = salt['pillar.get']('manager:mainip', '') %}
{% set esport = salt['pillar.get']('manager:es_port', '') %}
# This is the folder that contains the rule yaml files
# Any .yaml file will be loaded as a rule
rules_folder: /opt/elastalert/rules/
# Sets whether or not ElastAlert should recursively descend
# the rules directory - true or false
scan_subdirectories: true
# Do not disable a rule when an uncaught exception is thrown -
# This setting should be tweaked once the following issue has been fixed
# https://github.com/Security-Onion-Solutions/securityonion-saltstack/issues/98
disable_rules_on_error: false
# How often ElastAlert will query Elasticsearch
# The unit can be anything from weeks to seconds
run_every:
minutes: 3
# ElastAlert will buffer results from the most recent
# period of time, in case some log sources are not in real time
buffer_time:
minutes: 10
# The maximum time between queries for ElastAlert to start at the most recently
# run query. When ElastAlert starts, for each rule, it will search elastalert_metadata
# for the most recently run query and start from that time, unless it is older than
# old_query_limit, in which case it will start from the present time. The default is one week.
old_query_limit:
minutes: 5
# The Elasticsearch hostname for metadata writeback
# Note that every rule can have its own Elasticsearch host
es_host: {{ esip }}
# The Elasticsearch port
es_port: {{ esport }}
# Sets timeout for connecting to and reading from es_host
es_conn_timeout: 55
# The maximum number of documents that will be downloaded from Elasticsearch in
# a single query. The default is 10,000, and if you expect to get near this number,
# consider using use_count_query for the rule. If this limit is reached, ElastAlert
# will scroll through pages the size of max_query_size until processing all results.
max_query_size: 5000
# The AWS region to use. Set this when using AWS-managed elasticsearch
#aws_region: us-east-1
# The AWS profile to use. Use this if you are using an aws-cli profile.
# See http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
# for details
#profile: test
# Optional URL prefix for Elasticsearch
#es_url_prefix: elasticsearch
# Connect with TLS to Elasticsearch
#use_ssl: True
# Verify TLS certificates
#verify_certs: True
# GET request with body is the default option for Elasticsearch.
# If it fails for some reason, you can pass 'GET', 'POST' or 'source'.
# See http://elasticsearch-py.readthedocs.io/en/master/connection.html?highlight=send_get_body_as#transport
# for details
#es_send_get_body_as: GET
# Optional basic-auth username and password for Elasticsearch
#es_username: someusername
#es_password: somepassword
# The index on es_host which is used for metadata storage
# This can be an unmapped index, but it is recommended that you run
# elastalert-create-index to set a mapping
writeback_index: elastalert_status
# If an alert fails for some reason, ElastAlert will retry
# sending the alert until this time period has elapsed
alert_time_limit:
days: 2
index_settings:
shards: 1
replicas: 0
logging:
version: 1
incremental: false
disable_existing_loggers: false
formatters:
logline:
format: '%(asctime)s %(levelname)+8s %(name)+20s %(message)s'
handlers:
file:
class : logging.FileHandler
formatter: logline
level: INFO
filename: /var/log/elastalert/elastalert.log
loggers:
'':
level: INFO
handlers:
- file
propagate: false

View File

@@ -0,0 +1 @@
{{ elastalert_config | yaml(False) }}

View File

@@ -15,6 +15,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'elastalert/elastalert_config.map.jinja' import elastalert_defaults as elastalert_config with context %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
@@ -92,7 +94,9 @@ elastasomodulesync:
elastaconf:
file.managed:
- name: /opt/so/conf/elastalert/elastalert_config.yaml
- source: salt://elastalert/files/elastalert_config.yaml
- source: salt://elastalert/files/elastalert_config.yaml.jinja
- context:
elastalert_config: {{ elastalert_config.elastalert.config }}
- user: 933
- group: 933
- template: jinja
@@ -119,6 +123,8 @@ so-elastalert:
- {{MANAGER_URL}}:{{MANAGER_IP}}
- require:
- module: wait_for_elasticsearch
- watch:
- file: elastaconf
append_so-elastalert_so-status.conf:
file.append:
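Because elastaconf is now rendered from elastalert_config.yaml.jinja with the merged defaults/pillar dictionary passed in via context, and so-elastalert gains a watch on that file, a pillar change re-renders the config and restarts the container on the next state run. A rough post-apply check (commands are illustrative; the state apply mirrors the salt-call pattern so-firewall uses above):

  sudo salt-call state.apply elastalert queue=True
  sudo grep -E 'es_host|es_port|run_every' /opt/so/conf/elastalert/elastalert_config.yaml
  sudo docker ps --filter name=so-elastalert --format '{{.Names}} {{.Status}}'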

View File

@@ -34,7 +34,6 @@ iptables_allow_established:
- jump: ACCEPT
- match: conntrack
- ctstate: 'RELATED,ESTABLISHED'
- save: True
# I like pings
iptables_allow_pings:
@@ -43,7 +42,6 @@ iptables_allow_pings:
- chain: INPUT
- jump: ACCEPT
- proto: icmp
- save: True
# Create the chain for logging
iptables_LOGGING_chain:
@@ -68,7 +66,6 @@ iptables_log_input_drops:
- table: filter
- chain: INPUT
- jump: LOGGING
- save: True
# Enable global DOCKER-USER block rule
enable_docker_user_fw_policy:
@@ -79,7 +76,6 @@ enable_docker_user_fw_policy:
- in-interface: '!docker0'
- out-interface: docker0
- position: 1
- save: True
enable_docker_user_established:
iptables.insert:
@@ -89,7 +85,6 @@ enable_docker_user_established:
- in-interface: '!docker0'
- out-interface: docker0
- position: 1
- save: True
- match: conntrack
- ctstate: 'RELATED,ESTABLISHED'
@@ -115,7 +110,6 @@ enable_docker_user_established:
{% if action == 'insert' %}
- position: 1
{% endif %}
- save: True
{% endfor %}
{% endfor %}
@@ -126,6 +120,15 @@ enable_docker_user_established:
{% endfor %}
{% endfor %}
# Block icmp timestamp response
block_icmp_timestamp_reply:
iptables.append:
- table: filter
- chain: OUTPUT
- jump: DROP
- proto: icmp
- icmp-type: 'timestamp-reply'
# Make the input policy send stuff that doesn't match to be logged and dropped
iptables_drop_all_the_things:
iptables.append:
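This file drops the per-rule '- save: True' flags and adds an OUTPUT rule that drops ICMP timestamp replies. A quick way to confirm the new rule on a node after the firewall state runs (check commands are illustrative; iptables -C exits 0 only if the rule exists):

  sudo iptables -C OUTPUT -p icmp --icmp-type timestamp-reply -j DROP && echo "timestamp-reply drop rule present"
  sudo iptables -S OUTPUT | grep -- 'timestamp-reply'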

View File

@@ -5,6 +5,6 @@
"discover:sampleSize":"100",
"dashboard:defaultDarkTheme":true,
"theme:darkMode":true,
"timepicker:timeDefaults":"{\n \"from\": \"now-24h\",\n \"to\": \"now\",\n \"mode\": \"quick\"\n}"
"timepicker:timeDefaults":"{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"
}
}

View File

@@ -13,3 +13,4 @@ elasticsearch.hosts: [ "http://{{ ES }}:9200" ]
elasticsearch.requestTimeout: 90000
logging.dest: /var/log/kibana/kibana.log
telemetry.enabled: false
security.showInsecureClusterWarning: false

File diff suppressed because one or more lines are too long

View File

@@ -11,3 +11,9 @@ salt_bootstrap:
- name: /usr/sbin/bootstrap-salt.sh
- source: salt://salt/scripts/bootstrap-salt.sh
- mode: 755
{% if grains.os == 'CentOS' %}
remove_salt-2019-2-5.repo:
file.absent:
- name: /etc/yum.repos.d/salt-2019-2-5.repo
{% endif %}

View File

@@ -4,10 +4,10 @@
{ "name": "Elastalerts", "description": "", "query": "_type:elastalert | groupby rule.name"},
{ "name": "Alerts", "description": "Show all alerts grouped by alert source", "query": "event.dataset: alert | groupby event.module"},
{ "name": "NIDS Alerts", "description": "Show all NIDS alerts grouped by alert", "query": "event.category: network AND event.dataset: alert | groupby rule.category rule.gid rule.uuid rule.name"},
{ "name": "Wazuh/OSSEC Alerts", "description": "Show all Wazuh alerts grouped by category", "query": "event.module:ossec AND event.dataset:alert | groupby rule.category rule.name"},
{ "name": "Wazuh/OSSEC Commands", "description": "Show all Wazuh alerts grouped by command line", "query": "event.module:ossec AND event.dataset:alert | groupby process.command_line"},
{ "name": "Wazuh/OSSEC Alerts", "description": "Show all Wazuh alerts at Level 5 or higher grouped by category", "query": "event.module:ossec AND event.dataset:alert AND rule.level:>4 | groupby rule.category rule.name"},
{ "name": "Wazuh/OSSEC Alerts", "description": "Show all Wazuh alerts at Level 4 or lower grouped by category", "query": "event.module:ossec AND event.dataset:alert AND rule.level:<5 | groupby rule.category rule.name"},
{ "name": "Wazuh/OSSEC Users and Commands", "description": "Show all Wazuh alerts grouped by username and command line", "query": "event.module:ossec AND event.dataset:alert | groupby user.escalated.keyword process.command_line"},
{ "name": "Wazuh/OSSEC Processes", "description": "Show all Wazuh alerts grouped by process name", "query": "event.module:ossec AND event.dataset:alert | groupby process.name.keyword"},
{ "name": "Wazuh/OSSEC Users", "description": "Show all Wazuh alerts grouped by username", "query": "event.module:ossec AND event.dataset:alert | groupby user.escalated.keyword"},
{ "name": "Sysmon Events", "description": "Show all Sysmon logs grouped by event type", "query": "event.module:sysmon | groupby event.dataset"},
{ "name": "Sysmon Usernames", "description": "Show all Sysmon logs grouped by username", "query": "event.module:sysmon | groupby event.dataset, user.name.keyword"},
{ "name": "Strelka", "description": "Show all Strelka logs grouped by file type", "query": "event.module:strelka | groupby file.mime_type"},

View File

@@ -67,6 +67,9 @@ removeesp12dir:
- prereq:
- x509: /etc/pki/influxdb.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
# Create a cert for talking to influxdb
/etc/pki/influxdb.crt:
@@ -82,6 +85,9 @@ removeesp12dir:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/influxdb.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
influxkeyperms:
file.managed:
@@ -104,6 +110,9 @@ influxkeyperms:
- prereq:
- x509: /etc/pki/redis.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
/etc/pki/redis.crt:
x509.certificate_managed:
@@ -118,6 +127,9 @@ influxkeyperms:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/redis.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
rediskeyperms:
file.managed:
@@ -140,6 +152,9 @@ rediskeyperms:
- prereq:
- x509: /etc/pki/filebeat.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
# Request a cert and drop it where it needs to go to be distributed
/etc/pki/filebeat.crt:
@@ -159,6 +174,9 @@ rediskeyperms:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/filebeat.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/filebeat.key -topk8 -out /etc/pki/filebeat.p8 -nocrypt"
- onchanges:
@@ -213,6 +231,9 @@ fbcrtlink:
- prereq:
- x509: /etc/pki/registry.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
# Create a cert for the docker registry
/etc/pki/registry.crt:
@@ -228,6 +249,9 @@ fbcrtlink:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/registry.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
regkeyperms:
file.managed:
@@ -248,6 +272,9 @@ regkeyperms:
- prereq:
- x509: /etc/pki/minio.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
# Create a cert for minio
/etc/pki/minio.crt:
@@ -263,6 +290,9 @@ regkeyperms:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/minio.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
miniokeyperms:
file.managed:
@@ -284,6 +314,9 @@ miniokeyperms:
- prereq:
- x509: /etc/pki/elasticsearch.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
/etc/pki/elasticsearch.crt:
x509.certificate_managed:
@@ -298,6 +331,9 @@ miniokeyperms:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:"
- onchanges:
@@ -329,6 +365,9 @@ elasticp12perms:
- prereq:
- x509: /etc/pki/managerssl.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
# Create a cert for the reverse proxy
/etc/pki/managerssl.crt:
@@ -345,6 +384,9 @@ elasticp12perms:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/managerssl.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
msslkeyperms:
file.managed:
@@ -366,6 +408,9 @@ msslkeyperms:
- prereq:
- x509: /etc/pki/fleet.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
/etc/pki/fleet.crt:
x509.certificate_managed:
@@ -379,6 +424,9 @@ msslkeyperms:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/fleet.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
fleetkeyperms:
file.managed:
@@ -407,6 +455,9 @@ fbcertdir:
- prereq:
- x509: /opt/so/conf/filebeat/etc/pki/filebeat.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
# Request a cert and drop it where it needs to go to be distributed
/opt/so/conf/filebeat/etc/pki/filebeat.crt:
@@ -426,6 +477,9 @@ fbcertdir:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /opt/so/conf/filebeat/etc/pki/filebeat.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
# Convert the key to pkcs#8 so logstash will work correctly.
filebeatpkcs:
@@ -465,6 +519,9 @@ chownfilebeatp8:
- prereq:
- x509: /etc/pki/managerssl.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
# Create a cert for the reverse proxy
/etc/pki/managerssl.crt:
@@ -481,6 +538,9 @@ chownfilebeatp8:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/managerssl.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
msslkeyperms:
file.managed:
@@ -502,6 +562,9 @@ msslkeyperms:
- prereq:
- x509: /etc/pki/fleet.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
/etc/pki/fleet.crt:
x509.certificate_managed:
@@ -515,6 +578,9 @@ msslkeyperms:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/fleet.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
fleetkeyperms:
file.managed:
@@ -539,6 +605,9 @@ fleetkeyperms:
- prereq:
- x509: /etc/pki/elasticsearch.crt
{%- endif %}
- timeout: 30
- retry: 5
- interval: 30
/etc/pki/elasticsearch.crt:
x509.certificate_managed:
@@ -553,6 +622,9 @@ fleetkeyperms:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/elasticsearch.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
- timeout: 30
- retry: 5
- interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:"
- onchanges:
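Every certificate state in this file keeps the same 'unless' guard: convert the cert's notAfter date to epoch seconds and compare it against now plus 432000 seconds, so Salt only regenerates a cert inside its final 5 days of validity. The guard can be run by hand against any of the certs above, for example (cert path chosen for illustration):

  CRT=/etc/pki/filebeat.crt
  enddate=$(date -d "$(openssl x509 -in "$CRT" -enddate -noout | cut -d= -f2)" +%s)
  now=$(date +%s)
  expire_date=$((now + 432000))   # 432000 seconds = 5 days
  if [ "$enddate" -gt "$expire_date" ]; then
    echo "more than 5 days of validity left; the unless check passes and the state is skipped"
  else
    echo "within 5 days of expiry; the unless check fails and Salt regenerates the cert"
  fi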

View File

@@ -66,7 +66,7 @@ REDIRECTHOST=$(curl http://169.254.169.254/latest/meta-data/public-ipv4)
REDIRECTINFO=OTHER
RULESETUP=ETOPEN
# SHARDCOUNT=
SKIP_REBOOT=0
# SKIP_REBOOT=0
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1

View File

@@ -48,8 +48,8 @@ MANAGERUPDATES=1
# MMASK=
MNIC=eth0
# MSEARCH=
MSRV=manager-aws
MSRVIP=172.16.163.10
MSRV=distributed-manager
MSRVIP=10.99.1.20
# MTU=
#NIDS=Suricata
# NODE_ES_HEAP_SIZE=
@@ -67,7 +67,7 @@ PATCHSCHEDULENAME=auto
#REDIRECTINFO=HOSTNAME
#RULESETUP=ETOPEN
# SHARDCOUNT=
SKIP_REBOOT=0
# SKIP_REBOOT=0
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
#STRELKA=1

View File

@@ -48,8 +48,8 @@ MANAGERUPDATES=1
# MMASK=
MNIC=eth0
# MSEARCH=
MSRV=manager-aws
MSRVIP=172.16.163.10
MSRV=distributed-manager
MSRVIP=10.99.1.20
# MTU=
#NIDS=Suricata
# NODE_ES_HEAP_SIZE=
@@ -67,7 +67,7 @@ PATCHSCHEDULENAME=auto
#REDIRECTINFO=HOSTNAME
#RULESETUP=ETOPEN
# SHARDCOUNT=
SKIP_REBOOT=0
# SKIP_REBOOT=0
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
#STRELKA=1

View File

@@ -1,8 +1,8 @@
#!/bin/bash
if [[ "$DEVICE_IFACE" != "$MNIC" && "$DEVICE_IFACE" != *"docker"* ]]; then
if [[ "$DEVICE_IFACE" != "$MNIC" && "$DEVICE_IFACE" != *"docker"* && "$DEVICE_IFACE" != *"tun"* && "DEVICE_IFACE" != *"wg"* ]]; then
for i in rx tx sg tso ufo gso gro lro; do
ethtool -K "$DEVICE_IFACE" "$i" off;
done
ip link set dev "$DEVICE_IFACE" arp off multicast off allmulticast off promisc on
fi
fi

View File

@@ -1221,7 +1221,7 @@ filter_unused_nics() {
fi
# Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use)
filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g')
filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|tun|wg|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g')
readarray -t filtered_nics <<< "$filtered_nics"
nic_list=()
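filter_unused_nics now also excludes tun* and wg* devices, so VPN tunnel interfaces are ignored the same way loopback, bridge, docker, and wireless interfaces already were. A standalone run of the updated awk filter against fabricated 'ip link' output (the sample lines are made up):

  printf '1: lo: <LOOPBACK,UP>\n2: eth0: <BROADCAST,UP>\n3: tun0: <POINTOPOINT,UP>\n4: wg0: <POINTOPOINT,UP>\n5: eth1: <BROADCAST,UP>\n' \
    | awk -F: '$0 !~ "lo|vir|veth|br|docker|tun|wg|wl|^[^0-9]"{print $2}' \
    | sed 's/ //g'
  # prints eth0 and eth1; lo, tun0, and wg0 are filtered out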
@@ -2283,7 +2283,7 @@ sensor_pillar() {
set_default_log_size() {
local percentage
case $INSTALLTYPE in
case $install_type in
STANDALONE | EVAL | HEAVYNODE)
percentage=50
;;