Merge pull request #8746 from Security-Onion-Solutions/funstuff

Fix for Suricata
Mike Reeves committed on 2022-09-15 10:53:22 -04:00 (via GitHub)
17 changed files with 104 additions and 1678 deletions

View File

@@ -47,8 +47,7 @@
     'schedule',
     'soctopus',
     'tcpreplay',
-    'docker_clean',
-    'learn'
+    'docker_clean'
   ],
   'so-heavynode': [
     'ssl',
@@ -106,8 +105,7 @@
     'zeek',
     'schedule',
     'tcpreplay',
-    'docker_clean',
-    'learn'
+    'docker_clean'
   ],
   'so-manager': [
     'salt.master',
@@ -128,8 +126,7 @@
     'utility',
     'schedule',
     'soctopus',
-    'docker_clean',
-    'learn'
+    'docker_clean'
   ],
   'so-managersearch': [
     'salt.master',
@@ -150,8 +147,7 @@
     'utility',
     'schedule',
     'soctopus',
-    'docker_clean',
-    'learn'
+    'docker_clean'
   ],
   'so-searchnode': [
     'ssl',
@@ -184,8 +180,7 @@
     'schedule',
     'soctopus',
     'tcpreplay',
-    'docker_clean',
-    'learn'
+    'docker_clean'
   ],
   'so-sensor': [
     'ssl',

View File

@@ -1,295 +0,0 @@
#!/usr/bin/env python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from itertools import chain
from typing import List
import signal
import sys
import os
import re
import subprocess
import argparse
import textwrap
import yaml
import multiprocessing
import docker
import pty
minion_pillar_dir = '/opt/so/saltstack/local/pillar/minions'
so_status_conf = '/opt/so/conf/so-status/so-status.conf'
proc: subprocess.CompletedProcess = None
# Temp store of modules, will likely be broken out into salt
def get_learn_modules():
    return {
        'logscan': { 'cpu_period': get_cpu_period(fraction=0.25), 'enabled': False, 'description': 'Scan log files against pre-trained models to alert on anomalies.' }
    }


def get_cpu_period(fraction: float):
    multiplier = 10000
    num_cores = multiprocessing.cpu_count()
    if num_cores <= 2:
        fraction = 1.
    num_used_cores = int(num_cores * fraction)
    cpu_period = num_used_cores * multiplier
    return cpu_period


def sigint_handler(*_):
    print('Exiting gracefully on Ctrl-C')
    if proc is not None: proc.send_signal(signal.SIGINT)
    sys.exit(1)
def find_minion_pillar() -> str:
    regex = r'^.*_(manager|managersearch|standalone|import|eval)\.sls$'
    result = []
    for root, _, files in os.walk(minion_pillar_dir):
        for f_minion_id in files:
            if re.search(regex, f_minion_id):
                result.append(os.path.join(root, f_minion_id))
    if len(result) == 0:
        print('Could not find manager-type pillar (eval, standalone, manager, managersearch, import). Are you running this script on the manager?', file=sys.stderr)
        sys.exit(3)
    elif len(result) > 1:
        res_str = ', '.join(f'"{r}"' for r in result)  # quote each pillar path individually
        print('(This should not happen, the system is in an error state if you see this message.)\n', file=sys.stderr)
        print('More than one manager-type pillar exists, minion id\'s listed below:', file=sys.stderr)
        print(f'  {res_str}', file=sys.stderr)
        sys.exit(3)
    else:
        return result[0]
def read_pillar(pillar: str):
    try:
        with open(pillar, 'r') as pillar_file:
            loaded_yaml = yaml.safe_load(pillar_file.read())
            if loaded_yaml is None:
                print(f'Could not parse {pillar}', file=sys.stderr)
                sys.exit(3)
            return loaded_yaml
    except:
        print(f'Could not open {pillar}', file=sys.stderr)
        sys.exit(3)


def write_pillar(pillar: str, content: dict):
    try:
        with open(pillar, 'w') as pillar_file:
            yaml.dump(content, pillar_file, default_flow_style=False)
    except:
        print(f'Could not open {pillar}', file=sys.stderr)
        sys.exit(3)


def mod_so_status(action: str, item: str):
    with open(so_status_conf, 'a+') as conf:
        conf.seek(0)
        containers = conf.readlines()
        if f'so-{item}\n' in containers:
            if action == 'remove': containers.remove(f'so-{item}\n')
            if action == 'add': pass
        else:
            if action == 'remove': pass
            if action == 'add': containers.append(f'so-{item}\n')
        [containers.remove(c_name) for c_name in containers if c_name == '\n'] # remove extra newlines
        conf.seek(0)
        conf.truncate(0)
        conf.writelines(containers)


def create_pillar_if_not_exist(pillar: str, content: dict):
    pillar_dict = content
    if pillar_dict.get('learn', {}).get('modules') is None:
        pillar_dict['learn'] = {}
        pillar_dict['learn']['modules'] = get_learn_modules()
        content.update()
        write_pillar(pillar, content)
    return content


def salt_call(module: str):
    salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', f'learn.{module}', 'queue=True']
    print(f'  Applying salt state for {module} module...')
    proc = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    return_code = proc.returncode
    if return_code != 0:
        print(f'  [ERROR] Failed to apply salt state for {module} module.')
    return return_code


def pull_image(module: str):
    container_basename = f'so-{module}'
    client = docker.from_env()
    image_list = client.images.list(filters={ 'dangling': False })
    tag_list = list(chain.from_iterable(list(map(lambda x: x.attrs.get('RepoTags'), image_list))))
    basename_match = list(filter(lambda x: f'{container_basename}' in x, tag_list))
    local_registry_match = list(filter(lambda x: ':5000' in x, basename_match))
    if len(local_registry_match) == 0:
        print(f'Pulling and verifying missing image for {module} (may take several minutes) ...')
        pull_command = ['so-image-pull', '--quiet', container_basename]
        proc = subprocess.run(pull_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        return_code = proc.returncode
        if return_code != 0:
            print(f'[ERROR] Failed to pull image so-{module}, skipping state.')
    else:
        return_code = 0
    return return_code


def apply(module_list: List):
    return_code = 0
    for module in module_list:
        salt_ret = salt_call(module)
        # Only update return_code if the command returned a non-zero return
        if salt_ret != 0:
            return_code = salt_ret
    return return_code


def check_apply(args: dict):
    if args.apply:
        print('Configuration updated. Applying changes:')
        return apply(args.modules)
    else:
        message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
        answer = input(message)
        while answer.lower() not in [ 'y', 'n', '' ]:
            answer = input(message)
        if answer.lower() in [ 'n', '' ]:
            return 0
        else:
            print('Applying changes:')
            return apply(args.modules)


def enable_disable_modules(args, enable: bool):
    pillar_modules = args.pillar_dict.get('learn', {}).get('modules')
    pillar_mod_names = args.pillar_dict.get('learn', {}).get('modules').keys()
    action_str = 'add' if enable else 'remove'
    if 'all' in args.modules:
        for module, details in pillar_modules.items():
            details['enabled'] = enable
            mod_so_status(action_str, module)
            if enable: pull_image(module)
        args.pillar_dict.update()
        write_pillar(args.pillar, args.pillar_dict)
    else:
        write_needed = False
        for module in args.modules:
            if module in pillar_mod_names:
                if pillar_modules[module]['enabled'] == enable:
                    state_str = 'enabled' if enable else 'disabled'
                    print(f'{module} module already {state_str}.', file=sys.stderr)
                else:
                    if enable and pull_image(module) != 0:
                        continue
                    pillar_modules[module]['enabled'] = enable
                    mod_so_status(action_str, module)
                    write_needed = True
        if write_needed:
            args.pillar_dict.update()
            write_pillar(args.pillar, args.pillar_dict)
    cmd_ret = check_apply(args)
    return cmd_ret


def enable_modules(args):
    enable_disable_modules(args, enable=True)


def disable_modules(args):
    enable_disable_modules(args, enable=False)


def list_modules(*_):
    print('Available ML modules:')
    for module, details in get_learn_modules().items():
        print(f'  - { module } : {details["description"]}')
    return 0
def main():
    beta_str = 'BETA - SUBJECT TO CHANGE\n'
    apply_help = 'After ACTION the chosen modules, apply any necessary salt states.'
    enable_apply_help = apply_help.replace('ACTION', 'enabling')
    disable_apply_help = apply_help.replace('ACTION', 'disabling')

    signal.signal(signal.SIGINT, sigint_handler)

    if os.geteuid() != 0:
        print('You must run this script as root', file=sys.stderr)
        sys.exit(1)

    main_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
    subcommand_desc = textwrap.dedent(
        """\
        enable    Enable one or more ML modules.
        disable   Disable one or more ML modules.
        list      List all available ML modules.
        """
    )
    subparsers = main_parser.add_subparsers(title='commands', description=subcommand_desc, metavar='', dest='command')

    module_help_str = 'One or more ML modules, which can be listed using \'so-learn list\'. Use the keyword \'all\' to apply the action to all available modules.'

    enable = subparsers.add_parser('enable')
    enable.set_defaults(func=enable_modules)
    enable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
    enable.add_argument('--apply', action='store_const', const=True, required=False, help=enable_apply_help)

    disable = subparsers.add_parser('disable')
    disable.set_defaults(func=disable_modules)
    disable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
    disable.add_argument('--apply', action='store_const', const=True, required=False, help=disable_apply_help)

    list = subparsers.add_parser('list')
    list.set_defaults(func=list_modules)

    args = main_parser.parse_args(sys.argv[1:])
    args.pillar = find_minion_pillar()
    args.pillar_dict = create_pillar_if_not_exist(args.pillar, read_pillar(args.pillar))

    if hasattr(args, 'func'):
        exit_code = args.func(args)
    else:
        if args.command is None:
            print(beta_str)
            main_parser.print_help()
            sys.exit(0)
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
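For context, the deleted so-learn script above maintained a 'learn' block in the manager's minion pillar, seeded from get_learn_modules(). A minimal sketch of what create_pillar_if_not_exist() would write, assuming a hypothetical 16-core manager (cpu_period = int(16 * 0.25) * 10000):

# Sketch of the 'learn' pillar block written by so-learn (cpu_period varies with core count)
learn:
  modules:
    logscan:
      cpu_period: 40000
      enabled: False
      description: Scan log files against pre-trained models to alert on anomalies.

Enabling the module with 'so-learn enable logscan --apply' would flip enabled to True, add so-logscan to so-status.conf, and apply the learn.logscan state shown further down in this diff.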

View File

@@ -5,8 +5,9 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
if [ $OPERATION != 'setup' ]; then
  . /usr/sbin/so-common
fi
if [[ $# -lt 1 ]]; then
echo "Usage: $0 -o=<operation> -m=[id]"
@@ -160,23 +161,14 @@ function add_patch_pillar_to_minion() {
function add_sensor_to_minion() {
  echo "sensor:" >> $PILLARFILE
  echo "  interface: '$INTERFACE'" >> $PILLARFILE
  echo "  zeekpin: False" >> $PILLARFILE
  echo "  zeekpins:" >> $PILLARFILE
  echo "    - 1" >> $PILLARFILE
  echo "  zeek_lbprocs: $CORECOUNT" >> $PILLARFILE
  echo "  suripin: False" >> $PILLARFILE
  echo "  suripins:" >> $PILLARFILE
  echo "    - 2" >> $PILLARFILE
  echo "  suriprocs: $CORECOUNT" >> $PILLARFILE
  echo "  mtu: 9000" >> $PILLARFILE
  echo "  uniqueid: $(date '+%s')" >> $PILLARFILE
  echo "steno:" >> $PILLARFILE
  echo "  stenopin: False" >> $PILLARFILE
  echo "  stenopins:" >> $PILLARFILE
  echo "    - 3" >> $PILLARFILE
  echo "  enabled: True" >> $PILLARFILE
  echo "  disks:" >> $PILLARFILE
  echo "    - '/some/path'" >> $PILLARFILE
  echo "zeek:" >> $PILLARFILE
  echo "  config:" >> $PILLARFILE
  echo "    lb_procs: '$CORECOUNT'" >> $PILLARFILE
  echo "suricata:" >> $PILLARFILE
  echo "  config:" >> $PILLARFILE
  echo "    af-packet:" >> $PILLARFILE
  echo "      threads: '$CORECOUNT'" >> $PILLARFILE
}
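For reference, a sketch of the pillar fragment the reworked add_sensor_to_minion() above would append to $PILLARFILE, assuming a hypothetical sensor with INTERFACE=bond0 and CORECOUNT=4 (the uniqueid value is illustrative):

sensor:
  interface: 'bond0'
  mtu: 9000
  uniqueid: 1663252402
steno:
  enabled: True
  disks:
    - '/some/path'
zeek:
  config:
    lb_procs: '4'
suricata:
  config:
    af-packet:
      threads: '4'

The per-tool pin and process settings that used to live under 'sensor' now ride along under each tool's own 'config' key, which lines up with the defaults-based merging shown in the Suricata files below.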
function createSTANDALONE() {

View File

@@ -1,7 +0,0 @@
[global]
ts_format = iso8601
scan_interval = 30s
log_level = info
[kratos]
log_path = kratos/kratos.log

View File

@@ -1,19 +0,0 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% set module_dict = salt['pillar.get']('learn:modules', {}) %}
{% if module_dict.items()|length != 0 %}
include:
{% for module, _ in module_dict.items() %}
  - 'learn.{{ module }}'
{% endfor %}
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed
{% endif %}
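For a sense of what this deleted state produced: with only the single 'logscan' module defined by so-learn, the include loop above would render to roughly:

include:
  - 'learn.logscan'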

View File

@@ -1,58 +0,0 @@
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set logscan_cpu_period = salt['pillar.get']('learn:modules:logscan:cpu_period', 20000) %}
{% set enabled = salt['pillar.get']('learn:modules:logscan:enabled', False) %}

{% if enabled %}
{% set container_action = 'running' %}
{% else %}
{% set container_action = 'absent' %}
{% endif %}

logscan_data_dir:
  file.directory:
    - name: /nsm/logscan/data
    - user: 939
    - group: 939
    - makedirs: True

logscan_conf_dir:
  file.directory:
    - name: /opt/so/conf/logscan
    - user: 939
    - group: 939
    - makedirs: True

logscan_conf:
  file.managed:
    - name: /opt/so/conf/logscan/logscan.conf
    - source: salt://learn/files/logscan.conf
    - user: 939
    - group: 939
    - mode: 600

logscan_log_dir:
  file.directory:
    - name: /opt/so/log/logscan
    - user: 939
    - group: 939

so-logscan:
  docker_container.{{ container_action }}:
{% if container_action == 'running' %}
    - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logscan:{{ VERSION }}
    - hostname: logscan
    - name: so-logscan
    - binds:
      - /nsm/logscan/data:/logscan/data:rw
      - /opt/so/conf/logscan/logscan.conf:/logscan/logscan.conf:ro
      - /opt/so/log/logscan:/logscan/output:rw
      - /opt/so/log:/logscan/logs:ro
    - cpu_period: {{ logscan_cpu_period }}
    - require:
      - file: logscan_conf
{% else %}
    - force: true
{% endif %}

salt/pcap/defaults.yaml (new file, 11 lines added)
View File

@@ -0,0 +1,11 @@
pcap:
  enabled: True
  config:
    maxdirectoryfiles: 30000
    diskfreepercentage: 10
    blocks: 2048
    preallocate_file_mb: 4096
    aiops: 128
    stenopin: False
    stenopins: []
    disks: []
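Because these are defaults, a minion pillar only needs to carry the keys it wants to change. A hypothetical override that doubles the retention window and keeps more disk free might look like this (illustrative values, assuming the pcap state merges pillar values over this file):

pcap:
  config:
    maxdirectoryfiles: 60000
    diskfreepercentage: 20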

View File

@@ -1,12 +1,24 @@
pcap:
  enabled:
    description: Enable or Disable Stenographer on all sensors or a single sensor
  config:
    enabled:
      description: Enable or Disable Stenographer on all sensors or a single sensor
    maxfiles:
      description: The maximum number of packet/index files to create before cleaning old ones up.
    maxdirectoryfiles:
      description: The maximum number of packet/index files to create before deleting old files. The default is about 8 days regardless of free space.
    diskfreepercentage:
      description: The disk space percentage to always keep free for pcap.
    blocks:
      description: The number of 1MB packet blocks used by AF_PACKET to store packets in memory, per thread. You shouldn't need to change this.
      advanced: True
    preallocate_file_mb:
      description: File size to pre-allocate for individual pcap files. You shouldn't need to change this.
      advanced: True
    aiops:
      description: The max number of async writes to allow at once.
      advanced: True
    pin_to_cpu:
      description: Enable CPU pinning for PCAP.
    cpus_to_pin_to:
      description: CPU to pin PCAP to. Currently only a single CPU is supported.
    disks:
      description: List of disks to use for PCAP. This is currently not used.
      advanced: True
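As a worked example of the advanced options documented above, a hypothetical tuning override for a busy sensor could raise the per-thread block count and the number of in-flight async writes (illustrative values, not recommendations):

pcap:
  config:
    blocks: 4096
    preallocate_file_mb: 4096
    aiops: 256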

View File

@@ -1,15 +1,14 @@
 {% import_yaml 'suricata/defaults.yaml' as suricata_defaults with context %}
+{% set suricata_pillar = pillar.suricata %}
+{% set surimerge = salt['defaults.merge'](suricata_defaults, suricata_pillar, in_place=False) %}
 {% load_yaml as afpacket %}
+af-packet:
-  - interface: {{ salt['pillar.get']('sensor:interface', 'bond0') }}
-    cluster-id: 59
-    cluster-type: cluster_flow
-    defrag: yes
-    use-mmap: yes
-    threads: {{ salt['pillar.get']('sensor:suriprocs', salt['pillar.get']('sensor:suripins') | length) }}
-    tpacket-v3: yes
-    ring-size: {{ salt['pillar.get']('sensor:suriringsize', '5000') }}
-  - interface: default
-    #threads: auto
-    #use-mmap: no
-    #tpacket-v3: yes
+  - interface: {{ surimerge.suricata.config['af-packet'].interface }}
+    cluster-id: {{ surimerge.suricata.config['af-packet']['cluster-id'] }}
+    cluster-type: {{ surimerge.suricata.config['af-packet']['cluster-type'] }}
+    defrag: {{ surimerge.suricata.config['af-packet'].defrag }}
+    use-mmap: {{ surimerge.suricata.config['af-packet']['use-mmap'] }}
+    threads: {{ surimerge.suricata.config['af-packet'].threads }}
+    tpacket-v3: {{ surimerge.suricata.config['af-packet']['tpacket-v3'] }}
+    ring-size: {{ surimerge.suricata.config['af-packet']['ring-size'] }}
 {% endload %}
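With no pillar overrides, the merged values come straight from suricata/defaults.yaml (the following file in this diff), so the template above would render an af-packet block roughly like this (a sketch of the rendered YAML, not captured output):

af-packet:
  - interface: bond0
    cluster-id: 59
    cluster-type: cluster_flow
    defrag: true
    use-mmap: true
    threads: 1
    tpacket-v3: true
    ring-size: 5000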

View File

@@ -1,5 +1,28 @@
suricata:
  config:
    threading:
      set-cpu-affinity: 'no'
      detect-thread-ratio: 1.0
      cpu-affinity:
        - management-cpu-set:
            cpu: []
        - receive-cpu-set:
            cpu: []
        - worker-cpu-set:
            cpu: []
            mode: exclusive
            threads: 1
            prio:
              default: high
    af-packet:
      interface: bond0
      cluster-id: 59
      cluster-type: cluster_flow
      defrag: true
      use-mmap: true
      threads: 1
      tpacket-v3: true
      ring-size: 5000
    vars:
      address-groups:
        HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]"
@@ -125,11 +148,6 @@ suricata:
        enabled: "no"
        facility: local5
        format: "[%i] <%d> -- "
    pcap:
      - interface: eth0
      - interface: default
    pcap-file:
      checksum-checks: auto
    app-layer:
      protocols:
        krb5:
@@ -227,6 +245,14 @@ suricata:
          enabled: "yes"
        sip:
          enabled: "yes"
        rfb:
          enabled: 'yes'
          detection-ports:
            dp: 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909
        mqtt:
          enabled: 'no'
        http2:
          enabled: 'no'
    asn1-max-frames: 256
    run-as:
      user: suricata
@@ -348,9 +374,6 @@ suricata:
      include-mpm-stats: false
    mpm-algo: auto
    spm-algo: auto
    threading:
      set-cpu-affinity: "yes"
      detect-thread-ratio: 1.0
    luajit:
      states: 128
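Since the threading and af-packet sections are now ordinary defaults, a sensor can override just the pieces it cares about in its minion pillar and let everything else fall through from this file. A minimal sketch with illustrative values:

suricata:
  config:
    af-packet:
      threads: 8
      ring-size: 10000
    threading:
      set-cpu-affinity: 'yes'
      cpu-affinity:
        - worker-cpu-set:
            cpu: [ 2, 3, 4, 5 ]
            mode: exclusive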

View File

@@ -1,70 +1,22 @@
{% import_yaml 'suricata/defaults.yaml' as suricata_defaults with context %}
{% import_yaml 'suricata/suricata_meta.yaml' as suricata_meta with context %}
{% from 'suricata/afpacket.map.jinja' import afpacket %}
{% set suricata_pillar = salt['pillar.get']('suricata:config', {}) %}
{% set default_evelog_index = [] %}
{% set default_filestore_index = [] %}
{% set surimeta_evelog_index = [] %}
{% set surimeta_filestore_index = [] %}
{% set suricata_pillar = pillar.suricata %}
{% set surimerge = salt['defaults.merge'](suricata_defaults, suricata_pillar, in_place=False) %}
{% if salt['pillar.get']('sensor:hnsensor') %}
{% load_yaml as homenet %}
HOME_NET: "[{{salt['pillar.get']('sensor:hnsensor')}}]"
{% endload %}
{% else %}
{% load_yaml as homenet %}
HOME_NET: "[{{salt['pillar.get']('global:hnmanager', '')}}]"
{% endload %}
{% endif %}
{% load_yaml as afpacket %}
- interface: {{ surimerge.suricata.config['af-packet'].interface }}
  cluster-id: {{ surimerge.suricata.config['af-packet']['cluster-id'] }}
  cluster-type: {{ surimerge.suricata.config['af-packet']['cluster-type'] }}
  defrag: {{ surimerge.suricata.config['af-packet'].defrag }}
  use-mmap: {{ surimerge.suricata.config['af-packet']['use-mmap'] }}
  threads: {{ surimerge.suricata.config['af-packet'].threads }}
  tpacket-v3: {{ surimerge.suricata.config['af-packet']['tpacket-v3'] }}
  ring-size: {{ surimerge.suricata.config['af-packet']['ring-size'] }}
{% endload %}
{% do suricata_defaults.suricata.config.update({'af-packet': afpacket}) %}
{% set hardware_header = 15 %}
{% set default_packet_size = salt['grains.filter_by']({
'*_eval': {
'default-packet-size': salt['pillar.get']('sensor:mtu', 1500) + hardware_header,
},
'*_helixsensor': {
'default-packet-size': salt['pillar.get']('sensor:mtu', 9000) + hardware_header,
},
'*': {
'default-packet-size': salt['pillar.get']('sensor:mtu', 1500) + hardware_header,
},
},grain='id') %}
{# Find the index of eve-log so it can be updated later #}
{% for li in suricata_defaults.suricata.config.outputs %}
{% if 'eve-log' in li.keys() %}
{% do default_evelog_index.append(loop.index0) %}
{% endif %}
{% if 'file-store' in li.keys() %}
{% do default_filestore_index.append(loop.index0) %}
{% endif %}
{% load_yaml as outputs %}
{% for le, ld in surimerge.suricata.config.outputs.items() %}
- {{ le }}: {{ ld }}
{% endfor %}
{% set default_evelog_index = default_evelog_index[0] %}
{% set default_filestore_index = default_filestore_index[0] %}
{# Find the index of eve-log so it can be grabbed later #}
{% for li in suricata_meta.suricata.config.outputs %}
{% if 'eve-log' in li.keys() %}
{% do surimeta_evelog_index.append(loop.index0) %}
{% endif %}
{% if 'file-store' in li.keys() %}
{% do surimeta_filestore_index.append(loop.index0) %}
{% endif %}
{% endfor %}
{% set surimeta_evelog_index = surimeta_evelog_index[0] %}
{% set surimeta_filestore_index = surimeta_filestore_index[0] %}
{% if salt['pillar.get']('global:mdengine', 'ZEEK') == 'SURICATA' %}
{% do suricata_defaults.suricata.config.outputs[default_evelog_index]['eve-log'].types.extend(suricata_meta.suricata.config.outputs[surimeta_evelog_index]['eve-log'].types) %}
{% do suricata_defaults.suricata.config.outputs[default_filestore_index]['file-store'].update({'enabled':suricata_meta.suricata.config.outputs[surimeta_filestore_index]['file-store']['enabled']}) %}
{% endif %}
{% do suricata_defaults.suricata.config.update(default_packet_size) %}
{% do suricata_defaults.suricata.config.update(afpacket) %}
{% do suricata_defaults.suricata.config.vars['address-groups'].update(homenet) %}
{% if salt['pillar.get']('sensor:suriprocs', salt['pillar.get']('sensor:suripins', {})) %}
{% from 'suricata/threading.map.jinja' import cpu_affinity with context %}
{% do suricata_defaults.suricata.config.threading.update(cpu_affinity) %}
{% endif %}
{% do salt['defaults.merge'](suricata_defaults.suricata.config, suricata_pillar, in_place=True) %}
{% endload %}
{% do suricata_defaults.suricata.config.update({'outputs': outputs}) %}
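The key mechanism above is salt['defaults.merge'], which overlays the minion's suricata pillar onto defaults.yaml before the config is rendered; pillar keys win, untouched defaults survive. A small sketch of the assumed behaviour:

# defaults.yaml provides, e.g.:  suricata.config.af-packet.threads: 1, ring-size: 5000
# the minion pillar overrides:   suricata.config.af-packet.threads: 8
# merged result consumed by the template above:
suricata:
  config:
    af-packet:
      threads: 8        # from the pillar
      ring-size: 5000   # carried over from defaults.yaml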

View File

@@ -114,7 +114,6 @@ base:
{%- endif %}
- docker_clean
- pipeline.load
- learn
'*_manager and G@saltversion:{{saltversion}}':
- match: compound
@@ -160,7 +159,6 @@ base:
- playbook
- docker_clean
- pipeline.load
- learn
'*_standalone and G@saltversion:{{saltversion}}':
- match: compound
@@ -215,7 +213,6 @@ base:
- docker_clean
- elastic-fleet
- pipeline.load
- learn
'*_searchnode and G@saltversion:{{saltversion}}':
- match: compound
@@ -281,7 +278,6 @@ base:
- playbook
- docker_clean
- pipeline.load
- learn
'*_heavynode and G@saltversion:{{saltversion}}':
- match: compound
@@ -348,7 +344,6 @@ base:
- schedule
- docker_clean
- pipeline.load
- learn
'*_receiver and G@saltversion:{{saltversion}}':
- match: compound

View File

@@ -1,29 +0,0 @@
zeek:
  policy:
    file_extraction:
      - application/x-dosexec: exe
      - application/pdf: pdf
      - application/msword: doc
      - application/vnd.ms-powerpoint: doc
      - application/rtf: doc
      - application/vnd.ms-word.document.macroenabled.12: doc
      - application/vnd.ms-word.template.macroenabled.12: doc
      - application/vnd.ms-powerpoint.template.macroenabled.12: doc
      - application/vnd.ms-excel: doc
      - application/vnd.ms-excel.addin.macroenabled.12: doc
      - application/vnd.ms-excel.sheet.binary.macroenabled.12: doc
      - application/vnd.ms-excel.template.macroenabled.12: doc
      - application/vnd.ms-excel.sheet.macroenabled.12: doc
      - application/vnd.openxmlformats-officedocument.presentationml.presentation: doc
      - application/vnd.openxmlformats-officedocument.presentationml.slide: doc
      - application/vnd.openxmlformats-officedocument.presentationml.slideshow: doc
      - application/vnd.openxmlformats-officedocument.presentationml.template: doc
      - application/vnd.openxmlformats-officedocument.spreadsheetml.sheet: doc
      - application/vnd.openxmlformats-officedocument.spreadsheetml.template: doc
      - application/vnd.openxmlformats-officedocument.wordprocessingml.document: doc
      - application/vnd.openxmlformats-officedocument.wordprocessingml.template: doc
      - application/vnd.ms-powerpoint.addin.macroenabled.12: doc
      - application/vnd.ms-powerpoint.slide.macroenabled.12: doc
      - application/vnd.ms-powerpoint.presentation.macroenabled.12: doc
      - application/vnd.ms-powerpoint.slideshow.macroenabled.12: doc
      - application/vnd.openxmlformats-officedocument: doc

View File

@@ -6,7 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
-{% from "zeek/map.jinja" import ZEEKOPTIONS with context %}
+{% from "zeek/config.map.jinja" import ZEEKOPTIONS with context %}
{% set VERSION = salt['pillar.get']('global:soversion') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}

View File

@@ -510,7 +510,8 @@ if ! [[ -f $install_opt_file ]]; then
export MAINIP=$MAINIP
export PATCHSCHEDULENAME=$PATCHSCHEDULENAME
export INTERFACE="bond0"
so-minion -o=setup
export CORECOUNT=$lb_procs
logCmd "so-minion -o=setup"
title "Creating Global SLS"
if [[ $is_airgap ]]; then

File diff suppressed because it is too large.