Merge pull request #837 from Security-Onion-Solutions/issue/641

Issue/641
Josh Patterson authored 2020-06-11 14:22:28 -04:00; committed by GitHub
15 changed files with 1036 additions and 803 deletions

View File

@@ -0,0 +1,18 @@
{% import_yaml 'firewall/portgroups.yaml' as default_portgroups %}
{% set default_portgroups = default_portgroups.firewall.aliases.ports %}
{% import_yaml 'firewall/portgroups.local.yaml' as local_portgroups %}
{% if local_portgroups.firewall.aliases.ports %}
{% set local_portgroups = local_portgroups.firewall.aliases.ports %}
{% else %}
{% set local_portgroups = {} %}
{% endif %}
{% set portgroups = salt['defaults.merge'](default_portgroups, local_portgroups, in_place=False) %}
role:
  eval:
  helixsensor:
  master:
  mastersearch:
  standalone:
  searchnode:
  fleet:
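
As a sketch of how this stub gets filled in (hypothetical values): opening the Beats port to the beats_endpoint hostgroup on a standalone deployment could look like the following, where portgroups is the merged dictionary built by the Jinja block above:

role:
  standalone:
    chain:
      DOCKER-USER:
        hostgroups:
          beats_endpoint:
            portgroups:
              - {{ portgroups.beats_5044 }}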

View File

@@ -0,0 +1,46 @@
firewall:
  hostgroups:
    analyst:
      ips:
        delete:
        insert:
    beats_endpoint:
      ips:
        delete:
        insert:
    fleet:
      ips:
        delete:
        insert:
    heavy_node:
      ips:
        delete:
        insert:
    master:
      ips:
        delete:
        insert:
    minion:
      ips:
        delete:
        insert:
    node:
      ips:
        delete:
        insert:
    osquery_endpoint:
      ips:
        delete:
        insert:
    search_node:
      ips:
        delete:
        insert:
    sensor:
      ips:
        delete:
        insert:
    wazuh_endpoint:
      ips:
        delete:
        insert:

View File

@@ -0,0 +1,3 @@
firewall:
  aliases:
    ports:

pillar/firewall/ports.sls
View File

@@ -0,0 +1,62 @@
firewall:
  analyst:
    ports:
      tcp:
        - 80
        - 443
      udp:
  beats_endpoint:
    ports:
      tcp:
        - 5044
  forward_nodes:
    ports:
      tcp:
        - 443
        - 5044
        - 5644
        - 9822
      udp:
  master:
    ports:
      tcp:
        - 1514
        - 3200
        - 3306
        - 4200
        - 5601
        - 6379
        - 8086
        - 8090
        - 9001
        - 9200
        - 9300
        - 9400
        - 9500
      udp:
        - 1514
  minions:
    ports:
      tcp:
        - 3142
        - 4505
        - 4506
        - 5000
        - 8080
        - 8086
        - 55000
  osquery_endpoint:
    ports:
      tcp:
        - 8090
  search_nodes:
    ports:
      tcp:
        - 6379
        - 9300
  wazuh_endpoint:
    ports:
      tcp:
        - 1514
      udp:
        - 1514

View File

@@ -14,7 +14,6 @@ base:
'*_sensor':
- static
- firewall.*
- brologs
- healthcheck.sensor
- minions.{{ grains.id }}
@@ -22,7 +21,6 @@ base:
'*_master or *_mastersearch':
- match: compound
- static
- firewall.*
- data.*
- secrets
- minions.{{ grains.id }}
@@ -33,7 +31,6 @@ base:
'*_eval':
- static
- firewall.*
- data.*
- brologs
- secrets
@@ -44,7 +41,6 @@ base:
- logstash
- logstash.master
- logstash.search
- firewall.*
- data.*
- brologs
- secrets
@@ -54,18 +50,15 @@ base:
'*_node':
- static
- firewall.*
- minions.{{ grains.id }}
'*_heavynode':
- static
- firewall.*
- brologs
- minions.{{ grains.id }}
'*_helix':
- static
- firewall.*
- fireeye
- brologs
- logstash
@@ -74,14 +67,12 @@ base:
'*_fleet':
- static
- firewall.*
- data.*
- secrets
- minions.{{ grains.id }}
'*_searchnode':
- static
- firewall.*
- logstash
- logstash.search
- minions.{{ grains.id }}

View File

@@ -83,7 +83,8 @@ if [ "$SKIP" -eq 0 ]; then
fi
echo "Adding $IP to the $FULLROLE role. This can take a few seconds"
$default_salt_dir/pillar/firewall/addfirewall.sh $FULLROLE $IP
/usr/sbin/so-firewall includehost $FULLROLE $IP
salt-call state.apply firewall queue=True
# Check if Wazuh enabled
if grep -q -R "wazuh: 1" $local_salt_dir/pillar/*; then

View File

@@ -0,0 +1,286 @@
#!/usr/bin/env python3
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import yaml
hostgroupsFilename = "/opt/so/saltstack/local/salt/firewall/hostgroups.local.yaml"
portgroupsFilename = "/opt/so/saltstack/local/salt/firewall/portgroups.local.yaml"
supportedProtocols = ['tcp', 'udp']
def showUsage(args):
print('Usage: {} <COMMAND> [ARGS...]'.format(sys.argv[0]))
print(' Available commands:')
print(' help - Prints this usage information.')
print(' includedhosts - Lists the IPs included in the given group. Args: <GROUP_NAME>')
print(' excludedhosts - Lists the IPs excluded from the given group. Args: <GROUP_NAME>')
print(' includehost - Includes the given IP in the given group. Args: <GROUP_NAME> <IP>')
print(' excludehost - Excludes the given IP from the given group. Args: <GROUP_NAME> <IP>')
print(' removehost - Removes an excluded IP from the given group. Args: <GROUP_NAME> <IP>')
print(' addhostgroup - Adds a new, custom host group. Args: <GROUP_NAME>')
print(' listports - Lists ports in the given group and protocol. Args: <GROUP_NAME> <PORT_PROTOCOL>')
print(' addport - Adds a PORT to the given group. Args: <GROUP_NAME> <PORT_PROTOCOL> <PORT>')
print(' removeport - Removes a PORT from the given group. Args: <GROUP_NAME> <PORT_PROTOCOL> <PORT>')
print(' addportgroup - Adds a new, custom port group. Args: <GROUP_NAME>')
print('')
print(' Where:')
print(' GROUP_NAME - The name of an alias group (Ex: analyst)')
print(' IP - Either a single IP address (Ex: 8.8.8.8) or a CIDR block (Ex: 10.23.0.0/16).')
print(' PORT_PROTOCOL - Must be one of the following: ' + str(supportedProtocols))
print(' PORT - Either a single numeric port (Ex: 443), or a port range (Ex: 8000:8002).')
sys.exit(1)
def loadYaml(filename):
file = open(filename, "r")
return yaml.load(file.read())
def writeYaml(filename, content):
file = open(filename, "w")
return yaml.dump(content, file)
def listIps(name, mode):
content = loadYaml(hostgroupsFilename)
if name not in content['firewall']['hostgroups']:
print('Host group does not exist', file=sys.stderr)
return 4
hostgroup = content['firewall']['hostgroups'][name]
ips = hostgroup['ips'][mode]
if ips is not None:
for ip in ips:
print(ip)
return 0
def addIp(name, ip, mode):
content = loadYaml(hostgroupsFilename)
if name not in content['firewall']['hostgroups']:
print('Host group does not exist', file=sys.stderr)
return 4
hostgroup = content['firewall']['hostgroups'][name]
ips = hostgroup['ips'][mode]
if ips is None:
ips = []
hostgroup['ips'][mode] = ips
if ip not in ips:
ips.append(ip)
else:
print('Already exists', file=sys.stderr)
return 3
writeYaml(hostgroupsFilename, content)
return 0
def removeIp(name, ip, mode, silence = False):
content = loadYaml(hostgroupsFilename)
if name not in content['firewall']['hostgroups']:
print('Host group does not exist', file=sys.stderr)
return 4
hostgroup = content['firewall']['hostgroups'][name]
ips = hostgroup['ips'][mode]
if ips is None:
ips = []
hostgroup['ips'][mode] = ips
if ip in ips:
ips.remove(ip)
else:
if not silence:
print('IP does not exist', file=sys.stderr)
return 3
writeYaml(hostgroupsFilename, content)
return 0
def createProtocolMap():
map = {}
for protocol in supportedProtocols:
map[protocol] = []
return map
def addhostgroup(args):
if len(args) != 1:
print('Missing host group name argument', file=sys.stderr)
showUsage(args)
name = args[0]
content = loadYaml(hostgroupsFilename)
if name in content['firewall']['hostgroups']:
print('Already exists', file=sys.stderr)
return 3
content['firewall']['hostgroups'][name] = { 'ips': { 'insert': [], 'delete': [] }}
writeYaml(hostgroupsFilename, content)
return 0
def addportgroup(args):
if len(args) != 1:
print('Missing port group name argument', file=sys.stderr)
showUsage(args)
name = args[0]
content = loadYaml(portgroupsFilename)
ports = content['firewall']['aliases']['ports']
if ports is None:
ports = {}
content['firewall']['aliases']['ports'] = ports
if name in ports:
print('Already exists', file=sys.stderr)
return 3
ports[name] = createProtocolMap()
writeYaml(portgroupsFilename, content)
return 0
def listports(args):
if len(args) != 2:
print('Missing port group name or port protocol', file=sys.stderr)
showUsage(args)
name = args[0]
protocol = args[1]
if protocol not in supportedProtocols:
print('Port protocol is not supported', file=sys.stderr)
return 5
content = loadYaml(portgroupsFilename)
ports = content['firewall']['aliases']['ports']
if ports is None:
ports = {}
content['firewall']['aliases']['ports'] = ports
if name not in ports:
print('Port group does not exist', file=sys.stderr)
return 3
ports = ports[name][protocol]
if ports is not None:
for port in ports:
print(port)
return 0
def addport(args):
if len(args) != 3:
print('Missing port group name or port protocol, or port argument', file=sys.stderr)
showUsage(args)
name = args[0]
protocol = args[1]
port = args[2]
if protocol not in supportedProtocols:
print('Port protocol is not supported', file=sys.stderr)
return 5
content = loadYaml(portgroupsFilename)
ports = content['firewall']['aliases']['ports']
if ports is None:
ports = {}
content['firewall']['aliases']['ports'] = ports
if name not in ports:
print('Port group does not exist', file=sys.stderr)
return 3
ports = ports[name][protocol]
if ports is None:
ports = []
content['firewall']['aliases']['ports'][name][protocol] = ports
if port in ports:
print('Already exists', file=sys.stderr)
return 3
ports.append(port)
writeYaml(portgroupsFilename, content)
return 0
def removeport(args):
if len(args) != 3:
print('Missing port group name or port protocol, or port argument', file=sys.stderr)
showUsage(args)
name = args[0]
protocol = args[1]
port = args[2]
if protocol not in supportedProtocols:
print('Port protocol is not supported', file=sys.stderr)
return 5
content = loadYaml(portgroupsFilename)
ports = content['firewall']['aliases']['ports']
if ports is None:
ports = {}
content['firewall']['aliases']['ports'] = ports
if name not in ports:
print('Port group does not exist', file=sys.stderr)
return 3
ports = ports[name][protocol]
if ports is None or port not in ports:
print('Port does not exist', file=sys.stderr)
return 3
ports.remove(port)
writeYaml(portgroupsFilename, content)
return 0
def includedhosts(args):
if len(args) != 1:
print('Missing host group name argument', file=sys.stderr)
showUsage(args)
return listIps(args[0], 'insert')
def excludedhosts(args):
if len(args) != 1:
print('Missing host group name argument', file=sys.stderr)
showUsage(args)
return listIps(args[0], 'delete')
def includehost(args):
if len(args) != 2:
print('Missing host group name or ip argument', file=sys.stderr)
showUsage(args)
result = addIp(args[0], args[1], 'insert')
if result == 0:
removeIp(args[0], args[1], 'delete', True)
return result
def excludehost(args):
if len(args) != 2:
print('Missing host group name or ip argument', file=sys.stderr)
showUsage(args)
result = addIp(args[0], args[1], 'delete')
if result == 0:
removeIp(args[0], args[1], 'insert', True)
return result
def removehost(args):
if len(args) != 2:
print('Missing host group name or ip argument', file=sys.stderr)
showUsage(args)
return removeIp(args[0], args[1], 'delete')
def main():
args = sys.argv[1:]
if len(args) == 0:
showUsage(None)
commands = {
"help": showUsage,
"includedhosts": includedhosts,
"excludedhosts": excludedhosts,
"includehost": includehost,
"excludehost": excludehost,
"removehost": removehost,
"listports": listports,
"addport": addport,
"removeport": removeport,
"addhostgroup": addhostgroup,
"addportgroup": addportgroup
}
cmd = commands.get(args[0], showUsage)
code = cmd(args[1:])
sys.exit(code)
if __name__ == "__main__":
main()
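
As a usage sketch (assuming the script is installed as /usr/sbin/so-firewall, per the so-allow change above): running so-firewall includehost analyst 192.168.50.0/24 with a hypothetical analyst network would leave hostgroups.local.yaml looking roughly like:

firewall:
  hostgroups:
    analyst:
      ips:
        delete:
        insert:
          - 192.168.50.0/24

Note that includehost also silently removes the IP from the group's delete list, so an address can never sit in both lists at once.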

View File

@@ -0,0 +1,404 @@
{% import_yaml 'firewall/portgroups.yaml' as portgroups %}
{% set portgroups = portgroups.firewall.aliases.ports %}
role:
eval:
chain:
DOCKER-USER:
hostgroups:
master:
portgroups:
- {{ portgroups.wazuh_endpoint }}
- {{ portgroups.playbook }}
- {{ portgroups.mysql }}
- {{ portgroups.navigator }}
- {{ portgroups.kibana }}
- {{ portgroups.redis }}
- {{ portgroups.influxdb }}
- {{ portgroups.fleet_api }}
- {{ portgroups.cortex }}
- {{ portgroups.elasticsearch_rest }}
- {{ portgroups.elasticsearch_node }}
- {{ portgroups.cortex_es_rest }}
- {{ portgroups.cortex_es_node }}
minion:
portgroups:
- {{ portgroups.acng }}
- {{ portgroups.docker_registry }}
- {{ portgroups.osquery_8080 }}
- {{ portgroups.influxdb }}
- {{ portgroups.wazuh_minion }}
sensor:
portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
search_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
- {{ portgroups.syslog }}
beats_endpoint:
portgroups:
- {{ portgroups.beats_5044 }}
osquery_endpoint:
portgroups:
- {{ portgroups.fleet_api }}
wazuh_endpoint:
portgroups:
- {{ portgroups.wazuh_endpoint }}
analyst:
portgroups:
- {{ portgroups.nginx }}
INPUT:
hostgroups:
anywhere:
portgroups:
- {{ portgroups.ssh }}
dockernet:
portgroups:
- {{ portgroups.all }}
localhost:
portgroups:
- {{ portgroups.all }}
minion:
portgroups:
- {{ portgroups.salt_master }}
helixsensor:
chain:
DOCKER-USER:
hostgroups:
master:
portgroups:
- {{ portgroups.wazuh_endpoint }}
- {{ portgroups.playbook }}
- {{ portgroups.mysql }}
- {{ portgroups.navigator }}
- {{ portgroups.kibana }}
- {{ portgroups.redis }}
- {{ portgroups.influxdb }}
- {{ portgroups.fleet_api }}
- {{ portgroups.cortex }}
- {{ portgroups.elasticsearch_rest }}
- {{ portgroups.elasticsearch_node }}
- {{ portgroups.cortex_es_rest }}
- {{ portgroups.cortex_es_node }}
minion:
portgroups:
- {{ portgroups.acng }}
- {{ portgroups.docker_registry }}
- {{ portgroups.osquery_8080 }}
- {{ portgroups.influxdb }}
- {{ portgroups.wazuh_minion }}
sensor:
portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
search_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
- {{ portgroups.syslog }}
beats_endpoint:
portgroups:
- {{ portgroups.beats_5044 }}
osquery_endpoint:
portgroups:
- {{ portgroups.fleet_api }}
wazuh_endpoint:
portgroups:
- {{ portgroups.wazuh_endpoint }}
analyst:
portgroups:
- {{ portgroups.nginx }}
INPUT:
hostgroups:
anywhere:
portgroups:
- {{ portgroups.ssh }}
dockernet:
portgroups:
- {{ portgroups.all }}
localhost:
portgroups:
- {{ portgroups.all }}
minion:
portgroups:
- {{ portgroups.salt_master }}
master:
chain:
DOCKER-USER:
hostgroups:
master:
portgroups:
- {{ portgroups.wazuh_endpoint }}
- {{ portgroups.playbook }}
- {{ portgroups.mysql }}
- {{ portgroups.navigator }}
- {{ portgroups.kibana }}
- {{ portgroups.redis }}
- {{ portgroups.influxdb }}
- {{ portgroups.fleet_api }}
- {{ portgroups.cortex }}
- {{ portgroups.elasticsearch_rest }}
- {{ portgroups.elasticsearch_node }}
- {{ portgroups.cortex_es_rest }}
- {{ portgroups.cortex_es_node }}
minion:
portgroups:
- {{ portgroups.acng }}
- {{ portgroups.docker_registry }}
- {{ portgroups.osquery_8080 }}
- {{ portgroups.influxdb }}
- {{ portgroups.wazuh_minion }}
sensor:
portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
search_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
- {{ portgroups.syslog }}
beats_endpoint:
portgroups:
- {{ portgroups.beats_5044 }}
osquery_endpoint:
portgroups:
- {{ portgroups.fleet_api }}
wazuh_endpoint:
portgroups:
- {{ portgroups.wazuh_endpoint }}
analyst:
portgroups:
- {{ portgroups.nginx }}
INPUT:
hostgroups:
anywhere:
portgroups:
- {{ portgroups.ssh }}
dockernet:
portgroups:
- {{ portgroups.all }}
localhost:
portgroups:
- {{ portgroups.all }}
minion:
portgroups:
- {{ portgroups.salt_master }}
mastersearch:
chain:
DOCKER-USER:
hostgroups:
master:
portgroups:
- {{ portgroups.wazuh_endpoint }}
- {{ portgroups.playbook }}
- {{ portgroups.mysql }}
- {{ portgroups.navigator }}
- {{ portgroups.kibana }}
- {{ portgroups.redis }}
- {{ portgroups.influxdb }}
- {{ portgroups.fleet_api }}
- {{ portgroups.cortex }}
- {{ portgroups.elasticsearch_rest }}
- {{ portgroups.elasticsearch_node }}
- {{ portgroups.cortex_es_rest }}
- {{ portgroups.cortex_es_node }}
minion:
portgroups:
- {{ portgroups.acng }}
- {{ portgroups.docker_registry }}
- {{ portgroups.osquery_8080 }}
- {{ portgroups.influxdb }}
- {{ portgroups.wazuh_minion }}
sensor:
portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
search_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
- {{ portgroups.syslog }}
beats_endpoint:
portgroups:
- {{ portgroups.beats_5044 }}
osquery_endpoint:
portgroups:
- {{ portgroups.fleet_api }}
wazuh_endpoint:
portgroups:
- {{ portgroups.wazuh_endpoint }}
analyst:
portgroups:
- {{ portgroups.nginx }}
INPUT:
hostgroups:
anywhere:
portgroups:
- {{ portgroups.ssh }}
dockernet:
portgroups:
- {{ portgroups.all }}
localhost:
portgroups:
- {{ portgroups.all }}
minion:
portgroups:
- {{ portgroups.salt_master }}
standalone:
chain:
DOCKER-USER:
hostgroups:
master:
portgroups:
- {{ portgroups.wazuh_endpoint }}
- {{ portgroups.playbook }}
- {{ portgroups.mysql }}
- {{ portgroups.navigator }}
- {{ portgroups.kibana }}
- {{ portgroups.redis }}
- {{ portgroups.influxdb }}
- {{ portgroups.fleet_api }}
- {{ portgroups.cortex }}
- {{ portgroups.elasticsearch_rest }}
- {{ portgroups.elasticsearch_node }}
- {{ portgroups.cortex_es_rest }}
- {{ portgroups.cortex_es_node }}
minion:
portgroups:
- {{ portgroups.acng }}
- {{ portgroups.docker_registry }}
- {{ portgroups.osquery_8080 }}
- {{ portgroups.influxdb }}
- {{ portgroups.wazuh_minion }}
sensor:
portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
search_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
- {{ portgroups.syslog }}
beats_endpoint:
portgroups:
- {{ portgroups.beats_5044 }}
osquery_endpoint:
portgroups:
- {{ portgroups.fleet_api }}
wazuh_endpoint:
portgroups:
- {{ portgroups.wazuh_endpoint }}
analyst:
portgroups:
- {{ portgroups.nginx }}
INPUT:
hostgroups:
anywhere:
portgroups:
- {{ portgroups.ssh }}
dockernet:
portgroups:
- {{ portgroups.all }}
localhost:
portgroups:
- {{ portgroups.all }}
minion:
portgroups:
- {{ portgroups.salt_master }}
searchnode:
chain:
DOCKER-USER:
hostgroups:
master:
portgroups:
- {{ portgroups.elasticsearch_node }}
dockernet:
portgroups:
- {{ portgroups.elasticsearch_node }}
INPUT:
hostgroups:
anywhere:
portgroups:
- {{ portgroups.ssh }}
dockernet:
portgroups:
- {{ portgroups.all }}
localhost:
portgroups:
- {{ portgroups.all }}
sensor:
chain:
INPUT:
hostgroups:
anywhere:
portgroups:
- {{ portgroups.ssh }}
dockernet:
portgroups:
- {{ portgroups.all }}
localhost:
portgroups:
- {{ portgroups.all }}
heavynode:
chain:
DOCKER-USER:
hostgroups:
self:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
INPUT:
hostgroups:
anywhere:
portgroups:
- {{ portgroups.ssh }}
localhost:
portgroups:
- {{ portgroups.all }}
fleet:
chain:
DOCKER-USER:
hostgroups:
self:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.mysql }}
- {{ portgroups.osquery_8080 }}
localhost:
portgroups:
- {{ portgroups.mysql }}
- {{ portgroups.osquery_8080 }}
analyst:
portgroups:
- {{ portgroups.fleet_webui }}
INPUT:
hostgroups:
anywhere:
portgroups:
- {{ portgroups.ssh }}
dockernet:
portgroups:
- {{ portgroups.all }}
localhost:
portgroups:
- {{ portgroups.all }}

View File

@@ -0,0 +1,22 @@
firewall:
  hostgroups:
    anywhere:
      ips:
        delete:
        insert:
          - 0.0.0.0/0
    dockernet:
      ips:
        delete:
        insert:
          - 172.17.0.0/24
    localhost:
      ips:
        delete:
        insert:
          - 127.0.0.1
    self:
      ips:
        delete:
        insert:
          - {{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('node:mainint'))))[0] }}
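
On a minion whose main interface resolves to, say, 10.0.0.5 (a hypothetical address taken from the sensor/master/node mainint pillar fallback chain above), the self group renders to roughly:

firewall:
  hostgroups:
    self:
      ips:
        delete:
        insert:
          - 10.0.0.5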

View File

@@ -1,17 +1,12 @@
# Firewall Magic for the grid
{% if grains['role'] in ['so-eval','so-master','so-helix','so-mastersearch', 'so-standalone'] %}
{% set ip = salt['pillar.get']('static:masterip', '') %}
{% elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %}
{% set ip = salt['pillar.get']('node:mainip', '') %}
{% elif grains['role'] == 'so-sensor' %}
{% set ip = salt['pillar.get']('sensor:mainip', '') %}
{% elif grains['role'] == 'so-fleet' %}
{% set MAININT = salt['pillar.get']('host:mainint') %}
{% set ip = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% endif %}
{% from 'firewall/map.jinja' import hostgroups with context %}
{% from 'firewall/map.jinja' import assigned_hostgroups with context %}
{% set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{% set FLEET_NODE_IP = salt['pillar.get']('static:fleet_ip') %}
create_sysconfig_iptables:
file.touch:
- name: /etc/sysconfig/iptables
- makedirs: True
- unless: 'ls /etc/sysconfig/iptables'
# Quick Fix for Docker being difficult
iptables_fix_docker:
@@ -28,15 +23,6 @@ iptables_fix_fwd:
- position: 1
- target: DOCKER-USER
# Keep localhost in the game
iptables_allow_localhost:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- source: 127.0.0.1
- save: True
# Allow related/established sessions
iptables_allow_established:
iptables.append:
@@ -47,16 +33,6 @@ iptables_allow_established:
- ctstate: 'RELATED,ESTABLISHED'
- save: True
# Always allow SSH so we can like log in
iptables_allow_ssh:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- dport: 22
- proto: tcp
- save: True
# I like pings
iptables_allow_pings:
iptables.append:
@@ -114,593 +90,37 @@ enable_docker_user_established:
- match: conntrack
- ctstate: 'RELATED,ESTABLISHED'
# Add rule(s) for Wazuh manager
enable_wazuh_manager_1514_tcp_{{ip}}:
iptables.insert:
{% for chain, hg in assigned_hostgroups.chain.items() %}
{% for hostgroup, portgroups in assigned_hostgroups.chain[chain].hostgroups.items() %}
{% for action in ['insert', 'delete' ] %}
{% if hostgroups[hostgroup].ips[action] %}
{% for ip in hostgroups[hostgroup].ips[action] %}
{% for portgroup in portgroups.portgroups %}
{% for proto, ports in portgroup.items() %}
{% for port in ports %}
{{action}}_{{chain}}_{{hostgroup}}_{{ip}}_{{port}}_{{proto}}:
iptables.{{action}}:
- table: filter
- chain: DOCKER-USER
- chain: {{ chain }}
- jump: ACCEPT
- proto: tcp
- proto: {{ proto }}
- source: {{ ip }}
- dport: 1514
- dport: {{ port }}
{% if action == 'insert' %}
- position: 1
- save: True
enable_wazuh_manager_1514_udp_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: udp
- source: {{ ip }}
- dport: 1514
- position: 1
- save: True
# Allow syslog
enable_syslog_514_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 514
- position: 1
- save: True
# Rules if you are a Master
{% if grains['role'] in ['so-master', 'so-eval', 'so-helix', 'so-mastersearch', 'so-standalone'] %}
#This should be more granular
iptables_allow_master_docker:
iptables.insert:
- table: filter
- chain: INPUT
- jump: ACCEPT
- source: 172.17.0.0/24
- position: 1
- save: True
{% for ip in pillar.get('masterfw') %}
# Allow Redis
enable_masternode_redis_6379_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 6379
- position: 1
- save: True
enable_masternode_kibana_5601_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 5601
- position: 1
- save: True
enable_masternode_ES_9200_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 9200
- position: 1
- save: True
enable_masternode_ES_9300_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 9300
- position: 1
- save: True
enable_masternode_ES_9400_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 9400
- position: 1
- save: True
enable_masternode_ES_9500_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 9500
- position: 1
- save: True
enable_masternode_influxdb_8086_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 8086
- position: 1
- save: True
enable_masternode_mysql_3306_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 3306
- position: 1
- save: True
enable_master_osquery_8090_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 8090
- position: 1
- save: True
enable_master_playbook_3200_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 3200
- position: 1
- save: True
enable_master_navigator_4200_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 4200
- position: 1
- save: True
enable_master_cortex_9001_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 9001
- position: 1
- save: True
enable_master_cyberchef_9080_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 9080
- position: 1
- save: True
{% endfor %}
# Make it so all the minions can talk to salt and update etc.
{% for ip in pillar.get('minions') %}
enable_salt_minions_4505_{{ip}}:
iptables.insert:
- table: filter
- chain: INPUT
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 4505
- position: 1
- save: True
enable_salt_minions_4506_{{ip}}:
iptables.insert:
- table: filter
- chain: INPUT
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 4506
- position: 1
- save: True
enable_salt_minions_5000_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 5000
- position: 1
- save: True
enable_salt_minions_3142_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 3142
- position: 1
- save: True
enable_minions_influxdb_8086_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 8086
- position: 1
- save: True
enable_minion_osquery_8090_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 8090
- position: 1
- save: True
enable_minion_wazuh_55000_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 55000
- position: 1
- save: True
{% endfor %}
# Allow Forward Nodes to send their beats traffic
{% for ip in pillar.get('forward_nodes') %}
enable_forwardnode_beats_5044_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 5044
- position: 1
- save: True
enable_forwardnode_beats_5644_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 5644
- position: 1
- save: True
enable_forwardnode_sensoroni_443_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 443
- position: 1
- save: True
enable_forwardnode_sensoroni_9822_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 9822
- position: 1
- save: True
{% endfor %}
# Allow Fleet Node to send its beats traffic
{% if FLEET_NODE %}
enable_fleetnode_beats_5644_{{FLEET_NODE_IP}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ FLEET_NODE_IP }}
- dport: 5644
- position: 1
- save: True
{% endif %}
{% for ip in pillar.get('search_nodes') %}
enable_searchnode_redis_6379_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 6379
- position: 1
- save: True
enable_searchnode_ES_9300_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 9300
- position: 1
- save: True
{% endfor %}
# Allow Beats Endpoints to send their beats traffic
{% for ip in pillar.get('beats_endpoint') %}
enable_standard_beats_5044_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 5044
- position: 1
- save: True
{% endfor %}
# Allow OSQuery Endpoints to send their traffic
{% for ip in pillar.get('osquery_endpoint') %}
enable_standard_osquery_8090_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 8090
- position: 1
- save: True
{% endfor %}
# Allow Wazuh Endpoints to send their traffic
{% for ip in pillar.get('wazuh_endpoint') %}
enable_wazuh_endpoint_tcp_1514_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 1514
- position: 1
- save: True
enable_wazuh_endpoint_udp_1514_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: udp
- source: {{ ip }}
- dport: 1514
- position: 1
- save: True
{% endfor %}
# Allow Analysts
{% for ip in pillar.get('analyst') %}
enable_standard_analyst_80_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 80
- position: 1
- save: True
enable_standard_analyst_443_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 443
- position: 1
- save: True
#enable_standard_analyst_3000_{{ip}}:
# iptables.insert:
# - table: filter
# - chain: DOCKER-USER
# - jump: ACCEPT
# - proto: tcp
# - source: {{ ip }}
# - dport: 3000
# - position: 1
# - save: True
#enable_standard_analyst_7000_{{ip}}:
# iptables.insert:
# - table: filter
# - chain: DOCKER-USER
# - jump: ACCEPT
# - proto: tcp
# - source: {{ ip }}
# - dport: 7000
# - position: 1
# - save: True
#enable_standard_analyst_9000_{{ip}}:
# iptables.insert:
# - table: filter
# - chain: DOCKER-USER
# - jump: ACCEPT
# - proto: tcp
# - source: {{ ip }}
# - dport: 9000
# - position: 1
# - save: True
#enable_standard_analyst_9001_{{ip}}:
# iptables.insert:
# - table: filter
# - chain: DOCKER-USER
# - jump: ACCEPT
# - proto: tcp
# - source: {{ ip }}
# - dport: 9001
# - position: 1
# - save: True
# This is temporary for sensoroni testing
#enable_standard_analyst_9822_{{ip}}:
# iptables.insert:
# - table: filter
# - chain: DOCKER-USER
# - jump: ACCEPT
# - proto: tcp
# - source: {{ ip }}
# - dport: 9822
# - position: 1
# - save: True
{% endfor %}
# Rules for search nodes connecting to master
{% endif %}
# Rules if you are a Node
{% if 'node' in grains['role'] %}
#This should be more granular
iptables_allow_docker:
iptables.insert:
- table: filter
- chain: INPUT
- jump: ACCEPT
- source: 172.17.0.0/24
- position: 1
- save: True
enable_docker_ES_9200:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: 172.17.0.0/24
- dport: 9200
- position: 1
- save: True
enable_docker_ES_9300:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: 172.17.0.0/24
- dport: 9300
- position: 1
- save: True
{% for ip in pillar.get('masterfw') %}
enable_cluster_ES_9300_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 9300
- position: 1
- save: True
{% endfor %}
{% endif %}
{% endfor %}
{% endfor %}
{% endfor %}
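For illustration, a single iteration of the loop above — the analyst hostgroup containing a hypothetical address 10.1.2.3, assigned the nginx portgroup — renders a state roughly like:

insert_DOCKER-USER_analyst_10.1.2.3_443_tcp:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: 10.1.2.3
    - dport: 443
    - position: 1
    - save: True

Only insert actions get the position: 1 argument, per the conditional shown above.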
# Rules if you are a Sensor
{% if grains['role'] == 'so-sensor' %}
iptables_allow_sensor_docker:
iptables.insert:
- table: filter
- chain: INPUT
- jump: ACCEPT
- source: 172.17.0.0/24
- position: 1
- save: True
{% endif %}
# Rules if you are a Hot Node
# Rules if you are a Warm Node
# Some Fixer upper type rules
# Drop it like it's hot
# Make the input policy send stuff that doesn't match to be logged and dropped
iptables_drop_all_the_things:
iptables.append:
@@ -708,160 +128,3 @@ iptables_drop_all_the_things:
- chain: LOGGING
- jump: DROP
- save: True
{% if grains['role'] == 'so-heavynode' %}
# Allow Redis
enable_heavynode_redis_6379_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 6379
- position: 1
- save: True
enable_forwardnode_beats_5044_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 5044
- position: 1
- save: True
enable_forwardnode_beats_5644_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 5644
- position: 1
- save: True
{% endif %}
# Rules if you are a Standalone Fleet node
{% if grains['role'] == 'so-fleet' %}
#This should be more granular
iptables_allow_fleetnode_docker:
iptables.insert:
- table: filter
- chain: INPUT
- jump: ACCEPT
- source: 172.17.0.0/24
- position: 1
- save: True
# Allow Redis
enable_fleetnode_redis_6379_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 6379
- position: 1
- save: True
enable_fleetnode_mysql_3306_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 3306
- position: 1
- save: True
enable_fleet_osquery_8080_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 8080
- position: 1
- save: True
enable_fleetnode_mysql_3306_localhost:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: 127.0.0.1
- dport: 3306
- position: 1
- save: True
enable_fleet_osquery_8080_localhost:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: 127.0.0.1
- dport: 8080
- position: 1
- save: True
# Allow Analysts to access Fleet WebUI
{% for ip in pillar.get('analyst') %}
enable_fleetnode_fleet_443_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 443
- position: 1
- save: True
{% endfor %}
# Needed for osquery endpoints to checkin to Fleet API for mgt
{% for ip in pillar.get('osquery_endpoint') %}
enable_fleetnode_8090_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 8090
- position: 1
- save: True
{% endfor %}
# Make it so all the minions can talk to fleet standalone node
{% for ip in pillar.get('minions') %}
enable_minion_fleet_standalone_8090_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 8090
- position: 1
- save: True
{% endfor %}
{% endif %}

salt/firewall/map.jinja
View File

@@ -0,0 +1,45 @@
{% set role = grains.id.split('_') | last %}
{% set translated_pillar_assigned_hostgroups = {} %}
{% import_yaml 'firewall/portgroups.yaml' as default_portgroups %}
{% set default_portgroups = default_portgroups.firewall.aliases.ports %}
{% import_yaml 'firewall/portgroups.local.yaml' as local_portgroups %}
{% if local_portgroups.firewall.aliases.ports %}
{% set local_portgroups = local_portgroups.firewall.aliases.ports %}
{% else %}
{% set local_portgroups = {} %}
{% endif %}
{% set portgroups = salt['defaults.merge'](default_portgroups, local_portgroups, in_place=False) %}
{% set defined_portgroups = portgroups %}
{% import_yaml 'firewall/hostgroups.yaml' as default_hostgroups %}
{% import_yaml 'firewall/hostgroups.local.yaml' as local_hostgroups %}
{% set hostgroups = salt['defaults.merge'](default_hostgroups.firewall.hostgroups, local_hostgroups.firewall.hostgroups, in_place=False) %}
{# This block translates the portgroups defined in the pillar to those defined by portgroups.yaml and portgroups.local.yaml #}
{% if salt['pillar.get']('firewall:assigned_hostgroups:chain') %}
{% for chain, hg in salt['pillar.get']('firewall:assigned_hostgroups:chain').items() %}
{% for pillar_hostgroup, pillar_portgroups in salt['pillar.get']('firewall:assigned_hostgroups:chain')[chain].hostgroups.items() %}
{% do translated_pillar_assigned_hostgroups.update({"chain": {chain: {"hostgroups": {pillar_hostgroup: {"portgroups": []}}}}}) %}
{% for pillar_portgroup in pillar_portgroups.portgroups %}
{% set pillar_portgroup = pillar_portgroup.split('.') | last %}
{% do translated_pillar_assigned_hostgroups.chain[chain].hostgroups[pillar_hostgroup].portgroups.append(defined_portgroups[pillar_portgroup]) %}
{% endfor %}
{% endfor %}
{% endfor %}
{% endif %}
{% import_yaml 'firewall/assigned_hostgroups.map.yaml' as default_assigned_hostgroups %}
{% import_yaml 'firewall/assigned_hostgroups.local.map.yaml' as local_assigned_hostgroups %}
{% if local_assigned_hostgroups.role[role] %}
{% set assigned_hostgroups = salt['defaults.merge'](local_assigned_hostgroups.role[role], default_assigned_hostgroups.role[role], merge_lists=False, in_place=False) %}
{% else %}
{% set assigned_hostgroups = default_assigned_hostgroups.role[role] %}
{% endif %}
{% if translated_pillar_assigned_hostgroups %}
{% do salt['defaults.merge'](assigned_hostgroups, translated_pillar_assigned_hostgroups, merge_lists=True, in_place=True) %}
{% endif %}
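
As a sketch of the pillar translation block above: a minion pillar entry like the following (hypothetical) would be rewritten so that each portgroups string is split on '.' and its last segment looked up in the merged portgroup definitions:

firewall:
  assigned_hostgroups:
    chain:
      DOCKER-USER:
        hostgroups:
          beats_endpoint:
            portgroups:
              - portgroups.beats_5044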

View File

@@ -0,0 +1,87 @@
firewall:
  aliases:
    ports:
      all:
        tcp:
          - '0:65535'
        udp:
          - '0:65535'
      acng:
        tcp:
          - 3142
      beats_5044:
        tcp:
          - 5044
      beats_5644:
        tcp:
          - 5644
      cortex:
        tcp:
          - 9001
      cortex_es_node:
        tcp:
          - 9500
      cortex_es_rest:
        tcp:
          - 9400
      docker_registry:
        tcp:
          - 5000
      elasticsearch_node:
        tcp:
          - 9300
      elasticsearch_rest:
        tcp:
          - 9200
      fleet_api:
        tcp:
          - 8090
      fleet_webui:
        tcp:
          - 443
      influxdb:
        tcp:
          - 8086
      kibana:
        tcp:
          - 5601
      mysql:
        tcp:
          - 3306
      navigator:
        tcp:
          - 4200
      nginx:
        tcp:
          - 80
          - 443
      osquery_8080:
        tcp:
          - 8080
      playbook:
        tcp:
          - 3200
      redis:
        tcp:
          - 6379
      salt_master:
        tcp:
          - 4505
          - 4506
      sensoroni:
        tcp:
          - 443
      ssh:
        tcp:
          - 22
      syslog:
        tcp:
          - 514
      wazuh_minion:
        tcp:
          - 55000
      wazuh_endpoint:
        tcp:
          - 1514
        udp:
          - 1514
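
Custom port groups belong in portgroups.local.yaml, which map.jinja merges over these defaults; a hypothetical example:

firewall:
  aliases:
    ports:
      custom_collector:
        tcp:
          - 6514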

View File

@@ -1,4 +1,4 @@
{%- set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') -%}
{%- set BROVER = salt['pillar.get']('static:broversion', '') -%}
{%- set WAZUH = salt['pillar.get']('static:wazuh', '0') -%}
{%- set THEHIVE = salt['pillar.get']('master:thehive', '0') -%}
{%- set PLAYBOOK = salt['pillar.get']('master:playbook', '0') -%}

View File

@@ -774,17 +774,15 @@ fireeye_pillar() {
# Generate Firewall Templates
firewall_generate_templates() {
local firewall_pillar_path=$local_salt_dir/pillar/firewall
local firewall_pillar_path=$local_salt_dir/salt/firewall
mkdir -p "$firewall_pillar_path"
for i in analyst beats_endpoint forward_nodes masterfw minions osquery_endpoint search_nodes wazuh_endpoint
do
printf '%s\n'\
"$i:"\
" - 127.0.0.1"\
"" > "$firewall_pillar_path"/$i.sls
echo "Added $i Template"
cp ../files/firewall/* /opt/so/saltstack/local/salt/firewall/ >> "$setup_log" 2>&1
for i in analyst beats_endpoint sensor master minion osquery_endpoint search_node wazuh_endpoint; do
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost "$i" 127.0.0.1
done
}
fleet_pillar() {
@@ -1415,18 +1413,21 @@ set_initial_firewall_policy() {
set_main_ip
if [ -f $default_salt_dir/pillar/data/addtotab.sh ]; then chmod +x $default_salt_dir/pillar/data/addtotab.sh; fi
if [ -f $default_salt_dir/pillar/firewall/addfirewall.sh ]; then chmod +x $default_salt_dir/pillar/firewall/addfirewall.sh; fi
if [ -f $default_salt_dir/salt/common/tools/sbin/so-firewall ]; then chmod +x $default_salt_dir/salt/common/tools/sbin/so-firewall; fi
case "$install_type" in
'MASTER')
printf " - %s\n" "$MAINIP" | tee -a $local_salt_dir/pillar/firewall/minions.sls $local_salt_dir/pillar/firewall/masterfw.sls
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost master "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
salt-call state.apply -l info firewall >> $setup_log 2>&1
$default_salt_dir/pillar/data/addtotab.sh mastertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
'EVAL' | 'MASTERSEARCH')
printf " - %s\n" "$MAINIP" | tee -a $local_salt_dir/pillar/firewall/minions.sls\
$local_salt_dir/pillar/firewall/masterfw.sls\
$local_salt_dir/pillar/firewall/forward_nodes.sls\
$local_salt_dir/pillar/firewall/search_nodes.sls
'EVAL' | 'MASTERSEARCH' | 'STANDALONE')
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost master "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost search_node "$MAINIP"
salt-call state.apply -l info firewall >> $setup_log 2>&1
case "$install_type" in
'EVAL')
$default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0 True
@@ -1437,24 +1438,28 @@ set_initial_firewall_policy() {
esac
;;
'HELIXSENSOR')
printf " - %s\n" "$MAINIP" | tee -a $local_salt_dir/pillar/firewall/minions.sls\
$local_salt_dir/pillar/firewall/masterfw.sls\
$local_salt_dir/pillar/firewall/forward_nodes.sls
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost master "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
salt-call state.apply -l info firewall >> $setup_log 2>&1
;;
'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'FLEET')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh minions "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
case "$install_type" in
'SENSOR')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh forward_nodes "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-call state.apply -l info firewall
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0
;;
'SEARCHNODE')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh search_nodes "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost search_node "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-call state.apply -l info firewall
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
'HEAVYNODE')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh forward_nodes "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh search_nodes "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost search_node "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-call state.apply -l info firewall
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
@@ -1534,7 +1539,7 @@ update_sudoers() {
if ! grep -qE '^soremote\ ALL=\(ALL\)\ NOPASSWD:(\/usr\/bin\/salt\-key|\/opt\/so\/saltstack)' /etc/sudoers; then
# Update Sudoers so that soremote can accept keys without a password
echo "soremote ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | tee -a /etc/sudoers
echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/pillar/firewall/addfirewall.sh" | tee -a /etc/sudoers
echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/common/tools/sbin/so-firewall" | tee -a /etc/sudoers
echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/pillar/data/addtotab.sh" | tee -a /etc/sudoers
echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/master/files/add_minion.sh" | tee -a /etc/sudoers
else