Compare commits

...

84 Commits

Author SHA1 Message Date
Mike Reeves
2f1e6fd625 Merge pull request #14773 from Security-Onion-Solutions/2.4/dev
2.4.160
2025-06-25 13:49:06 -04:00
Mike Reeves
6b8ef43cc1 Merge pull request #14772 from Security-Onion-Solutions/2.4.160
2.4.160
2025-06-25 13:02:06 -04:00
Mike Reeves
7e746b87c5 2.4.160 2025-06-25 13:00:26 -04:00
Josh Patterson
2ad2a3110c Merge pull request #14771 from Security-Onion-Solutions/revert-14770-saltupgradechange
Revert "change salt upgrade process"
2025-06-25 12:21:00 -04:00
Josh Patterson
bc24a6c574 Revert "change salt upgrade process" 2025-06-25 12:19:45 -04:00
Josh Patterson
b25bb0faf0 Merge pull request #14770 from Security-Onion-Solutions/saltupgradechange
change salt upgrade process
2025-06-25 11:31:57 -04:00
Josh Patterson
38c74b46b6 change salt upgrade process 2025-06-25 11:05:28 -04:00
Jason Ertel
83ecc02589 Merge pull request #14765 from Security-Onion-Solutions/jertel/wip
fix logging
2025-06-24 11:05:19 -04:00
Jason Ertel
21d9964827 fix logging 2025-06-24 11:03:08 -04:00
Jason Ertel
f3b6d9febb Merge pull request #14764 from Security-Onion-Solutions/jertel/wip
refactor airgap playbook to eliminate dupe code and shrink ISO
2025-06-24 09:39:43 -04:00
Jason Ertel
b052a75e64 refactor airgap playbook to eliminate dupe code and shrink ISO 2025-06-24 09:34:57 -04:00
Josh Brower
6fc7c930a6 Merge pull request #14759 from Security-Onion-Solutions/2.4/fieldmappings
Add support for dns.resolved_ip
2025-06-20 15:08:05 -04:00
Josh Brower
31cd5b1365 Add support for dns.resolved_ip 2025-06-20 15:02:59 -04:00
Josh Brower
92e9bd43ca Merge pull request #14723 from Security-Onion-Solutions/2.4/airgapfix
Create dir if needed
2025-06-09 07:47:59 -04:00
Josh Brower
a600c64229 Create dir if needed 2025-06-09 07:33:02 -04:00
Josh Brower
121dec0180 Merge pull request #14722 from Security-Onion-Solutions/2.4/airgapfix
Add nsm bind
2025-06-08 12:30:58 -04:00
Josh Brower
b451c4c034 Merge pull request #14721 from Security-Onion-Solutions/2.4/SupExtraction
Suppress alerts
2025-06-08 12:25:35 -04:00
Josh Brower
dbdbffa4b0 Add nsm bind 2025-06-08 08:23:09 -04:00
Josh Brower
f360c6ecbc Suppress alerts 2025-06-07 09:29:59 -04:00
Josh Brower
b9ea151846 Merge pull request #14719 from Security-Onion-Solutions/2.4/playbookairgap
Airgap tweaks
2025-06-06 17:52:08 -04:00
Josh Brower
b428573a0a Airgap tweaks 2025-06-06 17:48:49 -04:00
Josh Brower
350e1c9d91 Merge pull request #14718 from Security-Onion-Solutions/2.4/playbookairgap
Add support for Airgap for Playbooks
2025-06-06 16:55:32 -04:00
Josh Brower
a3b5db5945 Add support for Airgap for Playbooks 2025-06-06 16:17:14 -04:00
Jason Ertel
aca54b4645 Merge pull request #14714 from Security-Onion-Solutions/jertel/wip
enable STS for browser redirects
2025-06-05 18:48:46 -04:00
Jason Ertel
643afeeae7 enable STS for browser redirects 2025-06-05 16:02:27 -04:00
Jason Ertel
43e994f2c2 Merge pull request #14711 from Security-Onion-Solutions/jertel/wip
update to new config location
2025-06-04 17:22:13 -04:00
Jason Ertel
ab89858d04 update to new config location 2025-06-04 17:19:53 -04:00
Jason Ertel
3da2c7cabc Merge pull request #14701 from Security-Onion-Solutions/jertel/wip
upgrade registry to 3.0.0
2025-06-04 09:22:03 -04:00
Jason Ertel
832d66052e upgrade registry to 3.0.0 2025-06-04 09:13:54 -04:00
coreyogburn
add538f6dd Merge pull request #14700 from Security-Onion-Solutions/cogburn/new-playbooks-repo
Updated Playbook Repo Config
2025-06-03 14:21:23 -06:00
Corey Ogburn
fc9107f129 Updated Playbook Repo Config
The repo and folder have changed. We're splitting out playbooks into their own repo: github.com/security-onion-solutions/securityonion-resources-playbooks.
2025-06-03 13:33:30 -06:00
Jorge Reyes
d9790b04f6 Merge pull request #14676 from Security-Onion-Solutions/reyesj2/fixsystemtime
fix system integration time overwrite and delete unused ingest pipeline
2025-06-03 14:01:42 -05:00
Jorge Reyes
88fa04b0f6 Merge pull request #14698 from Security-Onion-Solutions/reyesj2/esidxinfo
add so-elasticsearch-index-growth
2025-06-03 09:37:54 -05:00
reyesj2
d240fca721 remove usage of temp file 2025-06-03 08:45:04 -05:00
reyesj2
4d6171bde6 rename script
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2025-06-03 07:32:12 -05:00
reyesj2
6238a5b3ed tighten up search timeframe 2025-06-02 16:31:26 -05:00
reyesj2
061600fa7a shebang line 2025-06-02 15:55:46 -05:00
reyesj2
1b89cc6818 so-elasticsearch-index-growth script 2025-06-02 15:41:03 -05:00
Josh Brower
7f8bf850a2 Merge pull request #14697 from Security-Onion-Solutions/2.4/playbook-updates
Use Stable branch
2025-06-02 13:13:43 -04:00
Josh Brower
0277891392 Use Stable branch 2025-06-02 13:10:13 -04:00
Doug Burks
773606d876 Merge pull request #14691 from Security-Onion-Solutions/dougburks-patch-1
add echo to end of so-elasticsearch-ilm-start and so-elasticsearch-ilm-stop
2025-05-30 12:03:32 -04:00
Doug Burks
bf38055a6c add echo to end of so-elasticsearch-ilm-stop 2025-05-30 11:41:50 -04:00
Doug Burks
90b8d6b2f7 add echo to end of so-elasticsearch-ilm-start 2025-05-30 11:41:11 -04:00
Doug Burks
2d78fa1a41 Merge pull request #14689 from Security-Onion-Solutions/dougburks-patch-1
FIX: so-elasticsearch-ilm-start needs shebang #14688
2025-05-30 09:58:18 -04:00
Doug Burks
45d541d4f2 FIX: so-elasticsearch-ilm-start needs shebang #14688 2025-05-30 09:55:53 -04:00
Doug Burks
8d42739030 Merge pull request #14687 from Security-Onion-Solutions/dougburks-patch-1
FIX: so-suricata-testrule should disable pcap logging #14685
2025-05-30 09:26:37 -04:00
Doug Burks
27358137f2 FIX: so-suricata-testrule should disable pcap logging #14685 2025-05-30 09:24:41 -04:00
Doug Burks
a54b9ddbe4 Merge pull request #14683 from Security-Onion-Solutions/dougburks-patch-1
FIX: Improve annotation for Elasticsearch index deletion #14682
2025-05-29 15:26:35 -04:00
Doug Burks
58936b31d5 FIX: Improve annotation for Elasticsearch index deletion #14682 2025-05-29 15:19:21 -04:00
reyesj2
fcdacc3b0d fix system integration time overwrite and delete unused ingest pipeline 2025-05-29 12:21:28 -05:00
Jorge Reyes
9df9cc2247 Merge pull request #14668 from Security-Onion-Solutions/reyesj2-patch-1
use zeek network.community_id when available
2025-05-28 12:15:18 -05:00
Jorge Reyes
d3ee5ed7b8 use zeek network.community_id when available 2025-05-28 09:20:41 -05:00
Jason Ertel
db08ac9022 Merge pull request #14651 from Security-Onion-Solutions/jertel/mhf
Backport Hotfix to dev
2025-05-22 13:44:36 -04:00
Jason Ertel
ad5a27f991 clear out hf 2025-05-22 13:39:59 -04:00
Mike Reeves
07ec302267 Merge pull request #14650 from Security-Onion-Solutions/hotfix/2.4.150
Hotfix 2.4.150
2025-05-22 13:35:33 -04:00
Mike Reeves
112704e340 Merge pull request #14649 from Security-Onion-Solutions/hf24150
2.4.150 Hotfix
2025-05-22 13:25:50 -04:00
Mike Reeves
e6753440f8 2.4.150 Hotfix 2025-05-22 13:18:13 -04:00
Mike Reeves
00f811ce31 Merge pull request #14646 from Security-Onion-Solutions/hotfix4150
Update HOTFIX
2025-05-21 14:38:00 -04:00
Mike Reeves
ddd023c69a Update so-docker-prune 2025-05-21 13:47:45 -04:00
Mike Reeves
2911025c0c Update HOTFIX 2025-05-21 13:45:32 -04:00
Josh Brower
2e8ab648fd Merge pull request #14643 from Security-Onion-Solutions/2.4/parsingfix
Tighten parsing
2025-05-21 12:08:10 -04:00
Josh Brower
b753d40861 Tighten parsing 2025-05-20 17:06:11 -04:00
Josh Brower
2fff6232c1 Merge pull request #14638 from Security-Onion-Solutions/2.4/playbooks-parsing
Add parsing for Playbook
2025-05-19 18:06:05 -04:00
coreyogburn
f751c82e1c Merge pull request #14639 from Security-Onion-Solutions/cogburn/ruleset-name
Add RulesetName to Rule Repos
2025-05-19 15:40:02 -06:00
Corey Ogburn
39f74fe547 Use the new JSON object editor for RulesRepos config entries 2025-05-19 15:38:45 -06:00
Corey Ogburn
11fb33fdeb Add RulesetName to Rule Repos
Fill in `rulesetName` in the rules repos of the ElastAlert and Strelka engines. These will act as examples for anybody adding their own repos to these lists. The field is not required, but it helps avoid collisions when managing repos, since the value is used for the folder name. When not present, the final folder of the repo URL is used as the rulesetName and as the folder name on disk.

Note that rulesetNames including a `/` will create extra folders in the path, and the rulesetName will retain the slash; e.g., `rulesetName="joesecurity/sigma-rules"` creates the nested structure `reposFolder/joesecurity/sigma-rules` containing the contents of the repo. All rules imported from this repo will have the ruleset `joesecurity/sigma-rules`.
2025-05-19 14:19:56 -06:00
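A minimal shell sketch of the folder behavior described above (paths are hypothetical; `reposFolder` stands in for the engine's repo directory):

```
# A rulesetName containing "/" nests folders under the repos directory:
reposFolder=/nsm/rules/repos          # assumed root, for illustration only
rulesetName="joesecurity/sigma-rules"
mkdir -p "$reposFolder/$rulesetName"  # -> /nsm/rules/repos/joesecurity/sigma-rules
# rules imported from this repo carry the ruleset "joesecurity/sigma-rules"
```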
Josh Brower
58f4db95ea Create playbooks dir 2025-05-19 15:31:50 -04:00
Josh Brower
b55cb257b6 Add parsing for Playbook 2025-05-19 13:25:27 -04:00
Jorge Reyes
2948577b0e Merge pull request #14629 from Security-Onion-Solutions/reyesj2-wt2
logstash isn't running on receivers or manager when kafka is the glob…
2025-05-16 10:27:18 -05:00
reyesj2
870a9ff80c dedup 2025-05-16 10:24:09 -05:00
reyesj2
689db57f5f logstash isn't running on receivers or manager when kafka is the global.pipeline 2025-05-16 10:05:38 -05:00
coreyogburn
2768722132 Merge pull request #14623 from Security-Onion-Solutions/cogburn/playbooks
Cogburn/playbooks
2025-05-15 13:27:02 -06:00
Josh Brower
df103b3dca Spacing 2025-05-14 16:36:59 -04:00
Josh Brower
0542c77137 Remove wip config 2025-05-14 16:35:09 -04:00
Josh Brower
9022dc24fb Add Parsing for Playbooks 2025-05-14 13:19:50 -06:00
Corey Ogburn
78b7068638 Playbook Settings
Maps a folder from the manager's SOC config directory into SOC's sensoroni folder for storing the playbook repo.

Added playbook module section with default values.
2025-05-14 13:19:49 -06:00
Mike Reeves
70339b9a94 Merge pull request #14621 from Security-Onion-Solutions/TOoSmOotH-patch-1
Update soup
2025-05-14 13:48:53 -04:00
Mike Reeves
5c8460fd26 Update soup 2025-05-14 13:47:26 -04:00
Mike Reeves
69e90e1e70 Update soup
Souper Duper!
2025-05-14 13:41:08 -04:00
Jason Ertel
8c5ea19d3c Merge pull request #14619 from Security-Onion-Solutions/jertel/wip
improve consistency
2025-05-14 09:31:56 -04:00
Jason Ertel
82562f89f6 improve consistency 2025-05-14 09:23:35 -04:00
Mike Reeves
ede36b5ef8 Merge pull request #14614 from Security-Onion-Solutions/TOoSmOotH-patch-1
Get ready for .160
2025-05-12 10:49:46 -04:00
Mike Reeves
fd00a4db85 Update VERSION 2025-05-12 10:48:52 -04:00
Mike Reeves
510c7a0c19 Update 2-4.yml 2025-05-12 10:48:12 -04:00
34 changed files with 509 additions and 116 deletions

View File

@@ -28,6 +28,7 @@ body:
- 2.4.140
- 2.4.141
- 2.4.150
- 2.4.160
- Other (please provide detail below)
validations:
required: true

View File

@@ -1,17 +1,17 @@
### 2.4.150-20250512 ISO image released on 2025/05/12
### 2.4.160-20250625 ISO image released on 2025/06/25
### Download and Verify
2.4.150-20250512 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.150-20250512.iso
2.4.160-20250625 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.160-20250625.iso
MD5: 7A7469A7A38EA9A2DB770C36AE36A0CA
SHA1: 7E768D515353F339DC536DED6207B786DAFF7D27
SHA256: F8B2EB6B332F2367F0C097D211577565C8FB5CC7809E97D63687C634035B3699
MD5: 78CF5602EFFAB84174C56AD2826E6E4E
SHA1: FC7EEC3EC95D97D3337501BAA7CA8CAE7C0E15EA
SHA256: 0ED965E8BEC80EE16AE90A0F0F96A3046CEF2D92720A587278DDDE3B656C01C2
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.150-20250512.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.160-20250625.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.150-20250512.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.160-20250625.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.150-20250512.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.160-20250625.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.4.150-20250512.iso.sig securityonion-2.4.150-20250512.iso
gpg --verify securityonion-2.4.160-20250625.iso.sig securityonion-2.4.160-20250625.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Fri 09 May 2025 06:27:29 PM EDT using RSA key ID FE507013
gpg: Signature made Wed 25 Jun 2025 10:13:33 AM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.

View File

@@ -1 +1 @@
2.4.150
2.4.160

View File

@@ -99,6 +99,17 @@ add_interface_bond0() {
fi
}
airgap_playbooks() {
SRC_DIR=$1
# Copy playbooks if using airgap
mkdir -p /nsm/airgap-resources
# Purge old airgap playbooks to ensure SO only uses the latest released playbooks
rm -fr /nsm/airgap-resources/playbooks
tar xf $SRC_DIR/airgap-resources/playbooks.tgz -C /nsm/airgap-resources/
chown -R socore:socore /nsm/airgap-resources/playbooks
git config --global --add safe.directory /nsm/airgap-resources/playbooks
}
check_container() {
docker ps | grep "$1:" > /dev/null 2>&1
return $?
@@ -299,7 +310,8 @@ fail() {
get_agent_count() {
if [ -f /opt/so/log/agents/agentstatus.log ]; then
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}')
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}' | sed 's/,//')
[[ -z "$AGENTCOUNT" ]] && AGENTCOUNT="0"
else
AGENTCOUNT=0
fi
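The added `sed` in the hunk above strips the thousands separator that a large agent count can produce; a quick sketch of the pipeline on a made-up log line:

```
# Made-up agentstatus.log line with a comma in the count:
echo "active 1,234" | grep -wF active | awk '{print $2}' | sed 's/,//'
# -> 1234  (s/,// removes only the first comma; counts past 999,999 would need s/,//g)
```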

View File

@@ -45,7 +45,7 @@ def check_for_fps():
result = subprocess.run([feat_full + '-mode-setup', '--is-enabled'], stdout=subprocess.PIPE)
if result.returncode == 0:
fps = 1
except FileNotFoundError:
except:
fn = '/proc/sys/crypto/' + feat_full + '_enabled'
try:
with open(fn, 'r') as f:
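With the broadened `except`, any failure of the `*-mode-setup` probe (not just a missing binary) now falls through to the kernel flag; a shell check of that same fallback, assuming the feature is `fips`:

```
# Kernel-level flag read by the fallback above:
cat /proc/sys/crypto/fips_enabled 2>/dev/null   # prints 1 when FIPS mode is enabled
```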

View File

@@ -4,22 +4,16 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import sys, argparse, re, docker
import sys, argparse, re, subprocess, json
from packaging.version import Version, InvalidVersion
from itertools import groupby, chain
def get_image_name(string) -> str:
return ':'.join(string.split(':')[:-1])
def get_so_image_basename(string) -> str:
return get_image_name(string).split('/so-')[-1]
def get_image_version(string) -> str:
ver = string.split(':')[-1]
if ver == 'latest':
@@ -35,56 +29,75 @@ def get_image_version(string) -> str:
return '999999.9.9'
return ver
def run_command(command):
process = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
if process.returncode != 0:
print(f"Error executing command: {command}", file=sys.stderr)
print(f"Error message: {process.stderr}", file=sys.stderr)
exit(1)
return process.stdout
def main(quiet):
client = docker.from_env()
# Prune old/stopped containers
if not quiet: print('Pruning old containers')
client.containers.prune()
image_list = client.images.list(filters={ 'dangling': False })
# Map list of image objects to flattened list of tags (format: "name:version")
tag_list = list(chain.from_iterable(list(map(lambda x: x.attrs.get('RepoTags'), image_list))))
# Filter to only SO images (base name begins with "so-")
tag_list = list(filter(lambda x: re.match(r'^.*\/so-[^\/]*$', get_image_name(x)), tag_list))
# Group tags into lists by base name (sort by same projection first)
tag_list.sort(key=lambda x: get_so_image_basename(x))
grouped_tag_lists = [ list(it) for _, it in groupby(tag_list, lambda x: get_so_image_basename(x)) ]
no_prunable = True
for t_list in grouped_tag_lists:
try:
# Group tags by version, in case multiple images exist with the same version string
t_list.sort(key=lambda x: Version(get_image_version(x)), reverse=True)
grouped_t_list = [ list(it) for _,it in groupby(t_list, lambda x: get_image_version(x)) ]
# Keep the 2 most current version groups
if len(grouped_t_list) <= 2:
continue
else:
no_prunable = False
for group in grouped_t_list[2:]:
for tag in group:
if not quiet: print(f'Removing image {tag}')
# Prune old/stopped containers using docker CLI
if not quiet: print('Pruning old containers')
run_command('docker container prune -f')
# Get list of images using docker CLI
images_json = run_command('docker images --format "{{json .}}"')
# Parse the JSON output
image_list = []
for line in images_json.strip().split('\n'):
if line: # Skip empty lines
image_list.append(json.loads(line))
# Extract tags in the format "name:version"
tag_list = []
for img in image_list:
# Skip dangling images
if img.get('Repository') != "<none>" and img.get('Tag') != "<none>":
tag = f"{img.get('Repository')}:{img.get('Tag')}"
# Filter to only SO images (base name begins with "so-")
if re.match(r'^.*\/so-[^\/]*$', get_image_name(tag)):
tag_list.append(tag)
# Group tags into lists by base name (sort by same projection first)
tag_list.sort(key=lambda x: get_so_image_basename(x))
grouped_tag_lists = [list(it) for k, it in groupby(tag_list, lambda x: get_so_image_basename(x))]
no_prunable = True
for t_list in grouped_tag_lists:
try:
client.images.remove(tag, force=True)
except docker.errors.ClientError as e:
print(f'Could not remove image {tag}, continuing...')
except (docker.errors.APIError, InvalidVersion) as e:
print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
exit(1)
# Group tags by version, in case multiple images exist with the same version string
t_list.sort(key=lambda x: Version(get_image_version(x)), reverse=True)
grouped_t_list = [list(it) for k, it in groupby(t_list, lambda x: get_image_version(x))]
# Keep the 2 most current version groups
if len(grouped_t_list) <= 2:
continue
else:
no_prunable = False
for group in grouped_t_list[2:]:
for tag in group:
if not quiet: print(f'Removing image {tag}')
try:
run_command(f'docker rmi -f {tag}')
except Exception as e:
print(f'Could not remove image {tag}, continuing...')
except (InvalidVersion) as e:
print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
exit(1)
except Exception as e:
print('Unhandled exception occurred:')
print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
exit(1)
if no_prunable and not quiet:
print('No Security Onion images to prune')
except Exception as e:
print('Unhandled exception occurred:')
print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
exit(1)
if no_prunable and not quiet:
print('No Security Onion images to prune')
print(f"Error: {e}", file=sys.stderr)
exit(1)
if __name__ == "__main__":
main_parser = argparse.ArgumentParser(add_help=False)
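The refactor above replaces the `docker` Python SDK with plain CLI calls; a rough shell sketch of the equivalent queries (the jq filter is illustrative, not part of the script):

```
# Prune stopped containers, then list SO image tags via the docker CLI:
docker container prune -f
docker images --format '{{json .}}' \
  | jq -r 'select(.Repository != "<none>" and (.Repository | test("/so-[^/]*$")))
           | "\(.Repository):\(.Tag)"'
# tags older than the two newest versions of each image are then removed with:
# docker rmi -f <tag>
```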

View File

@@ -19,11 +19,12 @@
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
{ "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.dataset", "value": "import" } },
{ "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.namespace", "value": "so" } },
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{"append": {"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"if":"ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null","ignore_failure":true}},
{"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint'","description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}},
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
]
}
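Processor changes like the conditional `community_id` above can be spot-checked with Elasticsearch's ingest simulate API; a minimal sketch (the curl config path matches other scripts in this compare; the document is made up):

```
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -XPOST \
  'https://localhost:9200/_ingest/pipeline/_simulate' \
  -H 'Content-Type: application/json' -d '{
    "pipeline": { "processors": [ { "community_id": { "ignore_failure": true } } ] },
    "docs": [ { "_source": { "source": { "ip": "192.0.2.1", "port": 1234 },
                             "destination": { "ip": "198.51.100.2", "port": 53 },
                             "network": { "transport": "udp" } } } ]
  }'
```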

View File

@@ -1,11 +0,0 @@
{
"description" : "import.wel",
"processors" : [
{ "set": { "field": "event.ingested", "value": "{{ @timestamp }}" } },
{ "set" : { "field" : "@timestamp", "value" : "{{ event.created }}" } },
{ "remove": { "field": [ "event_record_id", "event.created" , "timestamp" , "winlog.event_data.UtcTime" ], "ignore_failure": true } },
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -9,6 +9,7 @@
{ "rename":{ "field": "rule.signature_id", "target_field": "rule.uuid", "ignore_failure": true } },
{ "rename":{ "field": "rule.signature_id", "target_field": "rule.signature", "ignore_failure": true } },
{ "rename":{ "field": "message2.payload_printable", "target_field": "network.data.decoded", "ignore_failure": true } },
{ "dissect": { "field": "rule.rule", "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}", "ignore_missing": true, "ignore_failure": true } },
{ "pipeline": { "name": "common.nids" } }
]
}
}

View File

@@ -18,6 +18,13 @@
{ "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },
{ "date": { "field": "message2.timestamp", "target_field": "@timestamp", "formats": ["ISO8601", "UNIX"], "timezone": "UTC", "ignore_failure": true } },
{ "remove":{ "field": "agent", "ignore_failure": true } },
{"append":{"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"ignore_failure":true}},
{
"script": {
"source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }",
"ignore_failure": false
}
},
{ "pipeline": { "if": "ctx?.event?.dataset != null", "name": "suricata.{{event.dataset}}" } }
]
}
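The Painless script above sorts source and destination IPv4 addresses into RFC 1918 private vs. public lists; a rough shell equivalent of the same membership test (IPv4 only, for illustration):

```
is_private_ip() {  # RFC 1918: 10/8, 172.16/12, 192.168/16
  case "$1" in
    10.*|192.168.*)                        return 0 ;;
    172.1[6-9].*|172.2[0-9].*|172.3[01].*) return 0 ;;
    *)                                     return 1 ;;
  esac
}
is_private_ip 172.20.1.5 && echo private   # -> private
```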

View File

@@ -12,7 +12,8 @@
{ "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
{ "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
{ "community_id": {} },
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
{ "community_id": { "if": "ctx.network?.community_id == null" } },
{ "set": { "if": "ctx.source?.ip != null", "field": "client.ip", "value": "{{source.ip}}" } },
{ "set": { "if": "ctx.source?.port != null", "field": "client.port", "value": "{{source.port}}" } },
{ "set": { "if": "ctx.destination?.ip != null", "field": "server.ip", "value": "{{destination.ip}}" } },

View File

@@ -20,7 +20,8 @@
{ "rename": { "field": "message2.RD", "target_field": "dns.recursion.desired", "ignore_missing": true } },
{ "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } },
{ "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } },
{ "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
{ "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
{ "script": { "lang": "painless", "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null", "source": "def ips = []; for (item in ctx.dns.answers.name) { if (item =~ /^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$/ || item =~ /^([a-fA-F0-9:]+:+)+[a-fA-F0-9]+$/) { ips.add(item); } } ctx.dns.resolved_ip = ips;" } },
{ "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } },
{ "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } },
{ "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } },
@@ -28,4 +29,4 @@
{ "pipeline": { "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
{ "pipeline": { "name": "zeek.common" } }
]
}
}
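The Painless loop above keeps only DNS answers that look like IP literals; the same two regexes can be exercised with grep on made-up answer values:

```
# Only IP-literal answers survive the filter:
printf '%s\n' mail.example.com 192.0.2.10 2001:db8::1 \
  | grep -E '^([0-9]{1,3}\.){3}[0-9]{1,3}$|^([a-fA-F0-9:]+:+)+[a-fA-F0-9]+$'
# -> 192.0.2.10
#    2001:db8::1
```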

View File

@@ -12,7 +12,7 @@ elasticsearch:
description: Specify the memory heap size in (m)egabytes for Elasticsearch.
helpLink: elasticsearch.html
index_clean:
description: Determines if indices should be considered for deletion by available disk space in the cluster. Otherwise, indices will only be deleted by the age defined in the ILM settings.
description: Determines if indices should be considered for deletion by available disk space in the cluster. Otherwise, indices will only be deleted by the age defined in the ILM settings. This setting only applies to EVAL, STANDALONE, and HEAVY NODE installations. Other installations can only use ILM settings.
forcedType: bool
helpLink: elasticsearch.html
retention:

View File

@@ -1,4 +1,4 @@
/bin/bash
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
@@ -6,6 +6,6 @@
. /usr/sbin/so-common
echo "Starting ILM..."
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X POST https://localhost:9200/_ilm/start
echo

View File

@@ -8,3 +8,4 @@
echo "Stopping ILM..."
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X POST https://localhost:9200/_ilm/stop
echo

View File

@@ -0,0 +1,113 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
INFLUX_URL="https://localhost:8086/api/v2"
. /usr/sbin/so-common
request() {
curl -skK /opt/so/conf/influxdb/curl.config "$INFLUX_URL/$@"
}
lookup_org_id() {
response=$(request orgs?org=Security+Onion)
echo "$response" | jq -r ".orgs[] | select(.name == \"Security Onion\").id"
}
ORG_ID=$(lookup_org_id)
run_flux_query() {
local query=$1
request "query?org=$ORG_ID" -H 'Accept:application/csv' -H 'Content-type:application/vnd.flux' -d "$query" -XPOST 2>/dev/null
}
read_csv_result() {
local result="$1"
echo "$result" | grep '^,_result,' | head -1 | awk -F',' '{print $NF}' | tr -d '\r\n\t '
}
bytes_to_gb() {
local bytes="${1:-0}"
if [[ "$bytes" =~ ^-?[0-9]+$ ]]; then
echo "$bytes" | awk '{printf "%.2f", $1 / 1024 / 1024 / 1024}'
else
echo "0.00"
fi
}
indexes_query='from(bucket: "telegraf/so_long_term")
|> range(start: -7d)
|> filter(fn: (r) => r._measurement == "elasticsearch_index_size")
|> distinct(column: "_field")
|> keep(columns: ["_field"])'
indexes_result=$(run_flux_query "$indexes_query")
indexes=$(echo "$indexes_result" | tail -n +2 | cut -d',' -f4 | grep -v '^$' | grep -v '^_field$' | sed 's/\r$//' | sort -u)
printf "%-50s %15s %15s %15s\n" "Index Name" "Last 24hr (GB)" "Last 7d (GB)" "Last 30d (GB)"
printf "%-50s %15s %15s %15s\n" "$(printf '%.0s-' {1..50})" "$(printf '%.0s-' {1..15})" "$(printf '%.0s-' {1..15})" "$(printf '%.0s-' {1..15})"
for index in $indexes; do
[[ -z "$index" ]] && continue
current_query="from(bucket: \"telegraf/so_long_term\")
|> range(start: -4h)
|> filter(fn: (r) => r._measurement == \"elasticsearch_index_size\" and r._field == \"$index\")
|> last()
|> keep(columns: [\"_value\"])"
current_result=$(run_flux_query "$current_query")
current_size=$(read_csv_result "$current_result")
current_size=${current_size:-0}
size_24h_query="from(bucket: \"telegraf/so_long_term\")
|> range(start: -25h, stop: -23h)
|> filter(fn: (r) => r._measurement == \"elasticsearch_index_size\" and r._field == \"$index\")
|> last()
|> keep(columns: [\"_value\"])"
size_24h_result=$(run_flux_query "$size_24h_query")
size_24h_ago=$(read_csv_result "$size_24h_result")
size_24h_ago=${size_24h_ago:-$current_size}
size_7d_query="from(bucket: \"telegraf/so_long_term\")
|> range(start: -7d8h, stop: -7d)
|> filter(fn: (r) => r._measurement == \"elasticsearch_index_size\" and r._field == \"$index\")
|> last()
|> keep(columns: [\"_value\"])"
size_7d_result=$(run_flux_query "$size_7d_query")
size_7d_ago=$(read_csv_result "$size_7d_result")
size_7d_ago=${size_7d_ago:-$current_size}
size_30d_query="from(bucket: \"telegraf/so_long_term\")
|> range(start: -30d8h, stop: -30d)
|> filter(fn: (r) => r._measurement == \"elasticsearch_index_size\" and r._field == \"$index\")
|> last()
|> keep(columns: [\"_value\"])"
size_30d_result=$(run_flux_query "$size_30d_query")
size_30d_ago=$(read_csv_result "$size_30d_result")
size_30d_ago=${size_30d_ago:-$current_size}
# If an index was recently cleaned up by ILM, it will result in a negative number for 'index growth'.
growth_24h=$(( current_size > size_24h_ago ? current_size - size_24h_ago : 0 ))
growth_7d=$(( current_size > size_7d_ago ? current_size - size_7d_ago : 0 ))
growth_30d=$(( current_size > size_30d_ago ? current_size - size_30d_ago : 0 ))
growth_24h_gb=$(bytes_to_gb "$growth_24h")
growth_7d_gb=$(bytes_to_gb "$growth_7d")
growth_30d_gb=$(bytes_to_gb "$growth_30d")
# Only print results for indices with at least 1 metric above 0.00
if [[ "$growth_24h_gb" != "0.00" ]] || [[ "$growth_7d_gb" != "0.00" ]] || [[ "$growth_30d_gb" != "0.00" ]]; then
printf "%020.2f|%-50s %15s %15s %15s\n" \
"$growth_24h" \
"$index" \
"$growth_24h_gb" \
"$growth_7d_gb" \
"$growth_30d_gb"
fi
done | sort -t'|' -k1,1nr | cut -d'|' -f2-
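The script sorts its rows by 24-hour growth using a zero-padded numeric prefix that is stripped after sorting; the trick in isolation, on made-up values:

```
printf '%020.2f|%s\n' 1234 idx-a 98 idx-b | sort -t'|' -k1,1nr | cut -d'|' -f2-
# -> idx-a
#    idx-b
```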

View File

@@ -1,26 +1,26 @@
# Extract all PDF mime type
alert http any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; sid:1100000; rev:1;)
alert smtp any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; sid:1100001; rev:1;)
alert nfs any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; sid:1100002; rev:1;)
alert smb any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; sid:1100003; rev:1;)
alert http any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100000; rev:1;)
alert smtp any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100001; rev:1;)
alert nfs any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100002; rev:1;)
alert smb any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100003; rev:1;)
# Extract EXE/DLL file types
alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; sid:1100004; rev:1;)
alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; sid:1100005; rev:1;)
alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; sid:1100006; rev:1;)
alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; sid:1100007; rev:1;)
alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; sid:1100008; rev:1;)
alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; sid:1100009; rev:1;)
alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; sid:1100010; rev:1;)
alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; sid:1100011; rev:1;)
alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100004; rev:1;)
alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100005; rev:1;)
alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100006; rev:1;)
alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100007; rev:1;)
alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100008; rev:1;)
alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100009; rev:1;)
alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100010; rev:1;)
alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100011; rev:1;)
# Extract all Zip files
alert http any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; sid:1100012; rev:1;)
alert smtp any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; sid:1100013; rev:1;)
alert nfs any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; sid:1100014; rev:1;)
alert smb any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; sid:1100015; rev:1;)
alert http any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100012; rev:1;)
alert smtp any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100013; rev:1;)
alert nfs any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100014; rev:1;)
alert smb any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100015; rev:1;)
# Extract Word Docs
alert http any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; sid:1100016; rev:1;)
alert smtp any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; sid:1100017; rev:1;)
alert nfs any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; sid:1100018; rev:1;)
alert smb any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; sid:1100019; rev:1;)
alert http any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100016; rev:1;)
alert smtp any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100017; rev:1;)
alert nfs any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100018; rev:1;)
alert smb any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100019; rev:1;)

View File

@@ -156,6 +156,13 @@ rules_dir:
- group: socore
- makedirs: True
nsm_playbooks_dir:
file.directory:
- name: /nsm/airgap-resources/playbooks
- user: socore
- group: socore
- makedirs: True
git_config_set_safe_dirs:
git.config_set:
- name: safe.directory
@@ -166,6 +173,8 @@ git_config_set_safe_dirs:
- /nsm/rules/custom-local-repos/local-yara
- /nsm/securityonion-resources
- /opt/so/conf/soc/ai_summary_repos/securityonion-resources
- /nsm/airgap-resources/playbooks
- /opt/so/conf/soc/playbooks
{% else %}
{{sls}}_state_not_allowed:

View File

@@ -417,6 +417,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.130 ]] && up_to_2.4.140
[[ "$INSTALLEDVERSION" == 2.4.140 ]] && up_to_2.4.141
[[ "$INSTALLEDVERSION" == 2.4.141 ]] && up_to_2.4.150
[[ "$INSTALLEDVERSION" == 2.4.150 ]] && up_to_2.4.160
true
}
@@ -444,6 +445,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.130 ]] && post_to_2.4.140
[[ "$POSTVERSION" == 2.4.140 ]] && post_to_2.4.141
[[ "$POSTVERSION" == 2.4.141 ]] && post_to_2.4.150
[[ "$POSTVERSION" == 2.4.150 ]] && post_to_2.4.160
true
}
@@ -579,6 +581,11 @@ post_to_2.4.150() {
POSTVERSION=2.4.150
}
post_to_2.4.160() {
echo "Nothing to apply"
POSTVERSION=2.4.160
}
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -816,6 +823,12 @@ up_to_2.4.150() {
INSTALLEDVERSION=2.4.150
}
up_to_2.4.160() {
echo "Nothing to do for 2.4.160"
INSTALLEDVERSION=2.4.160
}
add_hydra_pillars() {
mkdir -p /opt/so/saltstack/local/pillar/hydra
touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
@@ -1405,6 +1418,8 @@ main() {
if [[ $is_airgap -eq 0 ]]; then
echo "Updating Rule Files to the Latest."
update_airgap_rules
echo "Updating Playbooks to the Latest."
airgap_playbooks "$UPDATE_DIR"
fi
# since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars

View File

@@ -383,6 +383,7 @@ http {
}
if ($request_uri ~* ^/(?!(^/api/.*))) {
add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400";
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
}
return 302 /auth/self-service/login/browser;
}
@@ -392,6 +393,7 @@ http {
return 403;
}
add_header Set-Cookie "ory_kratos_session=;Path=/;Max-Age=0;expires=Thu, 01 Jan 1970 00:00:00 GMT;";
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
return 302 /auth/self-service/login/browser;
}
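Once deployed, the new header can be confirmed on a redirect response; a quick check (hostname is a placeholder):

```
curl -skI https://securityonion.example/some/page | grep -i '^strict-transport-security'
# Strict-Transport-Security: max-age=31536000; includeSubDomains
```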

View File

@@ -14,7 +14,7 @@ include:
# Install the registry container
so-dockerregistry:
docker_container.running:
- image: ghcr.io/security-onion-solutions/registry:2.8.3
- image: ghcr.io/security-onion-solutions/registry:3.0.0
- hostname: so-registry
- networks:
- sobridge:
@@ -25,7 +25,7 @@ so-dockerregistry:
- {{ BINDING }}
{% endfor %}
- binds:
- /opt/so/conf/docker-registry/etc/config.yml:/etc/docker/registry/config.yml:ro
- /opt/so/conf/docker-registry/etc/config.yml:/etc/distribution/config.yml:ro
- /opt/so/conf/docker-registry:/var/lib/registry:rw
- /nsm/docker-registry/docker:/var/lib/registry/docker:rw
- /etc/pki/registry.crt:/etc/pki/registry.crt:ro

View File

@@ -52,6 +52,13 @@ socsaltdir:
- mode: 770
- makedirs: True
socplaybooksdir:
file.directory:
- name: /opt/so/conf/soc/playbooks
- user: 939
- group: 939
- makedirs: True
socanalytics:
file.managed:
- name: /opt/so/conf/soc/analytics.js

View File

@@ -1415,17 +1415,21 @@ soc:
license: Elastic-2.0
folder: sigma/stable
community: true
rulesetName: securityonion-resources
- repo: file:///nsm/rules/custom-local-repos/local-sigma
license: Elastic-2.0
community: false
rulesetName: local-sigma
airgap:
- repo: file:///nsm/rules/detect-sigma/repos/securityonion-resources
license: Elastic-2.0
folder: sigma/stable
community: true
rulesetName: securityonion-resources
- repo: file:///nsm/rules/custom-local-repos/local-sigma
license: Elastic-2.0
community: false
rulesetName: local-sigma
sigmaRulePackages:
- core
- emerging_threats_addon
@@ -1456,6 +1460,16 @@ soc:
org: Security Onion
bucket: telegraf/so_short_term
verifyCert: false
playbook:
autoUpdateEnabled: true
playbookImportFrequencySeconds: 86400
playbookImportErrorSeconds: 600
playbookRepoUrl:
default: https://github.com/Security-Onion-Solutions/securityonion-resources-playbooks
airgap: file:///nsm/airgap-resources/playbooks/securityonion-resources-playbooks
playbookRepoBranch: main
playbookRepoPath: /opt/sensoroni/playbooks/
playbookPathInRepo: securityonion-normalized
salt:
queueDir: /opt/sensoroni/queue
timeoutMs: 45000
@@ -1492,16 +1506,20 @@ soc:
- repo: https://github.com/Security-Onion-Solutions/securityonion-yara
license: DRL
community: true
rulesetName: securityonion-yara
- repo: file:///nsm/rules/custom-local-repos/local-yara
license: Elastic-2.0
community: false
rulesetName: local-yara
airgap:
- repo: file:///nsm/rules/detect-yara/repos/securityonion-yara
license: DRL
community: true
rulesetName: securityonion-yara
- repo: file:///nsm/rules/custom-local-repos/local-yara
license: Elastic-2.0
community: false
rulesetName: local-yara
yaraRulesFolder: /opt/sensoroni/yara/rules
stateFilePath: /opt/sensoroni/fingerprints/strelkaengine.state
integrityCheckFrequencySeconds: 1200

View File

@@ -31,10 +31,12 @@ so-soc:
- /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw
- /nsm/soc/jobs:/opt/sensoroni/jobs:rw
- /nsm/soc/uploads:/nsm/soc/uploads:rw
- /nsm/airgap-resources:/nsm/airgap-resources:rw
- /opt/so/log/soc/:/opt/sensoroni/logs/:rw
- /opt/so/conf/soc/soc.json:/opt/sensoroni/sensoroni.json:ro
- /opt/so/conf/soc/ai_summary_repos:/opt/sensoroni/ai_summary_repos:rw
- /opt/so/conf/navigator/layers/:/opt/sensoroni/navigator/:rw
- /opt/so/conf/soc/playbooks/:/opt/sensoroni/playbooks/:rw
{% if SOCMERGED.telemetryEnabled and not GLOBALS.airgap %}
- /opt/so/conf/soc/analytics.js:/opt/sensoroni/html/js/analytics.js:ro
{% endif %}

View File

@@ -12,12 +12,30 @@ transformations:
sid: rule.uuid
answer: answers
query: dns.query.name
src_ip: source.ip.keyword
src_ip: source.ip
src_port: source.port
dst_ip: destination.ip.keyword
dst_ip: destination.ip
dst_port: destination.port
winlog.event_data.User: user.name
logtype: event.code # OpenCanary
## Start Temp Linux Mappings ##
ProcessGuid: process.entity_id
ProcessId: process.pid
Image: process.executable
CommandLine: process.command_line
CurrentDirectory: process.working_directory
ParentProcessGuid: process.parent.entity_id
ParentProcessId: process.parent.pid
ParentImage: process.parent.executable
ParentCommandLine: process.parent.command_line
User: user.name
## End Temp Linux Mappings ##
document_id: _id
rule.type: event.module
related_ip: related.ip
community_id: network.community_id
event_dataset: event.dataset
hostname: host.name
# Maps "opencanary" product to SO IDH logs
- id: opencanary_idh_add-fields
type: add_condition
@@ -126,4 +144,96 @@ transformations:
event.type: 'creation'
rule_conditions:
- type: logsource
category: file_event
category: file_event
# Maps network rules to all network logs
# This targets all network logs, all services, generated from endpoints and network
- id: network_add-fields
type: add_condition
conditions:
event.category: 'network'
rule_conditions:
- type: logsource
category: network
# Maps network_connection rules to endpoint network creation logs
# This is an OS-agnostic mapping, to account for logs that don't specify source OS
- id: endpoint_network_connection_add-fields
type: add_condition
conditions:
event.category: 'network'
event.type: 'start'
rule_conditions:
- type: logsource
category: network_connection
# Maps "alert" category to SO Alert events
- id: alert_so_add-fields
type: add_condition
conditions:
tags: 'alert'
rule_conditions:
- type: logsource
category: alert
# Maps "network + connection" to SO connection logs
- id: network_connection_so_add-fields
type: add_condition
conditions:
tags: 'conn'
rule_conditions:
- type: logsource
category: network
service: connection
# Maps "network + dns" to SO DNS logs
- id: network_dns_so_add-fields
type: add_condition
conditions:
tags: 'dns'
rule_conditions:
- type: logsource
category: network
service: dns
# Maps "network + http" to SO HTTP logs
- id: network_http_so_add-fields
type: add_condition
conditions:
tags: 'http'
rule_conditions:
- type: logsource
category: network
service: http
# Maps "network + file" to SO file logs
- id: network_file_so_add-fields
type: add_condition
conditions:
event.category: 'network'
tags: 'file'
rule_conditions:
- type: logsource
category: network
service: file
# Maps "network + x509" to SO x509 logs
- id: network_x509_so_add-fields
type: add_condition
conditions:
event.category: 'network'
tags: 'x509'
rule_conditions:
- type: logsource
category: network
service: x509
# Maps "network + ssl" to SO ssl logs
- id: network_ssl_so_add-fields
type: add_condition
conditions:
event.category: 'network'
tags: 'ssl'
rule_conditions:
- type: logsource
category: network
service: ssl
# Maps file to host or network file events
- id: file_so_add-fields
type: add_condition
conditions:
tags: '*file'
rule_conditions:
- type: logsource
category: file

View File

@@ -61,6 +61,13 @@
{% do SOCMERGED.config.server.update({'airgapEnabled': false}) %}
{% endif %}
{# set playbookRepoUrl based on airgap or not #}
{% if GLOBALS.airgap %}
{% do SOCMERGED.config.server.modules.playbook.update({'playbookRepoUrl': SOCMERGED.config.server.modules.playbook.playbookRepoUrl.airgap}) %}
{% else %}
{% do SOCMERGED.config.server.modules.playbook.update({'playbookRepoUrl': SOCMERGED.config.server.modules.playbook.playbookRepoUrl.default}) %}
{% endif %}
{# remove these modules if detections is disabled #}
{% if not SOCMERGED.config.server.client.detectionsEnabled %}
{% do SOCMERGED.config.server.modules.pop('elastalertengine') %}

View File

@@ -344,6 +344,23 @@ soc:
advanced: True
forcedType: "[]{}"
helpLink: sigma.html
syntax: json
uiElements:
- field: rulesetName
label: Ruleset Name
- field: repo
label: Repo URL
required: True
- field: branch
label: Branch
- field: license
label: License
required: True
- field: folder
label: Folder
- field: community
label: Community
forcedType: bool
airgap: *eerulesRepos
sigmaRulePackages:
description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). Once you have changed the ruleset here, the new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by navigating to Detections --> Options dropdown menu --> Elastalert --> Full Update. WARNING! Changing the ruleset will remove all existing non-overlapping Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.'
@@ -459,6 +476,23 @@ soc:
advanced: True
forcedType: "[]{}"
helpLink: yara.html
syntax: json
uiElements:
- field: rulesetName
label: Ruleset Name
- field: repo
label: Repo URL
required: True
- field: branch
label: Branch
- field: license
label: License
required: True
- field: folder
label: Folder
- field: community
label: Community
forcedType: bool
airgap: *serulesRepos
suricataengine:
aiRepoUrl:
@@ -592,7 +626,7 @@ soc:
label: Query
required: True
- field: showSubtitle
label: Show Query in Dropdown.
label: Show Query in Dropdown.
forcedType: bool
queryToggleFilters:
description: Customize togglable query filters that apply to all queries. Exclusive toggles will invert the filter if toggled off rather than omitting the filter from the query.

View File

@@ -26,7 +26,6 @@ echo "Running all.rules and $TESTRULE against the following pcap: $TESTPCAP"
echo ""
sleep 3
rm -rf /tmp/nids-testing/output
mkdir -p /tmp/nids-testing/output
chown suricata:socore /tmp/nids-testing/output
@@ -45,7 +44,7 @@ echo "==== Begin Suricata Output ==="
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
-v /tmp/nids-testing/output/:/nsm/:rw \
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
--runmode single -v -k none -r /input.pcap -l /tmp --init-errors-fatal
--runmode single -v -k none -r /input.pcap -l /tmp --init-errors-fatal --set outputs.6.pcap-log.enabled=no
echo "==== End Suricata Output ==="
echo ""

View File

@@ -241,7 +241,12 @@
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
{% if grains.role in ['so-searchnode','so-standalone','so-manager', 'so-managersearch', 'so-heavynode', 'so-receiver'] -%}
{%- set logstash_metrics_roles = ['so-searchnode','so-standalone','so-managersearch','so-heavynode'] %}
{%- if GLOBALS.pipeline != "KAFKA" %}
{%- set logstash_metrics_roles = logstash_metrics_roles + ['so-manager', 'so-receiver'] %}
{%- endif %}
{%- if grains.role in logstash_metrics_roles %}
[[inputs.logstash]]
url = "http://localhost:9600"
collect = ["pipelines"]
@@ -252,7 +257,7 @@
{% if grains.role in ['so-manager','so-managersearch','so-standalone','so-receiver'] and GLOBALS.pipeline == "KAFKA" -%}
[[inputs.jolokia2_agent]]
name_prefix= "kafka_"
urls = ["http://localhost:8778/jolokia"]
urls = ["http://{{ NODEIP }}:8778/jolokia"]
[[inputs.jolokia2_agent.metric]]
name = "topics"

View File

@@ -57,6 +57,7 @@ zeek:
- cve-2020-0601
- securityonion/bpfconf
- securityonion/file-extraction
- securityonion/community-id-extended
- oui-logging
- icsnpp-modbus
- icsnpp-dnp3

View File

@@ -0,0 +1,40 @@
##! Extends community ID logging to Files and SSL by copying
##! the community_id from the parent connection.
##!
##! Note: Requires that protocols/conn/community-id-logging is loaded
module CommunityIDExt;
@load base/protocols/ssl
@load protocols/conn/community-id-logging
export {
redef record SSL::Info += {
community_id: string &optional &log;
};
redef record Files::Info += {
community_id: string &optional &log;
};
}
# Files
event file_new(f: fa_file) {
if ( f?$conns ) {
# Take community_id from first connection that has it
for ( cid in f$conns ) {
local c = f$conns[cid];
if ( c?$conn && c$conn?$community_id ) {
f$info$community_id = c$conn$community_id;
break;
}
}
}
}
# SSL Connections
event ssl_established(c: connection) {
if ( c?$conn && c$conn?$community_id && c?$ssl ) {
c$ssl$community_id = c$conn$community_id;
}
}
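A quick way to exercise the policy outside a deployment (pcap and script path are placeholders; SO itself loads it via the `community-id-extended` entry added to the Zeek config above):

```
# Run against a sample pcap with community-id logging loaded, then inspect the logs:
zeek -r sample.pcap protocols/conn/community-id-logging ./community-id-extended.zeek
grep -m1 community_id ssl.log files.log
```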

View File

@@ -769,6 +769,10 @@ if ! [[ -f $install_opt_file ]]; then
title "Syncing AI-Generated Detection Summaries"
airgap_detection_summaries
fi
if [[ $is_airgap ]]; then
title "Syncing Playbooks"
logCmd "airgap_playbooks /root/SecurityOnion"
fi
title "Setting up Kibana Default Space"
logCmd "so-kibana-space-defaults"
add_web_user

Binary file not shown.

Binary file not shown.