merge and resolve conflict in elasticsearch state

This commit is contained in:
m0duspwnens
2021-05-27 11:33:44 -04:00
20 changed files with 129 additions and 252 deletions

View File

@@ -96,7 +96,6 @@ commonpkgs:
- netcat
- python3-mysqldb
- sqlite3
- argon2
- libssl-dev
- python3-dateutil
- python3-m2crypto
@@ -129,7 +128,6 @@ commonpkgs:
- net-tools
- curl
- sqlite
- argon2
- mariadb-devel
- nmap-ncat
- python3

View File

@@ -488,6 +488,7 @@ wait_for_web_response() {
maxAttempts=${3:-300}
curlcmd=${4:-curl}
logfile=/root/wait_for_web_response.log
truncate -s 0 "$logfile"
attempt=0
while [[ $attempt -lt $maxAttempts ]]; do
attempt=$((attempt+1))

View File

@@ -128,7 +128,7 @@ update_docker_containers() {
mkdir -p $SIGNPATH >> "$LOG_FILE" 2>&1
# Let's make sure we have the public key
retry 50 10 "curl -sSL https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS -o $SIGNPATH/KEYS" >> "$LOG_FILE" 2>&1
retry 50 10 "curl -f -sSL https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS -o $SIGNPATH/KEYS" >> "$LOG_FILE" 2>&1
result=$?
if [[ $result -eq 0 ]]; then
cat $SIGNPATH/KEYS | gpg --import - >> "$LOG_FILE" 2>&1
@@ -151,7 +151,7 @@ update_docker_containers() {
retry 50 10 "docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image" >> "$LOG_FILE" 2>&1
# Get signature
retry 50 10 "curl -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' https://sigs.securityonion.net/$VERSION/$i:$VERSION$IMAGE_TAG_SUFFIX.sig --output $SIGNPATH/$image.sig" >> "$LOG_FILE" 2>&1
retry 50 10 "curl -f -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' https://sigs.securityonion.net/$VERSION/$i:$VERSION$IMAGE_TAG_SUFFIX.sig --output $SIGNPATH/$image.sig" >> "$LOG_FILE" 2>&1
if [[ $? -ne 0 ]]; then
echo "Unable to pull signature file for $image" >> "$LOG_FILE" 2>&1
exit 1

View File

@@ -39,10 +39,9 @@ email=$2
kratosUrl=${KRATOS_URL:-http://127.0.0.1:4434}
databasePath=${KRATOS_DB_PATH:-/opt/so/conf/kratos/db/db.sqlite}
argon2Iterations=${ARGON2_ITERATIONS:-3}
argon2Memory=${ARGON2_MEMORY:-14}
argon2Parallelism=${ARGON2_PARALLELISM:-2}
argon2HashSize=${ARGON2_HASH_SIZE:-32}
bcryptRounds=${BCRYPT_ROUNDS:-12}
elasticUsersFile=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users}
elasticRolesFile=${ELASTIC_ROLES_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users_roles}
function fail() {
msg=$1
@@ -58,7 +57,7 @@ function require() {
# Verify this environment is capable of running this script
function verifyEnvironment() {
require "argon2"
require "htpasswd"
require "jq"
require "curl"
require "openssl"
@@ -95,6 +94,16 @@ function validateEmail() {
fi
}
# Produce a bcrypt hash of the given plaintext password, formatted for the
# Elasticsearch file realm.
# Arguments: $1 - plaintext password (read via stdin pipe, not argv)
# Globals:   bcryptRounds (read) - bcrypt cost factor
# Outputs:   writes the "$2a$..."-prefixed hash to stdout
function hashPassword() {
plaintext=$1
# htpasswd -n prints "SOUSER:$2y$<cost>$<hash>"; cut drops the first 10
# bytes ("SOUSER:$2y") so we can re-prefix with the $2a variant below.
bcryptHash=$(echo "${plaintext}" | htpasswd -niBC $bcryptRounds SOUSER | cut -c 11-)
# Elasticsearch only accepts the $2a prefix;
# still waiting for https://github.com/elastic/elasticsearch/issues/51132
echo "\$2a${bcryptHash}"
}
function updatePassword() {
identityId=$1
@@ -111,15 +120,61 @@ function updatePassword() {
if [[ -n $identityId ]]; then
# Generate password hash
salt=$(openssl rand -hex 8)
passwordHash=$(echo "${password}" | argon2 ${salt} -id -t $argon2Iterations -m $argon2Memory -p $argon2Parallelism -l $argon2HashSize -e)
passwordHash=$(hashPassword "$password")
# Update DB with new hash
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"${passwordHash}\"}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to update password"
fi
}
# Create (or empty) a locked-down scratch file next to the given Elastic
# config file, so it can later be atomically moved into place once fully
# written (see syncElastic).
# Arguments: $1 - final destination path (e.g. the users file)
# Outputs:   prints the temp file path on stdout
function createElasticTmpFile() {
filename=$1
# Derive the temp name from the destination path so the eventual mv is a
# same-filesystem rename (atomic), never a cross-device copy.
# (Original line read `tmpFile=$(unknown).tmp`, which ran a nonexistent
# command and ignored $filename entirely, yielding a bare ".tmp" in cwd.)
tmpFile="${filename}.tmp"
truncate -s 0 "$tmpFile"
# Restrict permissions before any credential hashes are written into it.
chmod 600 "$tmpFile"
chown elasticsearch:elasticsearch "$tmpFile"
echo "$tmpFile"
}
# Regenerate the Elasticsearch file-realm credential files (users and
# users_roles) from the Kratos identity database, plus the built-in system
# user taken from the Salt pillar. Each file is built in a temp file and
# then moved over the live file so readers never see a partial write.
# Globals: elasticUsersFile, elasticRolesFile, databasePath (read)
function syncElastic() {
usersFileTmp=$(createElasticTmpFile "${elasticUsersFile}")
rolesFileTmp=$(createElasticTmpFile "${elasticRolesFile}")
# Internal system credentials come from the elasticsearch Salt pillar.
sysUser=$(lookup_pillar "auth:user" "elasticsearch")
sysPass=$(lookup_pillar "auth:pass" "elasticsearch")
[[ -z "$sysUser" || -z "$sysPass" ]] && fail "Elastic auth credentials for system user are missing"
sysHash=$(hashPassword "$sysPass")
# Generate the new users file
echo "${sysUser}:${sysHash}" >> "$usersFileTmp"
# Emit each identity as a JSON object so jq can join the login identifier
# with its stored bcrypt hash; only rows that carry a hashed_password count.
echo "select '{\"user\":\"' || ici.identifier || '\", \"data\":' || ic.config || '}'" \
"from identity_credential_identifiers ici, identity_credentials ic " \
"where ici.identity_credential_id=ic.id and ic.config like '%hashed_password%' " \
"order by ici.identifier;" | \
sqlite3 "$databasePath" | \
jq -r '.user + ":" + .data.hashed_password' \
>> "$usersFileTmp"
# $? reflects jq, the final stage of the pipeline above.
[[ $? != 0 ]] && fail "Unable to read credential hashes from database"
mv -f "$usersFileTmp" "$elasticUsersFile"
[[ $? != 0 ]] && fail "Unable to create users file: $elasticUsersFile"
# Generate the new users_roles file
# NOTE(review): every Kratos identity is granted superuser here — confirm
# that per-user role mapping is intentionally out of scope.
echo "superuser:${sysUser}" >> "$rolesFileTmp"
echo "select 'superuser:' || ici.identifier " \
"from identity_credential_identifiers ici, identity_credentials ic " \
"where ici.identity_credential_id=ic.id and ic.config like '%hashed_password%' " \
"order by ici.identifier;" | \
sqlite3 "$databasePath" \
>> "$rolesFileTmp"
[[ $? != 0 ]] && fail "Unable to read credential IDs from database"
mv -f "$rolesFileTmp" "$elasticRolesFile"
# NOTE(review): message says "users file" but this is the users_roles file.
[[ $? != 0 ]] && fail "Unable to create users file: $elasticRolesFile"
}
# Push the current Kratos identity database out to all downstream
# credential stores (currently only the Elasticsearch file realm).
# Entry point for the "sync" operation in the case statement below.
function syncAll() {
syncElastic
}
function listUsers() {
response=$(curl -Ss -L ${kratosUrl}/identities)
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
@@ -259,6 +314,11 @@ case "${operation}" in
check_container fleet && so-fleet-user-enable "$email" false
;;
"sync")
syncAll
echo "Synchronization complete"
;;
"validate")
validateEmail "$email"
updatePassword

View File

@@ -201,7 +201,6 @@ auth_users_roles:
- cmd: syncesusers
{% endif %}
so-elasticsearch:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}
@@ -235,6 +234,8 @@ so-elasticsearch:
- binds:
- /opt/so/conf/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- /opt/so/conf/elasticsearch/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- /opt/so/conf/elasticsearch/users:/usr/share/elasticsearch/config/users:ro
- /opt/so/conf/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles:ro
- /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
- /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro

View File

@@ -11,7 +11,7 @@
{% set GRAFANA_SETTINGS = salt['grains.filter_by'](default_settings, default='grafana', merge=salt['pillar.get']('grafana', {})) %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] or (grains.role == 'so-eval' and GRAFANA == 1) %}
# Grafana all the things
grafanadir:

View File

@@ -3,7 +3,7 @@
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] or (grains.role == 'so-eval' and GRAFANA == 1) %}
{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
@@ -17,7 +17,6 @@
include:
- salt.minion
- salt.python3-influxdb
# Influx DB
influxconfdir:
file.directory:
@@ -139,4 +138,4 @@ so_downsample_cq:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -149,6 +149,12 @@ http {
root /opt/socore/html;
index index.html;
add_header Content-Security-Policy "default-src 'self' 'unsafe-inline' 'unsafe-eval' https: data:; frame-ancestors 'self'";
add_header X-Frame-Options SAMEORIGIN;
add_header X-XSS-Protection "1; mode=block";
add_header X-Content-Type-Options nosniff;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload";
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;

View File

@@ -44,12 +44,24 @@ hold_salt_packages:
{% endfor %}
{% endif %}
remove_info_log_level_logfile:
file.line:
- name: /etc/salt/minion
- match: "log_level_logfile: info"
- mode: delete
remove_info_log_level:
file.line:
- name: /etc/salt/minion
- match: "log_level: info"
- mode: delete
set_log_levels:
file.append:
- name: /etc/salt/minion
- text:
- "log_level: info"
- "log_level_logfile: info"
- "log_level: error"
- "log_level_logfile: error"
- listen_in:
- service: salt_minion_service

View File

@@ -41,12 +41,8 @@ serve:
base_url: https://{{ WEBACCESS }}/kratos/
hashers:
argon2:
parallelism: 2
memory: 16384
iterations: 3
salt_length: 16
key_length: 32
bcrypt:
cost: 12
identity:
default_schema_url: file:///kratos-conf/schema.json

View File

@@ -54,7 +54,7 @@
"verifyCert": false
},
"influxdb": {
{%- if grains['role'] in ['so-import'] %}
{%- if grains['role'] in ['so-import'] or (grains['role'] == 'so-eval' and GRAFANA == 0) %}
"hostUrl": "",
{%- else %}
"hostUrl": "https://{{ MANAGERIP }}:8086",

View File

@@ -78,6 +78,7 @@ zeekspoolownership:
file.directory:
- name: /nsm/zeek/spool
- user: 937
- max_depth: 0
- recurse:
- user