Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 09:12:45 +01:00)
remove unneeded files
@@ -1,113 +0,0 @@
'''
Add Virtual Node Beacon

This beacon monitors for creation or modification of files matching a specific pattern
and sends the contents of the files up to the Salt Master's event bus, including
the hypervisor and nodetype extracted from the file path.

Configuration:

    beacons:
      add_virtual_node_beacon:
        - base_path: /path/to/files/*

If base_path is not specified, it defaults to '/opt/so/saltstack/local/salt/hypervisor/hosts/*/add_*'
'''

import os
import glob
import logging
import re

log = logging.getLogger(__name__)

__virtualname__ = 'add_virtual_node_beacon'
DEFAULT_BASE_PATH = '/opt/so/saltstack/local/salt/hypervisor/hosts/*/add_*'


def __virtual__():
    '''
    Return the virtual name of the beacon.
    '''
    return __virtualname__


def validate(config):
    '''
    Validate the beacon configuration.

    Args:
        config (list): Configuration of the beacon.

    Returns:
        tuple: A tuple of (bool, str) indicating success and message.
    '''
    if not isinstance(config, list):
        return False, 'Configuration for add_virtual_node_beacon must be a list of dictionaries'
    for item in config:
        if not isinstance(item, dict):
            return False, 'Each item in configuration must be a dictionary'
        if 'base_path' in item and not isinstance(item['base_path'], str):
            return False, 'base_path must be a string'
    return True, 'Valid beacon configuration'


def beacon(config):
    '''
    Monitor for creation or modification of files and send events.

    Args:
        config (list): Configuration of the beacon.

    Returns:
        list: A list of events to send to the Salt Master.
    '''
    if 'add_virtual_node_beacon' not in __context__:
        __context__['add_virtual_node_beacon'] = {}

    ret = []

    for item in config:
        base_path = item.get('base_path', DEFAULT_BASE_PATH)
        file_list = glob.glob(base_path)

        log.debug('Starting add_virtual_node_beacon. Found %d files matching pattern %s', len(file_list), base_path)

        for file_path in file_list:
            try:
                mtime = os.path.getmtime(file_path)
                prev_mtime = __context__['add_virtual_node_beacon'].get(file_path, 0)
                if mtime > prev_mtime:
                    log.info('File %s is new or modified', file_path)
                    with open(file_path, 'r') as f:
                        contents = f.read()

                    data = {}
                    # Parse the contents of the file
                    for line in contents.splitlines():
                        if ':' in line:
                            key, value = line.split(':', 1)
                            data[key.strip()] = value.strip()
                        else:
                            log.warning('Line in file %s does not contain colon: %s', file_path, line)

                    # Extract hypervisor and nodetype from the file path
                    match = re.match(r'^.*/hosts/(?P<hypervisor>[^/]+)/add_(?P<nodetype>[^/]+)$', file_path)
                    if match:
                        data['hypervisor'] = match.group('hypervisor')
                        data['nodetype'] = match.group('nodetype')
                    else:
                        log.warning('Unable to extract hypervisor and nodetype from file path: %s', file_path)
                        data['hypervisor'] = None
                        data['nodetype'] = None

                    event = {'tag': f'add_virtual_node/{os.path.basename(file_path)}', 'data': data}
                    ret.append(event)
                    __context__['add_virtual_node_beacon'][file_path] = mtime
                else:
                    log.debug('File %s has not been modified since last check', file_path)
            except FileNotFoundError:
                log.warning('File not found: %s', file_path)
            except PermissionError:
                log.error('Permission denied when accessing file: %s', file_path)
            except Exception as e:
                log.error('Error processing file %s: %s', file_path, str(e))

    return ret
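To exercise the beacon by hand, a minimal sketch (assuming the beacon is enabled on the minion with the default base_path; the hypervisor name "hv01" and nodetype "sensor" are made-up values for illustration): write a matching file on the minion, then watch the master's event bus.

# On the minion: create a file matching the default pattern
mkdir -p /opt/so/saltstack/local/salt/hypervisor/hosts/hv01
echo 'hostname: sensor01' > /opt/so/saltstack/local/salt/hypervisor/hosts/hv01/add_sensor

# On the master: watch the event bus; the beacon's event data should
# include hostname plus the extracted hypervisor and nodetype
salt-run state.event pretty=True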
@@ -1,20 +0,0 @@
# quick script to create raid
# this is an example of the base functions
parted -s /dev/nvme0n1 rm 1
parted -s /dev/nvme0n1 mklabel gpt
parted -s /dev/nvme0n1 mkpart primary xfs 0% 100%
parted -s /dev/nvme0n1 set 1 raid on
parted -s /dev/nvme1n1 rm 1
parted -s /dev/nvme1n1 mklabel gpt
parted -s /dev/nvme1n1 mkpart primary xfs 0% 100%
parted -s /dev/nvme1n1 set 1 raid on
yes | mdadm --create /dev/md0 --level=1 --raid-devices=2 /dev/nvme0n1p1 /dev/nvme1n1p1

mkfs -t xfs -f /dev/md0
echo "Create NSM mount point"
mkdir -p /nsm
echo "Add mount to fstab"
echo "/dev/md0 /nsm xfs defaults,nofail 0 0" >> /etc/fstab
echo "Mounting /nsm"
mount -a
mdadm --detail --scan --verbose >> /etc/mdadm.conf
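After the quick script runs, the array and mount can be sanity-checked with the same standard tools the longer scripts below rely on:

cat /proc/mdstat                       # array state and resync progress
mdadm --detail /dev/md0                # RAID level and member devices
df -h /nsm                             # confirm the filesystem is mounted
grep md0 /etc/fstab /etc/mdadm.conf    # confirm the persistence entries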
@@ -1,200 +0,0 @@
#!/bin/bash

# raid3.sh is a refinement of raid2.sh. raid2.sh was used to create the raid in testing
# More detailed logging
# More thorough RAID array cleanup
# Better organized cleanup procedures
# Simplified device wiping using more modern tools (sgdisk instead of dd)
# More robust handling of existing MD arrays
# The core RAID creation and mounting functionality remains the same between both scripts, but the second version has improved error handling and cleanup procedures.

# Exit on any error
set -e

# Function to log messages
log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}

# Function to check if running as root
check_root() {
    if [ "$EUID" -ne 0 ]; then
        log "Error: Please run as root"
        exit 1
    fi
}

# Function to perform thorough device cleanup
ensure_devices_free() {
    local device=$1

    log "Performing thorough cleanup of device $device"

    # Kill any processes using the device
    fuser -k "${device}"* 2>/dev/null || true

    # Force unmount any partitions
    for part in "${device}"*; do
        if mount | grep -q "$part"; then
            umount -f "$part" 2>/dev/null || true
        fi
    done

    # Stop any MD arrays using this device
    for md in $(ls /dev/md* 2>/dev/null || true); do
        if mdadm --detail "$md" 2>/dev/null | grep -q "$device"; then
            log "Stopping MD array $md"
            mdadm --stop "$md" 2>/dev/null || true
        fi
    done

    # Thorough RAID cleanup
    log "Cleaning RAID metadata from $device"
    mdadm --zero-superblock "$device" 2>/dev/null || true
    if [ -e "${device}p1" ]; then
        mdadm --zero-superblock "${device}p1" 2>/dev/null || true
    fi

    # Remove LVM PV if exists
    pvremove -ff -y "$device" 2>/dev/null || true

    # Clear all signatures
    log "Wiping all signatures from $device"
    wipefs -a "$device" 2>/dev/null || true

    # Clear partition table
    log "Clearing partition table on $device"
    sgdisk -Z "$device" 2>/dev/null || true

    # Force kernel to reread
    log "Forcing kernel to reread partition table"
    partprobe "$device" 2>/dev/null || true
    sleep 2
}

# Function to check if RAID is already set up
check_existing_raid() {
    # Clear existing mdadm configuration first
    log "Initializing clean mdadm configuration"
    echo "DEVICE partitions" > /etc/mdadm.conf

    if [ -e "/dev/md0" ]; then
        if mdadm --detail /dev/md0 &>/dev/null; then
            local raid_state=$(mdadm --detail /dev/md0 | grep "State" | awk '{print $3}')
            local mount_point="/nsm"

            log "Found existing RAID array /dev/md0 (State: $raid_state)"

            if mountpoint -q "$mount_point"; then
                log "RAID is already mounted at $mount_point"
                log "Current RAID details:"
                mdadm --detail /dev/md0

                # Check if resyncing
                if grep -q "resync" /proc/mdstat; then
                    log "RAID is currently resyncing:"
                    grep resync /proc/mdstat
                    log "You can monitor progress with: watch -n 60 cat /proc/mdstat"
                else
                    log "RAID is fully synced and operational"
                fi

                # Show disk usage
                log "Current disk usage:"
                df -h "$mount_point"

                exit 0
            fi
        fi
    fi

    # Check if any MD arrays exist and try to clean them up
    if [ -f /proc/mdstat ]; then
        log "Checking for existing MD arrays"
        if grep -q "md" /proc/mdstat; then
            log "Found existing MD arrays, attempting cleanup"
            for md in $(awk '/md/{print $1}' /proc/mdstat); do
                log "Stopping array $md"
                mdadm --stop "/dev/$md" 2>/dev/null || true
            done
        fi
    fi

    # Check if any of the target devices are in use
    for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
        if lsblk -o NAME,MOUNTPOINT "$device" | grep -q "nsm"; then
            log "Error: $device is already mounted at /nsm"
            exit 1
        fi
    done
}

# Main script
main() {
    log "Starting RAID setup script"

    # Check if running as root
    check_root

    # Check for existing RAID setup
    check_existing_raid

    # Clean and prepare devices
    for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
        ensure_devices_free "$device"

        log "Creating new partition table on $device"
        sgdisk -Z "$device"
        sgdisk -o "$device"

        log "Creating RAID partition"
        sgdisk -n 1:0:0 -t 1:fd00 "$device"

        partprobe "$device"
        udevadm settle
        sleep 5
    done

    log "Final verification of partition availability"
    if ! [ -b "/dev/nvme0n1p1" ] || ! [ -b "/dev/nvme1n1p1" ]; then
        log "Error: Partitions not available after creation"
        exit 1
    fi

    log "Creating RAID array"
    mdadm --create /dev/md0 --level=1 --raid-devices=2 \
        --metadata=1.2 \
        /dev/nvme0n1p1 /dev/nvme1n1p1 \
        --force --run

    log "Creating XFS filesystem"
    mkfs.xfs -f /dev/md0

    log "Creating mount point"
    mkdir -p /nsm

    log "Updating fstab"
    sed -i '/\/dev\/md0/d' /etc/fstab
    echo "/dev/md0 /nsm xfs defaults,nofail 0 0" >> /etc/fstab

    log "Reloading systemd daemon"
    systemctl daemon-reload

    log "Mounting filesystem"
    mount -a

    log "Saving RAID configuration"
    mdadm --detail --scan > /etc/mdadm.conf

    log "RAID setup complete"
    log "RAID array details:"
    mdadm --detail /dev/md0

    if grep -q "resync" /proc/mdstat; then
        log "RAID is currently resyncing. You can monitor progress with:"
        log "watch -n 60 cat /proc/mdstat"
    fi
}

# Run main function
main "$@"
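Since the script is destructive and hardcodes /dev/nvme0n1 and /dev/nvme1n1, a pre-flight check along these lines is prudent (standard utilities, not part of the script):

# Confirm the targets exist, are the expected size, and hold nothing needed
lsblk -d -o NAME,SIZE,MODEL,MOUNTPOINT /dev/nvme0n1 /dev/nvme1n1
wipefs /dev/nvme0n1 /dev/nvme1n1    # without -a, only lists existing signatures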
@@ -1,5 +1,39 @@
#!/bin/bash

#################################################################
# RAID-1 Setup Script for NVMe Drives
#################################################################
#
# DESCRIPTION:
# This script automatically sets up a RAID-1 (mirrored) array using two NVMe drives
# (/dev/nvme0n1 and /dev/nvme1n1) and mounts it at /nsm with XFS filesystem.
#
# FUNCTIONALITY:
# - Detects and reports existing RAID configurations
# - Thoroughly cleans target drives of any existing data/configurations
# - Creates GPT partition tables with RAID-type partitions
# - Establishes RAID-1 array (/dev/md0) for data redundancy
# - Formats the array with XFS filesystem for performance
# - Automatically mounts at /nsm and configures for boot persistence
# - Provides monitoring information for resync operations
#
# SAFETY FEATURES:
# - Requires root privileges
# - Exits gracefully if RAID already exists and is mounted
# - Performs comprehensive cleanup to avoid conflicts
# - Forces partition table updates and waits for system recognition
#
# PREREQUISITES:
# - Two NVMe drives: /dev/nvme0n1 and /dev/nvme1n1
# - Root access
# - mdadm, sgdisk, and standard Linux utilities
#
# WARNING: This script will DESTROY all data on the target drives!
#
# USAGE: sudo ./raid_setup.sh
#
#################################################################

# Exit on any error
set -e
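Once the full script has run, the boot-persistence pieces it configures can be verified like so (ordinary commands, not part of the script):

findmnt /nsm               # mounted from /dev/md0 with fstype xfs
grep md0 /etc/fstab        # fstab entry with defaults,nofail
cat /etc/mdadm.conf        # saved array definition for assembly at boot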
@@ -1,50 +0,0 @@
#listen_tls = "0"
#listen_tcp = "0"
#tls_port = "16514"
#tcp_port = "16509"
#listen_addr = "0.0.0.0"
#unix_sock_group = "root"
#unix_sock_ro_perms = "0777"
#unix_sock_rw_perms = "0770"
#unix_sock_admin_perms = "0700"
#unix_sock_dir = "/run/libvirt"
#auth_unix_ro = "none"
#auth_unix_rw = "none"
#auth_tcp = "none"
#auth_tls = "none"
#access_drivers = "[ \"nop\" ]"
#key_file = "/etc/pki/libvirt/private/serverkey.pem"
#cert_file = "/etc/pki/libvirt/servercert.pem"
#ca_file = "/etc/pki/CA/cacert.pem"
#crl_file = "/etc/pki/CA/crl.pem"
#tls_no_sanity_certificate = "0"
#tls_no_verify_certificate = "0"
#tls_allowed_dn_list = "[\"DN1\", \"DN2\"]"
#tls_priority = "NORMAL"
#sasl_allowed_username_list = "[\"joe@example.com\", \"fred@example.com\"]"
#max_clients = "5000"
#max_queued_clients = "20"
#max_anonymous_clients = "20"
#min_workers = "5"
#max_workers = "20"
#prio_workers = "5"
#max_client_requests = "5"
#admin_min_workers = "1"
#admin_max_workers = "5"
#admin_max_clients = "5"
#admin_max_queued_clients = "5"
#admin_max_client_requests = "5"
#log_level = "3"
#log_filters = "\"1:qemu 1:libvirt 4:object 4:json 4:event 1:util\""
#log_outputs = "\"3:syslog:libvirtd\""
#audit_level = "1"
#audit_logging = "0"
#host_uuid = "00000000-0000-0000-0000-000000000000"
#host_uuid_source = "smbios"
#keepalive_interval = "5"
#keepalive_count = "5"
#keepalive_required = "0"
#admin_keepalive_required = "0"
#admin_keepalive_interval = "5"
#admin_keepalive_count = "5"
#ovs_timeout = "5"
@@ -1,51 +0,0 @@
#listen_tls = 0
#listen_tcp = 1
#tls_port = "16514"
#tcp_port = "16509"
#listen_addr = "192.168.0.1"
#unix_sock_group = "libvirt"
#unix_sock_ro_perms = "0777"
#unix_sock_rw_perms = "0770"
#unix_sock_admin_perms = "0700"
#unix_sock_dir = "/run/libvirt"
#auth_unix_ro = "polkit"
#auth_unix_rw = "polkit"
#auth_tcp = "sasl"
#auth_tls = "none"
#tcp_min_ssf = 112
#access_drivers = [ "polkit" ]
#key_file = "/etc/pki/libvirt/private/serverkey.pem"
#cert_file = "/etc/pki/libvirt/servercert.pem"
#ca_file = "/etc/pki/CA/cacert.pem"
#crl_file = "/etc/pki/CA/crl.pem"
#tls_no_sanity_certificate = 1
#tls_no_verify_certificate = 1
#tls_allowed_dn_list = ["DN1", "DN2"]
#tls_priority="NORMAL"
#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
#max_clients = 5000
#max_queued_clients = 1000
#max_anonymous_clients = 20
#min_workers = 5
#max_workers = 20
#prio_workers = 5
#max_client_requests = 5
#admin_min_workers = 1
#admin_max_workers = 5
#admin_max_clients = 5
#admin_max_queued_clients = 5
#admin_max_client_requests = 5
#log_level = 3
#log_filters="1:qemu 1:libvirt 4:object 4:json 4:event 1:util"
#log_outputs="3:syslog:libvirtd"
#audit_level = 2
#audit_logging = 1
#host_uuid = "00000000-0000-0000-0000-000000000000"
#host_uuid_source = "smbios"
#keepalive_interval = 5
#keepalive_count = 5
#keepalive_required = 1
#admin_keepalive_required = 1
#admin_keepalive_interval = 5
#admin_keepalive_count = 5
#ovs_timeout = 5
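Both files above consist entirely of commented-out defaults; to list only the settings a deployed config actually activates, something like this works (the path matches the file.managed target in the state below):

grep -vE '^\s*(#|$)' /opt/so/conf/libvirt/libvirtd.conf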
@@ -1 +1 @@
-# The files in this directory (/opt/so/saltstack/local/salt/libvirt/images/sool9) are generated by createvm.sh. They are then distributed to the hypervisors where a storage pool will be created then the image can be installed.
+# The files in this directory (/opt/so/saltstack/local/salt/libvirt/images/sool9) are generated by the setup_hypervisor runner. They are then distributed to the hypervisors where a storage pool will be created then the image can be installed.
@@ -34,7 +34,7 @@ libvirt_conf_dir:
 libvirt_config:
   file.managed:
     - name: /opt/so/conf/libvirt/libvirtd.conf
-    - source: salt://libvirt/configstockstock
+    - source: salt://libvirt/etc/libvirtd.conf
 #    - source: salt://libvirt/etc/libvirtd.conf.jinja
 #    - template: jinja
 #    - defaults:
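To preview what this state renders without applying it, a sketch using salt-call (assuming the SLS is reachable under the name libvirt on the minion):

salt-call --local state.show_sls libvirt    # render the SLS without running it
salt-call state.apply libvirt test=True     # dry-run against the master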
@@ -1,190 +0,0 @@
#!/bin/bash

# Ensure /root/create_vm/var/lib/libvirt/images exists
# Place this script in /root/create_vm
# Download OL9U5_x86_64-kvm-b253.qcow2 from https://yum.oracle.com/oracle-linux-templates.html, place in /root/create_vm/

# These steps will be removed from the process to create the final image and are only being used for development
# This is used for the user-data auth portion of cloud-init
# Create passwd hash:
# python3 -c 'import crypt; print(crypt.crypt("YOUR_PASSWD_HERE", crypt.mksalt(crypt.METHOD_SHA512)))'
# Create ssh keypair:
# ssh-keygen -t ed25519 -C "soqemussh" -f ~/.ssh/soqemussh

# Run the script: createvm.sh coreol9Small 205G
# IP options may be removed for the final version

# After running the script, the following will be output:
#[root@jppvirtman create_vm]# ll var/lib/libvirt/images/coreol9Small/
#total 610376
#-rw-r--r--. 1 root root    380928 Dec 20 14:33 coreol9Small-cidata.iso
#-rw-r--r--. 1 root root 624623616 Dec 20 14:33 coreol9Small.qcow2
#-rw-r--r--. 1 root root        55 Dec 20 14:32 meta-data
#-rw-r--r--. 1 root root       333 Dec 20 14:32 network-config
#-rw-r--r--. 1 root root      1047 Dec 20 14:32 user-data

# These files are then copied (scp) to a hypervisor node
# Place the files in /var/lib/libvirt/images/coreol9Small (or whatever matches the vm name)
# Create your storage pool as instructed by the script. This is only needed if one doesn't already exist
# Run the virt-install command as instructed by the script

# Could add the following to the final runcmd in the user-data to fill the disk to avoid the cons of thin provisioning the disk
# - dd if=/dev/zero of=/tmp/fill bs=1M || true
# - rm -f /tmp/fill

# Exit on any error
set -e

# Set variables and defaults
VM=${1:-"small-vm"}             # VM name
DISK_SIZE=${2:-"205G"}          # Disk size with unit (default 205G)
IP=${3:-"192.168.1.10"}         # IP address
GATEWAY=${4:-"192.168.1.1"}     # Gateway
DNS=${5:-"192.168.1.1"}         # Comma-separated list of DNS servers
MAC_ADDRESS="52:54:00:f2:c3:df" # Default MAC - will be overridden if found

# Show usage if help is requested
if [[ "$1" == "-h" || "$1" == "--help" ]]; then
    echo "Usage: $0 <vm_name> <disk_size> <ip> <gateway> <dns_servers>"
    echo "Example: $0 myvm 100G 192.168.1.50 192.168.1.1 8.8.8.8,8.8.4.4"
    echo "Parameters:"
    echo "  vm_name    : Name of the VM (default: small-vm)"
    echo "  disk_size  : Size of the disk with unit G/M (default: 205G)"
    echo "  ip         : IP address (default: 192.168.1.10)"
    echo "  gateway    : Gateway address (default: 192.168.1.1)"
    echo "  dns_servers: Comma-separated DNS servers (default: 192.168.1.1)"
    echo "All parameters are optional and will use defaults if not specified"
    exit 0
fi

# Validate disk size format
if ! [[ $DISK_SIZE =~ ^[0-9]+[GM]$ ]]; then
    echo "Error: Disk size must be a number followed by G (gigabytes) or M (megabytes)"
    echo "Example: 100G or 51200M"
    exit 1
fi

# Convert comma-separated DNS servers to yaml format
format_dns() {
    local IFS=','
    local dns_list=($1)
    local yaml=""
    for dns in "${dns_list[@]}"; do
        yaml="$yaml - $dns"$'\n'
    done
    echo "$yaml"
}

DNS_YAML=$(format_dns "$DNS")

# Set up directory structure
D=/root/create_vm/var/lib/libvirt/images
mkdir -vp $D/$VM
cd $D/$VM

# Create cloud-init metadata
cat > meta-data << EOF
instance-id: ${VM}
local-hostname: ${VM}
EOF

# Create network configuration
cat > network-config << EOF
version: 2
ethernets:
  eth0:
    match:
      macaddress: ${MAC_ADDRESS}
    dhcp4: false
    dhcp6: false
    addresses:
      - ${IP}/24
    routes:
      - to: 0.0.0.0/0
        via: ${GATEWAY}
    nameservers:
      addresses:
$(format_dns "$DNS")
EOF

# Create user-data with network configuration
cat > user-data << EOF
#cloud-config
preserve_hostname: False
hostname: ${VM}
fqdn: ${VM}.local

users:
  - default
  - name: soqemussh
    groups: ['wheel']
    shell: /bin/bash
    sudo: ALL=(ALL) NOPASSWD:ALL
    lock_passwd: false
    passwd: $(echo '___YOUR_HASH_HERE___')
    ssh-authorized-keys:
      - ssh-ed25519 ___YOUR_PUB_KEY_HERE___ soqemussh

# Configure where output will go
output:
  all: ">> /var/log/cloud-init.log"

# configure interaction with ssh server
ssh_genkeytypes: ['ed25519', 'rsa']

# set timezone for VM
timezone: UTC

# Install QEMU guest agent. Enable and start the service
packages:
  - qemu-guest-agent

runcmd:
  - systemctl enable --now qemu-guest-agent
  - systemctl enable --now serial-getty@ttyS0.service
  - systemctl enable --now NetworkManager
  - growpart /dev/vda 2
  - pvresize /dev/vda2
  - lvextend -l +100%FREE /dev/vg_main/lv_root
  - xfs_growfs /dev/vg_main/lv_root
EOF

# First, copy the base image with progress
echo "Creating base VM image..."
rsync --progress /root/create_vm/OL9U5_x86_64-kvm-b253.qcow2 $VM.qcow2

# Resize the image to specified size
echo "Resizing image to $DISK_SIZE..."
echo "Current image size: $(qemu-img info $VM.qcow2 | grep 'virtual size' | cut -d':' -f2 | cut -d'(' -f1 | tr -d ' ')"
qemu-img resize -f qcow2 $VM.qcow2 $DISK_SIZE
echo "New image size: $(qemu-img info $VM.qcow2 | grep 'virtual size' | cut -d':' -f2 | cut -d'(' -f1 | tr -d ' ')"

# Now compress it with progress
echo "Compressing image..."
qemu-img convert -p -O qcow2 -c $VM.qcow2 $VM-compressed.qcow2
mv -v $VM-compressed.qcow2 $VM.qcow2

# Create a cloud-init ISO with network config and progress indication
echo "Creating cloud-init ISO..."
mkisofs -output $VM-cidata.iso -volid CIDATA -rock -verbose user-data meta-data network-config

# Echo the configuration for verification
echo "Creating VM with the following configuration:"
echo "VM Name: $VM"
echo "Disk Size: $DISK_SIZE"
echo "IP Address: $IP"
echo "Gateway: $GATEWAY"
echo "DNS Servers: $DNS"
echo "MAC Address: $MAC_ADDRESS"

echo "Files have been created in $D/$VM"
echo
echo "To complete VM creation on the hypervisor, run:"
echo "virsh pool-create-as --name $VM --type dir --target $D/$VM"
echo "virt-install --import --name ${VM} \\"
echo "  --memory 4096 --vcpus 4 --cpu host \\"
echo "  --disk ${VM}.qcow2,format=qcow2,bus=virtio \\"
echo "  --disk ${VM}-cidata.iso,device=cdrom \\"
echo "  --network bridge=br0,model=virtio,mac=${MAC_ADDRESS} \\"
echo "  --os-variant=ol9.5 \\"
echo "  --noautoconsole"
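A few sanity checks on the artifacts the script leaves behind (qemu-img is already used above; isoinfo comes from the genisoimage/cdrtools package and may need installing):

qemu-img check $VM.qcow2      # verify the compressed image is structurally intact
qemu-img info $VM.qcow2       # confirm the virtual size matches $DISK_SIZE
isoinfo -l -i $VM-cidata.iso  # list user-data/meta-data/network-config in the ISO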
@@ -1,144 +0,0 @@
#!/bin/bash

# Ensure /root/create_vm/var/lib/libvirt/images exists
# Place this script in /root/create_vm
# Download OL9U5_x86_64-kvm-b253.qcow2 from https://yum.oracle.com/oracle-linux-templates.html, place in /root/create_vm/

# These steps will be removed from the process to create the final image and are only being used for development
# This is used for the user-data auth portion of cloud-init
# Create passwd hash:
# python3 -c 'import crypt; print(crypt.crypt("YOUR_PASSWD_HERE", crypt.mksalt(crypt.METHOD_SHA512)))'
# Create ssh keypair:
# ssh-keygen -t ed25519 -C "soqemussh" -f ~/.ssh/soqemussh

# Run the script: createbase.sh coreol9Small 20G
# After running the script, the following will be output:
#[root@jppvirtman create_vm]# ll var/lib/libvirt/images/coreol9Small/
#total 610376
#-rw-r--r--. 1 root root    380928 Dec 20 14:33 coreol9Small-cidata.iso
#-rw-r--r--. 1 root root 624623616 Dec 20 14:33 coreol9Small.qcow2
#-rw-r--r--. 1 root root        55 Dec 20 14:32 meta-data
#-rw-r--r--. 1 root root      1047 Dec 20 14:32 user-data

# These files are then copied (scp) to a hypervisor node
# Place the files in /var/lib/libvirt/images/coreol9Small (or whatever matches the vm name)
# Create your storage pool as instructed by the script if one doesn't already exist
# Run the virt-install command as instructed by the script

# Exit on any error
set -e

# Set variables and defaults
VM=${1:-"base-vm"}     # VM name
DISK_SIZE=${2:-"20G"}  # Disk size with unit (default 20G)

# Show usage if help is requested
if [[ "$1" == "-h" || "$1" == "--help" ]]; then
    echo "Usage: $0 <vm_name> <disk_size>"
    echo "Example: $0 myvm 20G"
    echo "Parameters:"
    echo "  vm_name  : Name of the VM (default: base-vm)"
    echo "  disk_size: Size of the disk with unit G/M (default: 20G)"
    echo "All parameters are optional and will use defaults if not specified"
    exit 0
fi

# Validate disk size format
if ! [[ $DISK_SIZE =~ ^[0-9]+[GM]$ ]]; then
    echo "Error: Disk size must be a number followed by G (gigabytes) or M (megabytes)"
    echo "Example: 20G or 20480M"
    exit 1
fi

# Set up directory structure
#D=/root/create_vm/var/lib/libvirt/images
D=/opt/so/saltstack/local/salt/libvirt/images
mkdir -vp $D/$VM
cd $D/$VM

# Create cloud-init metadata
cat > meta-data << EOF
instance-id: ${VM}
local-hostname: ${VM}
EOF

# Create user-data configuration
cat > user-data << EOF
#cloud-config
preserve_hostname: False
hostname: ${VM}
fqdn: ${VM}.local

users:
  - default
  - name: soqemussh
    groups: ['wheel']
    shell: /bin/bash
    sudo: ALL=(ALL) NOPASSWD:ALL
    lock_passwd: false
    passwd: $(echo '___YOUR_HASH_HERE___')
    ssh-authorized-keys:
      - ssh-ed25519 ___YOUR_PUB_KEY_HERE___ soqemussh

# Configure where output will go
output:
  all: ">> /var/log/cloud-init.log"

# configure interaction with ssh server
ssh_genkeytypes: ['ed25519', 'rsa']

# set timezone for VM
timezone: UTC

# Install QEMU guest agent. Enable and start the service
packages:
  - qemu-guest-agent

runcmd:
  - systemctl enable --now qemu-guest-agent
  - systemctl enable --now serial-getty@ttyS0.service
  - systemctl enable --now NetworkManager
  - growpart /dev/vda 2
  - pvresize /dev/vda2
  - lvextend -l +100%FREE /dev/vg_main/lv_root
  - xfs_growfs /dev/vg_main/lv_root
  - touch /etc/cloud/cloud-init.disabled
  - shutdown -P now
EOF

# First, copy the base image with progress
echo "Creating base VM image..."
rsync --progress /root/create_vm/OL9U5_x86_64-kvm-b253.qcow2 $VM.qcow2

# Resize the image to specified size
echo "Resizing image to $DISK_SIZE..."
echo "Current image size: $(qemu-img info $VM.qcow2 | grep 'virtual size' | cut -d':' -f2 | cut -d'(' -f1 | tr -d ' ')"
qemu-img resize -f qcow2 $VM.qcow2 $DISK_SIZE
echo "New image size: $(qemu-img info $VM.qcow2 | grep 'virtual size' | cut -d':' -f2 | cut -d'(' -f1 | tr -d ' ')"

# Now compress it with progress
echo "Compressing image..."
qemu-img convert -p -O qcow2 -c $VM.qcow2 $VM-compressed.qcow2
mv -v $VM-compressed.qcow2 $VM.qcow2

# Create a cloud-init ISO with progress indication
echo "Creating cloud-init ISO..."
mkisofs -output $VM-cidata.iso -volid CIDATA -rock -verbose user-data meta-data

# Echo the configuration for verification
echo "Creating VM with the following configuration:"
echo "VM Name: $VM"
echo "Disk Size: $DISK_SIZE"

echo "Files have been created in $D/$VM"
echo
echo "To complete VM creation on the hypervisor, run:"
echo "virsh pool-create-as --name $VM --type dir --target $D/$VM"
echo "virt-install --name ${VM} \\"
echo "  --memory 4096 --vcpus 4 --cpu host \\"
echo "  --disk ${VM}.qcow2,format=qcow2,bus=virtio \\"
echo "  --disk ${VM}-cidata.iso,device=cdrom \\"
echo "  --network bridge=br0,model=virtio \\"
echo "  --os-variant=ol9.5 \\"
echo "  --import \\"
echo "  --noautoconsole"
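Unlike createvm.sh, this script's cloud-init disables itself and powers the VM off (the final runcmd issues shutdown -P now), leaving the qcow2 as a reusable base image. A rough sketch of the follow-up on the hypervisor, with names taken from the example above:

watch -n 10 'virsh list --all'   # wait until coreol9Small shows "shut off"
virsh undefine coreol9Small      # drop the VM definition, keeping the disk
qemu-img check /var/lib/libvirt/images/coreol9Small/coreol9Small.qcow2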
@@ -1,7 +0,0 @@
post_setup_cron:
  cron.present:
    - name: 'PATH=$PATH:/usr/sbin salt-call state.highstate'
    - identifier: post_setup_cron
    - user: root
    - minute: '*/5'
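For reference, the crontab entry this state manages for root is equivalent to the line below (Salt also inserts a SALT_CRON_IDENTIFIER comment above it to track the identifier):

*/5 * * * * PATH=$PATH:/usr/sbin salt-call state.highstate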