Catching up with current dev

This commit is contained in:
DarkFeather 2022-04-19 12:01:03 -05:00
parent a881363b9b
commit d1140cf78b
Signed by: DarkFeather
GPG Key ID: 1CC1E3F4ED06F296
15 changed files with 423 additions and 109 deletions

View File

@ -20,10 +20,9 @@ ansible-playbook -i your-inventory.yml playbooks/sshkey.yml
ansible-playbook -i your-inventory.yml playbooks/deploy.yml
```
For convenience, we recommend adding the following alias to your .bashrc or .bashrc.local.
```
alias deploy="cd ~/src/Ubiqtorate; ansible-playbook -i examples/msn0.yml playbooks/deploy.yml; cd -"
```
We've also added two scripts in `./bin` to make your life easier:
* `full-deploy`: This is the general role. If you are creating an AniNIX replica, once you have your inventory and vault populated, then you can run this script to push everything. This is also optimal when rotating vault secrets or other global tasks. This is effectively standardizing invocation of our overall deployment playbook.
* `deploy-role`: When you are updating a specific role, use this script to push that role to your group. Ideally, this should only be used to push a role that you have been working on to a target group in your inventory that's already tagged for the role in the deployment playbook.
Happy hacking!
@ -31,6 +30,3 @@ Happy hacking!
Some services, such as AniNIX/Sharingan and AniNIX/Geth, store their configuration in internal datastructures and databases such that we cannot easily export our build for others to use. We will document what we have done for each of these as best we can in the README.md files for others to replicate. Backups of these services into AniNIX/Aether are therefore dumps of these databases and not available to share.

49
bin/deploy-role Executable file
View File

@ -0,0 +1,49 @@
#!/bin/bash
# File: deploy-role
#
# Description: Push a single role to a target group via playbooks/one-role.yml.
#
# Usage: deploy-role [-v] $role $targetgroup [$optional_inventory]

# Handle verbosity FIRST, so "-v" is never mistaken for the role name and the
# real role argument is validated below. (Previously, "deploy-role -v" passed
# the empty-role check with role="-v" and was never re-checked after shift.)
if [ "$1" == "-v" ]; then
set -x
shift
fi

# Role is first argument
role="$1"

# Handle usage
if [ "$role" == "-h" ] || [ "$role" == "--help" ]; then
echo "Usage: $0 -h"
echo "       $0 [-v] \$role \$targetgroup [\$optional_inventory]"
exit 0
fi
if [ -z "$role" ]; then
echo "Need a role as first argument."
exit 1
fi

# Find the root of the git clone; playbook paths below are relative to it.
while [ ! -d .git ]; do
cd ..
if [ "$PWD" == '/' ]; then
echo "This needs to be run from the Ubiqtorate checkout"
exit 3
fi
done

# Get the targetgroup
targetgroup="$2"
if [ -z "$targetgroup" ]; then
echo "Need a group"
exit 2
fi

# Allow an inventory override; default to the example replica inventory.
inventory="${3:-examples/msn0.yml}"

# Invoke the one-role playbook for the role on the targetgroup
ansible-playbook -i "$inventory" -e "role=$role" -e "targets=$targetgroup" playbooks/one-role.yml
# and return the exit status
exit $?

24
bin/full-deploy Executable file
View File

@ -0,0 +1,24 @@
#!/bin/bash
# File: full-deploy
#
# Description: Run a complete deployment, optionally against a custom inventory.

# Optional first argument: inventory file (defaults to examples/msn0.yml).
inventory="$1"
if [ "$inventory" == "-h" ] || [ "$inventory" == "--help" ]; then
echo "Usage: $0 -h # Usage"
echo "       $0 [inventory] # Run a complete deployment."
exit 0
elif [ -z "$inventory" ]; then
inventory="examples/msn0.yml"
fi

# Find the root of the git clone; playbook paths below are relative to it.
while [ ! -d .git ]; do
cd ..
if [ "$PWD" == '/' ]; then
echo "This needs to be run from the Ubiqtorate checkout"
exit 3
fi
done

# Bug fix: honor the inventory override instead of hard-coding examples/msn0.yml.
ansible-playbook -i "$inventory" playbooks/deploy.yml

95
bin/generate-systemd-vms.py Executable file
View File

@ -0,0 +1,95 @@
#!/usr/bin/env python3
# File: generate-systemd-vms.py
#
# Description: This file generates the systemd.service files that run our VM's
#
# Package: AniNIX/Ubiqtorate
# Copyright: WTFPL
#
# Author: DarkFeather <darkfeather@aninix.net>
import os
import shutil
import sys
import yaml
filepath="roles/Node/files/vm-definitions/"
def WriteVMFile(content, hosttype, hostclass, outdir=None):
    ### Create the systemd service files for the hosts in one inventory class
    # param content: the parsed inventory yaml content
    # param hosttype: 'managed' or 'unmanaged'
    # param hostclass: the type of host as classified in the yaml (e.g. 'virtual')
    # param outdir: directory (with trailing separator) to write the unit files
    #               into; defaults to the module-level filepath
    if outdir is None:
        outdir = filepath
    hosts = content['all']['children'][hosttype]['children'][hostclass]['hosts']
    for host in hosts:
        hostinfo = hosts[host]
        # Make sure the host definition has all the criteria to be a VM;
        # hosts missing a required attribute are skipped entirely.
        try:
            cores = str(hostinfo['cores'])
            memory = str(hostinfo['memory'])
            vnc = str(hostinfo['vnc'])
            disks = ' '.join(hostinfo['disks'])
            mac = hostinfo['mac']
            bridge = hostinfo['bridge']
        except (KeyError, TypeError) as e:
            print('Host ' + host + " doesn't have the attributes needed to be a VM -- skipping.")
            print(e)
            # Bug fix: previously fell through and tried to write a unit with
            # zeroed attributes (crashing on int + str concatenation).
            continue
        # Write the file.
        with open(outdir + host + '-vm.service', 'w') as vmfile:
            vmfile.write('[Unit]\n')
            vmfile.write('Description=AniNIX/' + host + '\n')
            vmfile.write('After=network.target\n')
            vmfile.write('\n')
            vmfile.write('[Service]\n')
            vmfile.write('ExecStart=/usr/sbin/qemu-system-x86_64 -name AniNIX/' + host + ' -machine type=pc,accel=kvm')
            # UEFI guests boot through the OVMF firmware image.
            if 'uefi' in hostinfo.keys():
                vmfile.write(' -bios /usr/share/edk2-ovmf/x64/OVMF.fd')
            vmfile.write(' -cpu qemu64 -smp ' + cores + ' ' + disks + ' -net nic,macaddr=' + mac + ',model=virtio -net bridge,br=' + bridge + ' -vga std -nographic -vnc :' + str(vnc) + ' -m size=' + str(memory) + 'G -device virtio-rng-pci\n')
            vmfile.write('ExecReload=/bin/kill -HUP $MAINPID\n')
            vmfile.write('KillMode=process\n')
            vmfile.write('Restart=always\n')
            vmfile.write('User=root\n')
            vmfile.write('Group=root\n')
            vmfile.write('\n')
            vmfile.write('[Install]\n')
            vmfile.write('WantedBy=multi-user.target\n')
        print(host + '-vm.service')
def GenerateFiles(file):
    ### Open the inventory file, parse it, and generate one unit file per VM
    # param file: the inventory yaml file to work on
    global filepath
    # Start from a clean output directory; it is fine if it did not exist yet.
    try:
        shutil.rmtree(filepath)
    except FileNotFoundError:
        pass
    finally:
        # makedirs also creates missing parent directories on a fresh checkout.
        os.makedirs(filepath, exist_ok=True)
    # Parse the yaml
    with open(file, 'r') as stream:
        content = yaml.safe_load(stream)
    # Add service files for each host class that can contain VMs
    WriteVMFile(content, 'managed', 'virtual')
    WriteVMFile(content, 'unmanaged', 'ovas')
    # WriteVMFile(content,'unmanaged','appliances')
if __name__ == '__main__':
    # Entry point: exactly one argument is expected -- the inventory file.
    if len(sys.argv) == 2:
        GenerateFiles(sys.argv[1])
        sys.exit(0)
    print("You need to supply an inventory file.")
    sys.exit(1)

18
bin/reverse-copy Normal file
View File

@ -0,0 +1,18 @@
#!/bin/bash
# Re-run under sudo so the deployed files below are readable; the original
# invocation exits once the root copy finishes.
if [ "$USER" != root ]; then
sudo $0 $@
exit 0
fi
# Walk every "copy:" task in this role's tasks/main.yml and copy the deployed
# file (the task's dest:) back into the role's files/ tree (the task's src:)
# -- the reverse of what the role does on deploy.
# NOTE(review): the variable names appear swapped on purpose -- "dest" is
# filled from the task's src: line and "src" from its dest: line, matching the
# reverse-copy intent. Also confirm the extraction actually yields a path:
# after sed splits on spaces, the grepped line may hold only the "src:" /
# "dest:" token itself, making awk's $2 empty -- TODO verify against a real
# tasks/main.yml.
grep -A 2 copy: tasks/main.yml | tr '\n' ' ' | sed 's/--/\n/g' | while read copyline; do
dest="$(echo "$copyline" | sed 's/ /\n/g' | grep src: | awk '{ print $2; }' )"
src="$(echo "$copyline" | sed 's/ /\n/g' | grep dest: | awk '{ print $2; }' )"
if [ -d "$src" ]; then
# Directories: copy their contents into the matching files/ subtree.
cp -r "$src"/* files/"$dest"
else
cp -r "$src" files/"$dest"
fi
# Hand ownership back to the pre-sudo user so git operations still work.
chown -R "$SUDO_USER": files/"$dest"
done

91
bin/tmux-hosts Executable file
View File

@ -0,0 +1,91 @@
#!/bin/bash
# File: tmux-hosts
#
# Description: This script allows you to open groups of hosts in 2x2 tmux panes
#
# Package: AniNIX/Ubiqtorate
# Copyright: WTFPL
#
# Author: DarkFeather <ircs://irc.aninix.net:6697/DarkFeather>
# Sanity
set -Eo pipefail
# Defaults
group=all
offset=0
unset inventory
function usage() {
# Print the helptext, then terminate with the requested exit code.
# param 1: the exit code to use
local rc="$1"
echo "Usage: $0 [ -o offset ] [-g group ] -i inventory.yml"
echo " $0 -h"
echo "Group is optional -- add it if you only want to look at a specific subset."
echo "Add -v for verbosity."
exit "$rc"
}
function tmuxHosts() {
# Open hosts in Tmux -- ported from pnp/misc-scripts.git geotmux
# Builds a 2x2 grid of ssh panes (one per host) with synchronized input.
# param host1: the first host
# param host2: the second host
# param host3: the third host
# param host4: the fourth host
host1="$1"
host2="$2"
host3="$3"
host4="$4"
# Session/window name is unique per offset so repeated runs don't collide.
name="ansible-tmux-$offset"
# If no TMUX session started, then add one with four panes.
if [ -z "$TMUX" ]; then
tmux new-session -s "$name" -d "/bin/bash -l -c ssh\\ $host1"
tmux select-window -t "$name":0
tmux split-window "/bin/bash -l -c ssh\\ $host2"
tmux split-window -h -t 0 "/bin/bash -l -c ssh\\ $host3"
# NOTE(review): window 1 is selected here though only window 0 was created
# above -- confirm this index is intentional and not a leftover.
tmux select-window -t "$name":1
tmux split-window -h -t 2 "/bin/bash -l -c ssh\\ $host4"
# Mirror keystrokes to all four panes.
tmux setw synchronize-panes
tmux a -d -t "$name"
# Otherwise, add a new window to the current session with all four sessions.
else
tmux new-window -n "$name" "/bin/bash -l -c ssh\\ $host1"
tmux select-window -t "$name"
tmux split-window "/bin/bash -l -c ssh\\ $host2"
tmux select-window -t "$name"
tmux split-window -h -t 0 "/bin/bash -l -c ssh\\ $host3"
tmux select-window -t "$name"
tmux split-window -h -t 2 "/bin/bash -l -c ssh\\ $host4"
tmux setw synchronize-panes
tmux select-window -t "$name"
fi
}
# main
# Only run when executed directly (not when sourced for its functions).
if [ "$(basename $0)" == "tmux-hosts" ]; then
# Parse the command line; see usage() for the option meanings.
while getopts 'g:hi:o:v' OPTION; do
case "${OPTION}" in
g) group="${OPTARG}" ;;
h) echo Open Ansible hosts in TMUX panes.; usage 0 ;;
i) inventory="${OPTARG}" ;;
o) offset="${OPTARG}" ;;
v) set -x ;;
*) usage 1 ;;
esac
done
# An inventory is mandatory -- group and offset have defaults.
if [ -z "$inventory" ]; then
echo Need an inventory.
usage 2;
fi
# Ask ansible for the group's hosts, drop the "hosts (N):" header line and
# whitespace, skip the last $offset entries when requested, then take the
# next four for the 2x2 grid.
tmuxHosts $(ansible -i "$inventory" --list-hosts "$group"\
| grep -v hosts\ \( \
| sed 's/\s\+//g' \
| if [ $offset -gt 0 ]; then head -n -"${offset}"; else cat; fi \
| head -n 4 \
| tr '\n' ' ')
fi

View File

@ -2,16 +2,16 @@ all:
vars:
# Environment-wide data
external_domain: AniNIX.net
replica_domain: "MSN0.{{ external_domain }}"
replica_domain: "MSN0.AniNIX.net"
time_zone: "America/Chicago"
# Services used by all
router: 10.0.1.1
netmask: 24
dns: 10.0.1.2
dhcprange: '10.0.1.224,10.0.1.254,255.255.255.0,12h'
staticrange: '10.0.1.1,10.0.1.223,255.255.255.0,12h'
logserver: "Sharingan.{{ replica_domain }}"
ldapserver: "Core.{{ replica_domain }}"
dns: "10.0.1.2"
logserver: "10.0.1.16"
ldapserver: "10.0.1.3"
# Standards
daemon_shell: /sbin/nologin
user_shell: /bin/bash
@ -36,11 +36,12 @@ all:
mac: 00:25:90:0d:6e:86
static: true
sslidentity: aninix.net-0001
secdetection: true
Node0:
ipinterface: enp1s0f0
ip: 10.0.1.4
mac: DE:8B:9E:19:55:1D
static: true
tap: true
virtual: # 10.0.1.16/28
vars:
hosts:
@ -48,12 +49,14 @@ all:
ip: 10.0.1.16
ipinterface: ens3
mac: 00:15:5D:01:02:10
cores: 6
memory: 6
cores: 4
memory: 4
vnc: 8
bridge: br0
uefi: true
siem: true
disks:
- '-drive format=qcow2,l2-cache-size=8M,file=/srv/maat/vm/Sharingan.qcow2'
- '-drive format=raw,index=0,media=disk,file=/dev/sdd'
DarkNet:
ip: 10.0.1.17
ipinterface: eth0
@ -63,7 +66,7 @@ all:
vnc: 9
bridge: br0
disks:
- '-drive if=none,id=disk0,cache=none,format=raw,aio=native,file=/dev/sdb'
- '-drive format=raw,index=0,media=disk,file=/dev/sdb'
Maat:
ip: 10.0.1.18
ipinterface: ens3
@ -74,6 +77,37 @@ all:
vnc: 7
disks:
- '-drive format=qcow2,l2-cache-size=8M,file=/srv/maat/vm/Maat.qcow2'
test1:
ip: 10.0.1.19
ipinterface: ens3
mac: 00:15:5d:01:02:06
cores: 2
memory: 2
bridge: br0
vnc: 6
disks:
- '-drive format=qcow2,l2-cache-size=8M,file=/srv/maat/vm/test1.qcow2'
test2:
ip: 10.0.1.20
ipinterface: ens3
mac: 00:15:5d:01:02:05
cores: 2
memory: 2
bridge: br0
vnc: 5
disks:
- '-drive format=qcow2,l2-cache-size=8M,file=/srv/maat/vm/test2.qcow2'
test3:
ip: 10.0.1.21
ipinterface: ens3
mac: 00:15:5d:01:02:04
cores: 2
memory: 2
bridge: br0
vnc: 4
disks:
- '-drive format=qcow2,l2-cache-size=8M,file=/srv/maat/vm/test3.qcow2'
geth_hubs: # 10.0.1.32/28
vars:
motion_enabled: yes
@ -81,13 +115,16 @@ all:
Geth-Hub-1:
ip: 10.0.1.32
mac: 84:16:F9:14:15:C5
rotate: 0
Geth-Hub-2:
ip: 10.0.1.33
mac: 84:16:F9:13:B6:E6
motion_enabled: no
rotate: 180
Geth-Hub-3:
ip: 10.0.1.34
mac: b8:27:eb:60:73:68
rotate: 90
unmanaged:
children:
ovas: # 10.0.1.48/28
@ -108,6 +145,7 @@ all:
memory: 2
vnc: 6
bridge: br0
uefi: true
disks:
- '-drive format=qcow2,l2-cache-size=8M,file=/srv/maat/vm/hassos_ova-5.13.qcow2'
DedNet:
@ -166,10 +204,10 @@ all:
Dedsec:
ip: 10.0.1.73
mac: 34:F6:4B:36:12:8F
"Core.Console":
"Core-Console":
ip: 10.0.1.74
mac: 00:25:90:0D:82:5B
"Node0.Console":
"Node0-Console":
ip: 10.0.1.75
mac: 00:25:90:3E:C6:8C
# dhcp build space: 10.0.1.224/27

View File

@ -4,24 +4,50 @@
# This playbook details how an entire datacenter should be deployed
#
# Parameters:
# threads: Number of threads to use; default is 8.
- hosts: managed
# threads: Number of threads to use; default is 16.
#
- hosts: Nazara
order: sorted
serial: "{{ threads | default('8') }}"
serial: "{{ threads | default('16') }}"
gather_facts: true
ignore_unreachable: true
vars_files:
- "{{ lookup('env', 'ANSIBLE_VAULT_FILE') }}"
vars:
- ansible_password: "{{ passwords[inventory_hostname] }}"
- ansible_become_password: "{{ passwords[inventory_hostname] }}"
roles:
- basics
- SSH
- Sharingan-Data
- Nazara
- hosts: geth-hubs
- hosts: managed
order: sorted
serial: "{{ threads | default('8') }}"
serial: "{{ threads | default('16') }}"
gather_facts: true
ignore_unreachable: true
vars_files:
- "{{ lookup('env', 'ANSIBLE_VAULT_FILE') }}"
vars: # This is the only segment that should need these variables, as the basics role should take care of sudo and the SSH key.
- ansible_password: "{{ passwords[inventory_hostname] }}"
- ansible_become_password: "{{ passwords[inventory_hostname] }}"
roles:
- ShadowArch
- SSH
- Sharingan
- hosts: Core
order: sorted
serial: "{{ threads | default('16') }}"
gather_facts: true
ignore_unreachable: true
vars_files:
- "{{ lookup('env', 'ANSIBLE_VAULT_FILE') }}"
roles:
- hardware
- SSL
- hosts: geth_hubs
order: sorted
serial: "{{ threads | default('16') }}"
gather_facts: true
ignore_unreachable: true
vars_files:
@ -29,16 +55,14 @@
roles:
- Geth-Hub
- hosts: Node0
order: sorted
serial: "{{ threads | default('16') }}"
gather_facts: true
ignore_unreachable: true
vars_files:
- "{{ lookup('env', 'ANSIBLE_VAULT_FILE') }}"
roles:
- hardware
- Node
# - hosts: Core
# order: sorted
# serial: "{{ threads | default('8') }}"
# gather_facts: true
# ignore_unreachable: true
# vars_files:
# - "{{ lookup('env', 'ANSIBLE_VAULT_FILE') }}"
# vars:
# roles:
# - Yggdrasil
# - WebServer
# - TheRaven

View File

@ -9,13 +9,12 @@
#
#
# Patch then restart a node
- hosts: "{{ targetlist | default('all') }}"
- hosts: "{{ targets | default('geth_hubs') }}"
order: sorted
ignore_unreachable: true
serial: 1
vars:
ansible_become: yes
ansible_become_user: root
ansible_become_method: sudo
oldmajor: stretch
newmajor: buster

View File

@ -20,6 +20,7 @@
vars:
ansible_ssh_port: "{{ sshport | default('22') }}"
therole: "{{ role | default('Uptime') }}"
ansible_become_password: "{{ passwords[inventory_hostname] }}"
vars_files:
- "{{ lookup('env', 'ANSIBLE_VAULT_FILE') }}"

View File

@ -4,7 +4,7 @@
# Variables:
# - hosts: what hosts in the inventory to use
# - threads: how many to check in parallel
- hosts: "{{ hosts | default('all') }}"
- hosts: "{{ hosts | default('managed') }}"
order: sorted
serial: "{{ threads | default('4') }}"
ignore_unreachable: true

View File

@ -7,44 +7,26 @@
# - target: the host grouper in the inventory -- default: all
#
# Patch then restart a node
- hosts: "{{ target | default('all') }}"
- hosts: Node0
order: sorted
ignore_unreachable: true
serial: 1
vars:
ansible_become: yes
ansible_become_user: root
ansible_become_method: sudo
vars_files:
- "{{ lookup('env', 'ANSIBLE_VAULT_FILE') }}"
tasks:
- name: Check /var free percentage
command: /bin/bash -c "df -m /var | tail -n 1 | awk '{ print $5; }' | sed 's/%//' "
become: no
register: df_output
- name: Verify /var space
assert:
that:
- 90 > {{ df_output.stdout }}
fail_msg: "Not enough free space"
- name: Patching all packages (ArchLinux)
ignore_errors: yes
when: ansible_os_family == "Archlinux"
pacman:
upgrade: yes
update_cache: yes
- name: Patching all packages (Debian)
ignore_errors: yes
when: ansible_os_family == "Debian"
apt:
upgrade: yes
update_cache: yes
- name: Reboot
ignore_errors: yes
reboot:
reboot_timeout: 2
roles:
- patching
- hosts: managed
order: sorted
ignore_unreachable: true
serial: 4
vars:
ansible_become: yes
ansible_become_method: sudo
vars_files:
- "{{ lookup('env', 'ANSIBLE_VAULT_FILE') }}"
roles:
- patching

View File

@ -13,53 +13,27 @@
#
- hosts: "{{ targets | default('managed') }}"
order: sorted
serial: "{{ threads | default('1') }}"
gather_facts: false
serial: "{{ threads | default('8') }}"
gather_facts: true
ignore_unreachable: true
vars:
ansible_ssh_password: "{{ passwords[inventory_hostname] }}"
ansible_ssh_port: "{{ sshport | default('22') }}"
keyfile: "{{ pubkey | default(lookup('env','HOME') + '/.ssh/id_ed25519.pub') }}"
vars_files:
- "{{ lookup('env', 'ANSIBLE_VAULT_FILE') }}"
tasks:
# Scanning SSH keys has been replaced with ../bin/generate-ssh-keyscan
- name: Get key
delegate_to: localhost
command: "cat {{ keyfile }}"
register: key
- name: Ensure known_hosts is commented
delegate_to: localhost
lineinfile:
dest: ~/.ssh/known_hosts
create: yes
state: present
line: "# {{ inventory_hostname + '.' + replica_domain }}"
# Thanks to https://gist.github.com/shirou/6928012
- name: Ensure ssh host RSA key known
delegate_to: localhost
lineinfile:
dest: ~/.ssh/known_hosts
create: yes
state: present
line: "{{ ip + ',' + inventory_hostname + '.' + replica_domain + ',' + lookup('pipe', 'ssh-keyscan -trsa -p' + ansible_ssh_port + ' ' + inventory_hostname) }}"
# Thanks to https://gist.github.com/shirou/6928012
- name: Ensure ssh host ED25519 key known
delegate_to: localhost
lineinfile:
dest: ~/.ssh/known_hosts
create: yes
state: present
line: "{{ ip + ',' + inventory_hostname + '.' + replica_domain + ',' + lookup('pipe', 'ssh-keyscan -ted25519 -p' + ansible_ssh_port + ' ' + inventory_hostname) }}"
- authorized_key:
user: "{{ depriv_user }}"
user: "{{ ansible_user_id }}"
key: "{{ key.stdout }}"
state: present
exclusive: true
name: "Pass authorized key"
vars:
ansible_ssh_password: "{{ vars['passwords'][inventory_hostname] }}"

View File

@ -25,5 +25,4 @@
tasks:
- debug:
msg: "{{ lookup('vars', variablename) }}"
msg: "{{ lookup('vars',variablename) }}"

View File

@ -0,0 +1,24 @@
# Role: patching -- check disk space, patch the host's packages, and reboot
# when the update output suggests a kernel change.
- name: Check /var free percentage
command: /bin/bash -c "df -m /var | tail -n 1 | awk '{ print $5; }' | sed 's/%//' "
become: no
register: df_output
# Abort before patching when /var is 90% full or more.
- name: Verify /var space
assert:
that:
- 90 > {{ df_output.stdout }}
fail_msg: "Not enough free space"
# NOTE(review): ansible.builtin.package documents only name/state/use options;
# 'upgrade'/'update_cache' are options of the underlying managers (apt/pacman)
# -- confirm the generic module actually forwards them on the target OSes.
- name: Patching all packages
ignore_errors: yes
register: updates
package:
upgrade: yes
update_cache: yes
# NOTE(review): package modules do not reliably return 'stdout'; this 'when'
# may error on an undefined attribute -- confirm against a real run.
- name: Reboot
ignore_errors: yes
when: '"linux" in updates.stdout or "kernel" in updates.stdout'
reboot:
reboot_timeout: 2