This commit is contained in:
DarkFeather 2023-10-08 12:28:14 -05:00
parent 5ab88dc387
commit ea75da1b41
Signed by: DarkFeather
GPG Key ID: 1CC1E3F4ED06F296
26 changed files with 257 additions and 134 deletions

2
.gitignore vendored
View File

@ -1,8 +1,10 @@
# Generated files
roles/Node/files/*-vm.service roles/Node/files/*-vm.service
roles/Nazara/files/dns roles/Nazara/files/dns
roles/Nazara/files/dhcp roles/Nazara/files/dhcp
roles/Node/files/vm-definitions/** roles/Node/files/vm-definitions/**
roles/ShadowArch/files/mirrorlist roles/ShadowArch/files/mirrorlist
roles/Sharingan/files/monit/checks/availability
roles/Foundation/files/custom/public/img/** roles/Foundation/files/custom/public/img/**
venv/** venv/**
**/pkg/** **/pkg/**

View File

@ -33,8 +33,7 @@ done
# Get the targetgroup # Get the targetgroup
targetgroup="$2" targetgroup="$2"
if [ -z "$targetgroup" ]; then if [ -z "$targetgroup" ]; then
echo Need a group targetgroup="$role" # Deploy a role to the server named for that function
exit 2
fi fi
# Allow an inventory override # Allow an inventory override

0
bin/generate-mirrorlist Normal file → Executable file
View File

83
bin/generate-monitoring.py Executable file
View File

@ -0,0 +1,83 @@
#!/usr/bin/env python3
# File: generate-monitoring.py
#
# Description: This file generates the monit availability checks from the inventory.
#
# Package: AniNIX/Ubiqtorate
# Copyright: WTFPL
#
# Author: DarkFeather <darkfeather@aninix.net>
import os
import subprocess
import sys
import yaml
rolepath='../roles/Sharingan/files'
monfilepath=rolepath+"/monit/checks/availability"
def WriteMonitoringEntry(content,hosttype,hostclass):
### Create the ping-based monitoring entry
# param content: the yaml content to parse
# param hosttype: managed or unmanaged
# param hostclass: the type of host as classified in the yaml
global monfile
with open(monfilepath,'a') as monfile:
# Write host entries
for host in content['all']['children'][hosttype]['children'][hostclass]['hosts']:
try:
hostname= host + '.' + content['all']['vars']['replica_domain']
monfile.write('check program ' + host + '_ping_mon with path "/usr/lib/monitoring-plugins/check_ping -H ' + hostname + ' -w 100,50% -c 1000,100% -p 3 -t 60 -4"\n')
monfile.write(' if status != 0 for 3 times within 5 cycles then exec "/etc/monit.d/scripts/critical ' + hostname + ' is not online."\n\n')
except:
print(host + ' is not complete for monitoring.')
def WriteSSHMonitoringEntry(content,hosttype,hostclass):
### Create the ping-based monitoring entry
# param content: the yaml content to parse
# param hosttype: managed or unmanaged
# param hostclass: the type of host as classified in the yaml
global monfile
with open(monfilepath,'a') as monfile:
# Write host entries
for host in content['all']['children'][hosttype]['children'][hostclass]['hosts']:
try:
hostname= host + '.' + content['all']['vars']['replica_domain']
monfile.write('check program ' + host + '_ssh_mon with path "/usr/lib/monitoring-plugins/check_ssh -H ' + hostname + '"\n')
monfile.write(' if status != 0 for 3 times within 5 cycles then exec "/etc/monit.d/scripts/critical ' + hostname + ' is not responding to SSH."\n\n')
except:
print(host + ' is not complete for monitoring.')
def GenerateFiles(file):
### Open the file and parse it
# param file: the file to work on
global monfilepath
if not os.path.isdir(rolepath):
os.mkdir(rolepath)
# Parse the yaml
with open(file, 'r') as stream:
content = yaml.safe_load(stream)
if os.path.isfile(monfilepath): os.remove(monfilepath)
# Add DNS entries for each host
hosttype = 'managed'
for hostclass in ['physical','virtual','geth_hubs']:
WriteMonitoringEntry(content,hosttype,hostclass)
WriteSSHMonitoringEntry(content,hosttype,hostclass)
hosttype = 'unmanaged'
for hostclass in ['ovas','appliances']:
WriteMonitoringEntry(content,hosttype,hostclass)
if __name__ == '__main__':
if len(sys.argv) != 2:
print("You need to supply an inventory file.")
sys.exit(1)
GenerateFiles(sys.argv[1])
sys.exit(0)

View File

@ -74,7 +74,7 @@ def GenerateFiles(file):
WriteDNSEntry(content,hosttype,hostclass) WriteDNSEntry(content,hosttype,hostclass)
WriteDHCPEntry(content,hosttype,hostclass) WriteDHCPEntry(content,hosttype,hostclass)
hosttype = 'unmanaged' hosttype = 'unmanaged'
for hostclass in ['ovas','appliances','iot']: for hostclass in ['ovas','test_ovas','appliances','adhoc_appliances','iot']:
WriteDNSEntry(content,hosttype,hostclass) WriteDNSEntry(content,hosttype,hostclass)
WriteDHCPEntry(content,hosttype,hostclass) WriteDHCPEntry(content,hosttype,hostclass)

View File

@ -84,8 +84,8 @@ def GenerateFiles(file):
# Add service files for each host # Add service files for each host
WriteVMFile(content,'managed','virtual') WriteVMFile(content,'managed','virtual')
WriteVMFile(content,'unmanaged','ovas') WriteVMFile(content,'unmanaged','ovas',
#WriteVMFile(content,'unmanaged','appliances') WriteVMFile(content,'unmanaged','test_ovas')
if __name__ == '__main__': if __name__ == '__main__':
if len(sys.argv) != 2: if len(sys.argv) != 2:

View File

@ -71,6 +71,8 @@ all:
siem: true siem: true
disks: disks:
- '-drive format=raw,index=0,media=disk,file=/dev/sdb' - '-drive format=raw,index=0,media=disk,file=/dev/sdb'
# On hold because of https://aninix.net/DarkFeather/MSN0/issues/6
holdpkg: "elasticsearch graylog mongodb44-bin mongodb-tools-bin"
DarkNet: DarkNet:
ipinterface: ens3 ipinterface: ens3
ip: 10.0.1.17 ip: 10.0.1.17
@ -110,17 +112,9 @@ all:
rotate: 90 rotate: 90
unmanaged: unmanaged:
children: children:
# Both OVA groups are in the same subnet -- test_ovas aren't monitored
ovas: # 10.0.1.48/28 ovas: # 10.0.1.48/28
hosts: hosts:
TDS-Jump:
ip: 10.0.1.48
mac: 00:15:5d:01:02:08
cores: 2
memory: 2
vnc: 4
bridge: br0
disks:
- '-drive format=qcow2,l2-cache-size=8M,file=/srv/maat/vm/TDSJump.qcow2'
Geth: Geth:
ip: 10.0.1.49 ip: 10.0.1.49
mac: DE:8B:9E:19:55:1E mac: DE:8B:9E:19:55:1E
@ -131,6 +125,17 @@ all:
uefi: true uefi: true
disks: disks:
- '-drive format=qcow2,l2-cache-size=8M,file=/srv/maat/vm/hassos_ova-5.13.qcow2' - '-drive format=qcow2,l2-cache-size=8M,file=/srv/maat/vm/hassos_ova-5.13.qcow2'
test_ovas: # 10.0.1.48/28
hosts:
TDS-Jump:
ip: 10.0.1.48
mac: 00:15:5d:01:02:08
cores: 2
memory: 2
vnc: 4
bridge: br0
disks:
- '-drive format=qcow2,l2-cache-size=8M,file=/srv/maat/vm/TDSJump.qcow2'
DedNet: DedNet:
ip: 10.0.1.50 ip: 10.0.1.50
mac: 00:15:5d:01:02:09 mac: 00:15:5d:01:02:09
@ -181,11 +186,26 @@ all:
vnc: 12 vnc: 12
disks: disks:
- '-drive format=qcow2,l2-cache-size=8M,file=/srv/maat/vm/test3.qcow2' - '-drive format=qcow2,l2-cache-size=8M,file=/srv/maat/vm/test3.qcow2'
# appliances are monitored -- adhoc_appliances are convenience only and not monitored.
appliances: appliances:
hosts: # 10.0.1.64/27 hosts: # 10.0.1.64/27
Shadowfeed: Shadowfeed: # Router must be at root
ip: 10.0.1.1 ip: 10.0.1.1
mac: 2c:30:33:64:f4:03 mac: 2c:30:33:64:f4:03
Print: # Print is excepted for legacy setup reasons before we laid out subnets.
ip: 10.0.1.6
mac: 00:80:92:77:CE:E4
Geth-Eyes:
ip: 10.0.1.68
mac: 9C:A3:AA:33:A3:99
"Core-Console":
ip: 10.0.1.74
mac: 00:25:90:0D:82:5B
"Node0-Console":
ip: 10.0.1.75
mac: 00:25:90:3E:C6:8C
adhoc_appliances:
hosts: # 10.0.1.64/27
DarkFeather: DarkFeather:
ip: 10.0.1.64 ip: 10.0.1.64
mac: D0:40:EF:D4:14:CF mac: D0:40:EF:D4:14:CF
@ -195,19 +215,13 @@ all:
Games: Games:
ip: 10.0.1.66 ip: 10.0.1.66
mac: E0:BE:03:77:0E:88 mac: E0:BE:03:77:0E:88
Print:
ip: 10.0.1.67
mac: 00:80:92:77:CE:E4
Geth-Eyes:
ip: 10.0.1.68
mac: 9C:A3:AA:33:A3:99
LivingRoomTV: LivingRoomTV:
ip: 10.0.1.69 ip: 10.0.1.69
mac: 80:D2:1D:17:63:0E mac: 80:D2:1D:17:63:0E
BedRoomTV: BedRoomTV:
ip: 10.0.1.70 ip: 10.0.1.70
mac: 80:D2:1D:17:63:0F mac: 80:D2:1D:17:63:0F
TraingRoomTV: TrainingRoomTV:
ip: 10.0.1.71 ip: 10.0.1.71
mac: 80:D2:1D:17:63:10 mac: 80:D2:1D:17:63:10
Tachikoma: Tachikoma:
@ -216,12 +230,6 @@ all:
Dedsec: Dedsec:
ip: 10.0.1.73 ip: 10.0.1.73
mac: 34:F6:4B:36:12:8F mac: 34:F6:4B:36:12:8F
"Core-Console":
ip: 10.0.1.74
mac: 00:25:90:0D:82:5B
"Node0-Console":
ip: 10.0.1.75
mac: 00:25:90:3E:C6:8C
# dhcp build space: 10.0.1.224/27 # dhcp build space: 10.0.1.224/27
iot: # 10.0.2.0/24 iot: # 10.0.2.0/24
hosts: hosts:

View File

@ -13,7 +13,7 @@ _gaq.push(['_trackPageview']);
</script> </script>
<!-- Replace Gitea icon with AniNIX --> <!-- Replace Gitea icon with AniNIX -->
<script type="text/javascript"> <script type="text/javascript">
document.getElementsByClassName('brand')[0].children[0].children[0].src="/assets/img/AniNIX.png"; document.getElementById('navbar').children[0].children[0].children[0].src="/assets/img/AniNIX.png";
$('meta[property=og\\:image]').attr('content', '/assets/img/AniNIX.png'); $('meta[property=og\\:image]').attr('content', '/assets/img/AniNIX.png');
$('link[rel="mask-icon"]').attr('href', '/assets/img/AniNIX.png'); $('link[rel="mask-icon"]').attr('href', '/assets/img/AniNIX.png');
$('link[rel="mask-icon"]').attr('color', '#000000'); $('link[rel="mask-icon"]').attr('color', '#000000');

View File

@ -0,0 +1,10 @@
[Trigger]
Operation = Install
Operation = Upgrade
Type = Package
Target = gitea
[Action]
Description = Updating Gitea Custom Pages
When = PostTransaction
Exec = /usr/bin/runuser -u gitea -- /usr/bin/bash /var/lib/gitea/custom/bin/gen-aninix-custom

View File

@ -20,10 +20,6 @@ FORCE_PRIVATE = false
DEFAULT_PRIVATE = last DEFAULT_PRIVATE = last
; Global limit of repositories per user, applied at creation time. -1 means no limit ; Global limit of repositories per user, applied at creation time. -1 means no limit
MAX_CREATION_LIMIT = -1 MAX_CREATION_LIMIT = -1
; Mirror sync queue length, increase if mirror syncing starts hanging
MIRROR_QUEUE_LENGTH = 1000
; Patch test queue length, increase if pull request patch testing starts hanging
PULL_REQUEST_QUEUE_LENGTH = 1000
; Preferred Licenses to place at the top of the List ; Preferred Licenses to place at the top of the List
; The name here must match the filename in conf/license or custom/conf/license ; The name here must match the filename in conf/license or custom/conf/license
PREFERRED_LICENSES = AniNIX-WTFPL PREFERRED_LICENSES = AniNIX-WTFPL
@ -233,12 +229,14 @@ LANDING_PAGE = home
; Enables git-lfs support. true or false, default is false. ; Enables git-lfs support. true or false, default is false.
LFS_START_SERVER = true LFS_START_SERVER = true
; Where your lfs files reside, default is data/lfs. ; Where your lfs files reside, default is data/lfs.
LFS_CONTENT_PATH = data/lfs
; LFS authentication secret, change this yourself ; LFS authentication secret, change this yourself
LFS_JWT_SECRET = {{ secrets.Foundation.lfs_jwt_secret }} LFS_JWT_SECRET = {{ secrets.Foundation.lfs_jwt_secret }}
; LFS authentication validity period (in time.Duration), pushes taking longer than this may fail. ; LFS authentication validity period (in time.Duration), pushes taking longer than this may fail.
LFS_HTTP_AUTH_EXPIRY = 20m LFS_HTTP_AUTH_EXPIRY = 20m
[lfs]
PATH = data/lfs
; Define allowed algorithms and their minimum key length (use -1 to disable a type) ; Define allowed algorithms and their minimum key length (use -1 to disable a type)
[ssh.minimum_key_sizes] [ssh.minimum_key_sizes]
ED25519 = 256 ED25519 = 256
@ -278,19 +276,10 @@ DB_RETRY_BACKOFF = 3s
ISSUE_INDEXER_TYPE = bleve ISSUE_INDEXER_TYPE = bleve
; Issue indexer storage path, available when ISSUE_INDEXER_TYPE is bleve ; Issue indexer storage path, available when ISSUE_INDEXER_TYPE is bleve
ISSUE_INDEXER_PATH = indexers/issues.bleve ISSUE_INDEXER_PATH = indexers/issues.bleve
; Issue indexer queue, currently support: channel or levelqueue, default is levelqueue
ISSUE_INDEXER_QUEUE_TYPE = levelqueue
; When ISSUE_INDEXER_QUEUE_TYPE is levelqueue, this will be the queue will be saved path,
; default is indexers/issues.queue
ISSUE_INDEXER_QUEUE_DIR = indexers/issues.queue
; When `ISSUE_INDEXER_QUEUE_TYPE` is `redis`, this will store the redis connection string. ; When `ISSUE_INDEXER_QUEUE_TYPE` is `redis`, this will store the redis connection string.
ISSUE_INDEXER_QUEUE_CONN_STR = addrs=127.0.0.1:6379 db=0
; Batch queue number, default is 20
ISSUE_INDEXER_QUEUE_BATCH_NUMBER = 20
; repo indexer by default disabled, since it uses a lot of disk space ; repo indexer by default disabled, since it uses a lot of disk space
REPO_INDEXER_ENABLED = false REPO_INDEXER_ENABLED = false
REPO_INDEXER_PATH = indexers/repos.bleve REPO_INDEXER_PATH = indexers/repos.bleve
UPDATE_BUFFER_LEN = 20
MAX_FILE_SIZE = 1048576 MAX_FILE_SIZE = 1048576
[admin] [admin]
@ -361,7 +350,7 @@ RESET_PASSWD_CODE_LIVE_MINUTES = 180
REGISTER_EMAIL_CONFIRM = false REGISTER_EMAIL_CONFIRM = false
; List of domain names that are allowed to be used to register on a Gitea instance ; List of domain names that are allowed to be used to register on a Gitea instance
; gitea.io,example.com ; gitea.io,example.com
EMAIL_DOMAIN_WHITELIST = EMAIL_DOMAIN_ALLOWLIST =
; Disallow registration, only allow admins to create accounts. ; Disallow registration, only allow admins to create accounts.
DISABLE_REGISTRATION = true DISABLE_REGISTRATION = true
; Allow registration only using third-party services, it works only when DISABLE_REGISTRATION is false ; Allow registration only using third-party services, it works only when DISABLE_REGISTRATION is false
@ -430,43 +419,9 @@ SKIP_TLS_VERIFY = false
PAGING_NUM = 10 PAGING_NUM = 10
ALLOWED_HOST_LIST = ::1/128, 127.0.0.1/32 ALLOWED_HOST_LIST = ::1/128, 127.0.0.1/32
; We don't use mail
[mailer] [mailer]
ENABLED = false ENABLED = false
; Buffer length of channel, keep it as it is if you don't know what it is.
SEND_BUFFER_LEN = 100
; Prefix displayed before subject in mail
SUBJECT_PREFIX =
; Mail server
; Gmail: smtp.gmail.com:587
; QQ: smtp.qq.com:465
; Note, if the port ends with "465", SMTPS will be used. Using STARTTLS on port 587 is recommended per RFC 6409. If the server supports STARTTLS it will always be used.
HOST =
; Disable HELO operation when hostnames are different.
DISABLE_HELO =
; Custom hostname for HELO operation, if no value is provided, one is retrieved from system.
HELO_HOSTNAME =
; Do not verify the certificate of the server. Only use this for self-signed certificates
SKIP_VERIFY =
; Use client certificate
USE_CERTIFICATE = false
CERT_FILE = custom/mailer/cert.pem
KEY_FILE = custom/mailer/key.pem
; Should SMTP connection use TLS
IS_TLS_ENABLED = false
; Mail from address, RFC 5322. This can be just an email address, or the `"Name" <email@example.com>` format
FROM =
; Mailer user name and password
USER =
; Use PASSWD = `your password` for quoting if you use special characters in the password.
PASSWD =
; Send mails as plain text
SEND_AS_PLAIN_TEXT = false
; Set Mailer Type (either SMTP, sendmail or dummy to just send to the log)
MAILER_TYPE = smtp
; Specify an alternative sendmail binary
SENDMAIL_PATH = sendmail
; Specify any extra sendmail arguments
SENDMAIL_ARGS =
[cache] [cache]
; Either "memory", "redis", or "memcache", default is "memory" ; Either "memory", "redis", or "memcache", default is "memory"
@ -544,6 +499,13 @@ MAX_FILES = 5
FORMAT = FORMAT =
[log] [log]
ROOT_PATH = %(GITEA_WORK_DIR)/log
MODE = console
LEVEL = Info
STACKTRACE_LEVEL = None
logger.router.MODE = ,
logger.xorm.MODE = ,
logger.access.MODE =
ROOT_PATH = /var/log/gitea/ ROOT_PATH = /var/log/gitea/
; Either "console", "file", "conn", "smtp" or "database", default is "console" ; Either "console", "file", "conn", "smtp" or "database", default is "console"
; Use comma to separate multiple modes, e.g. "console, file" ; Use comma to separate multiple modes, e.g. "console, file"
@ -551,11 +513,8 @@ MODE = console
; Buffer length of the channel, keep it as it is if you don't know what it is. ; Buffer length of the channel, keep it as it is if you don't know what it is.
BUFFER_LEN = 10000 BUFFER_LEN = 10000
; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info" ; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
ROUTER_LOG_LEVEL = Critical
ROUTER = none
ENABLE_ACCESS_LOG = true
ACCESS_LOG_TEMPLATE = {{ '{{' }}.Ctx.RemoteAddr{{ '}}' }} - {{ '{{' }}.Identity{{ '}}' }} {{ '{{' }}.Start.Format "[02/Jan/2006:15:04:05 -0700]" {{ '}}' }} "{{ '{{' }}.Ctx.Req.Method{{ '}}' }} {{ '{{' }}.Ctx.Req.RequestURI{{ '}}' }} {{ '{{' }}.Ctx.Req.Proto{{ '}}' }}" {{ '{{' }}.ResponseWriter.Status{{ '}}' }} {{ '{{' }}.ResponseWriter.Size{{ '}}' }} "{{ '{{' }}.Ctx.Req.Referer{{ '}}' }}\" \"{{ '{{' }}.Ctx.Req.UserAgent{{ '}}' }}" ACCESS_LOG_TEMPLATE = {{ '{{' }}.Ctx.RemoteAddr{{ '}}' }} - {{ '{{' }}.Identity{{ '}}' }} {{ '{{' }}.Start.Format "[02/Jan/2006:15:04:05 -0700]" {{ '}}' }} "{{ '{{' }}.Ctx.Req.Method{{ '}}' }} {{ '{{' }}.Ctx.Req.RequestURI{{ '}}' }} {{ '{{' }}.Ctx.Req.Proto{{ '}}' }}" {{ '{{' }}.ResponseWriter.Status{{ '}}' }} {{ '{{' }}.ResponseWriter.Size{{ '}}' }} "{{ '{{' }}.Ctx.Req.Referer{{ '}}' }}\" \"{{ '{{' }}.Ctx.Req.UserAgent{{ '}}' }}"
ACCESS = console logger.access.MODE = console
; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace" ; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace"
LEVEL = Info LEVEL = Info
; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "None" ; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "None"
@ -570,9 +529,10 @@ COLORIZE = false
; For "console" mode only ; For "console" mode only
[log.console] [log.console]
LEVEL = MODE = console
COLORIZE = false FLAGS = stdflags
STDERR = false PREFIX =
COLORIZE = true
; For "file" mode only ; For "file" mode only
[log.file] [log.file]

View File

@ -67,8 +67,9 @@
<sslprofile <sslprofile
name="clients" name="clients"
provider="openssl" provider="openssl"
cafile="/etc/letsencrypt/live/{{ ssl['identity'] }}/fullchain.pem" cafile="/etc/letsencrypt/live/{{ ssl['identity'] }}/chain.pem"
certfile="/etc/letsencrypt/live/{{ ssl['identity'] }}/cert.pem" keyfile="/etc/letsencrypt/live/{{ ssl['identity'] }}/privkey.pem" certfile="/etc/letsencrypt/live/{{ ssl['identity'] }}/fullchain.pem"
keyfile="/etc/letsencrypt/live/{{ ssl['identity'] }}/privkey.pem"
ciphers="{{ ssl['ciphersuite'] }}" ciphers="{{ ssl['ciphersuite'] }}"
hash="sha256" hash="sha256"
renegotiation="no" renegotiation="no"

View File

@ -71,6 +71,7 @@ https://aur.archlinux.org/perl-php-serialization.git
https://aur.archlinux.org/perl-sys-mmap.git https://aur.archlinux.org/perl-sys-mmap.git
https://aur.archlinux.org/perl-term-shellui.git https://aur.archlinux.org/perl-term-shellui.git
https://aur.archlinux.org/php-pear.git https://aur.archlinux.org/php-pear.git
https://aur.archlinux.org/php-zts.git
https://aur.archlinux.org/pm-utils.git https://aur.archlinux.org/pm-utils.git
https://aur.archlinux.org/powerpanel.git https://aur.archlinux.org/powerpanel.git
https://aur.archlinux.org/python-aiohttp.git https://aur.archlinux.org/python-aiohttp.git

View File

@ -84,9 +84,10 @@
- name: Set up pacman.conf - name: Set up pacman.conf
vars: vars:
ansible_become_password: "{{ passwords[inventory_hostname] }}" ansible_become_password: "{{ passwords[inventory_hostname] }}"
ignorepkg: "{{ holdpackages | default('') }}"
become: yes become: yes
copy: template:
src: pacman.conf src: pacman.conf.j2
dest: /etc/pacman.conf dest: /etc/pacman.conf
owner: root owner: root
group: root group: root

View File

@ -23,7 +23,7 @@ CleanMethod = KeepCurrent
Architecture = auto Architecture = auto
# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup # Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup
IgnorePkg = mediawiki IgnorePkg = {{ ignorepkg }}
# IgnoreGroup = # IgnoreGroup =
#NoUpgrade = #NoUpgrade =

View File

@ -25,5 +25,12 @@ check program https_singularity with path "/usr/lib/monitoring-plugins/check_htt
check program https_wolfpack with path "/usr/lib/monitoring-plugins/check_http --ssl -w 10 -c 10 -H wolfpack.aninix.net" check program https_wolfpack with path "/usr/lib/monitoring-plugins/check_http --ssl -w 10 -c 10 -H wolfpack.aninix.net"
if status != 0 for 3 times within 5 cycles then exec "/etc/monit.d/scripts/critical wolfpack.aninix.net not reporting OK" if status != 0 for 3 times within 5 cycles then exec "/etc/monit.d/scripts/critical wolfpack.aninix.net not reporting OK"
#check program https_yggdrasil with path "/usr/lib/monitoring-plugins/check_http --ssl -w 10 -c 10 -u /web/index.html -H yggdrasil.aninix.net" check program https_yggdrasil with path "/usr/lib/monitoring-plugins/check_http --ssl -w 10 -c 10 -u /web/index.html -H yggdrasil.aninix.net"
# if status != 0 for 3 times within 5 cycles then exec "/etc/monit.d/scripts/critical yggdrasil.aninix.net not reporting OK" every "* 6-23 * * *"
if status != 0 for 3 times within 5 cycles then exec "/etc/monit.d/scripts/critical yggdrasil.aninix.net not reporting OK"
check program http_eyes with path "/usr/lib/monitoring-plugins/check_http -w 10 -c 10 -u / -H geth-eyes.msn0.aninix.net"
if status != 0 for 3 times within 5 cycles then exec "/etc/monit.d/scripts/critical geth-eyes HTTP not reporting OK"
check program http_shadowfeed with path "/usr/lib/monitoring-plugins/check_http -w 10 -c 10 -u / -H shadowfeed.msn0.aninix.net"
if status != 0 for 3 times within 5 cycles then exec "/etc/monit.d/scripts/critical shadowfeed HTTP not reporting OK"

View File

@ -1,3 +1,3 @@
include "/etc/monit.d/checks/system" include "/etc/monit.d/checks/system"
include "/etc/monit.d/checks/vips" include "/etc/monit.d/checks/vips"
include "/etc/monit.d/checks/availability"

View File

@ -1,6 +1,11 @@
--- ---
- name: Sharingan-Eval service - name: Generate monitoring from inventory
delegate_to: localhost
run_once: true
command: "python3 ../bin/generate-monitoring.py {{ inventory_file }}"
- name: Sharingan-Eval service copy
become: yes become: yes
register: eval_service register: eval_service
copy: copy:
@ -55,5 +60,3 @@
owner: root owner: root
group: root group: root
mode: 0700 mode: 0700

View File

@ -45,4 +45,3 @@
name: syslog-ng@default.service name: syslog-ng@default.service
state: stopped state: stopped
enabled: no enabled: no

View File

@ -12,4 +12,3 @@
- import_tasks: ../roles/Sharingan/tasks/scans.yml - import_tasks: ../roles/Sharingan/tasks/scans.yml
when: ansible_os_family == "Archlinux" when: ansible_os_family == "Archlinux"

View File

@ -5,7 +5,7 @@
package: package:
name: name:
- elasticsearch - elasticsearch
- mongodb - mongodb44-bin # Temporarily pinned for extensions
- graylog - graylog
state: present state: present

View File

@ -27,7 +27,7 @@ server {
location /martialarts/maqotw.xml { location /martialarts/maqotw.xml {
proxy_hide_header Content-Type; proxy_hide_header Content-Type;
add_header content-type "application/atom+xml"; add_header content-type "application/atom+xml";
rewrite /martialarts/maqotw.xml /AniNIX/Wiki/raw/branch/main/rss/maqotw.xml; rewrite /martialarts/maqotw.xml /MartialArts/Wiki/raw/branch/main/rss/maqotw.xml;
} }
location /whatismyip { location /whatismyip {

View File

@ -1,22 +1,46 @@
server { server {
listen 443 ssl http2; listen 443 ssl;
server_name cyberbrain.aninix.net; server_name cyberbrain.aninix.net;
include sec.conf; include local.conf;
include default.csp.conf;
location / root /usr/share/webapps/;
{
auth_basic "Cyberbrain"; client_max_body_size 5m;
auth_basic_user_file ../passwords/cyberbrain.htpasswd; client_body_timeout 60;
proxy_pass http://127.0.0.1:8822;
proxy_http_version 1.1; include ../conf.d/fastcgi7.config;
proxy_read_timeout 300;
proxy_set_header Upgrade $http_upgrade; location /mediawiki-gb/ {
proxy_set_header Connection "upgrade"; try_files $uri $uri/ @rewrite;
proxy_set_header Host $http_host; rewrite ^/mediawiki-gb/(.*)$ /mediawiki/index.php?title=$1&$args;
proxy_set_header X-Real-IP $remote_addr; rewrite ^$ /mediawiki-gb/Main_Page;
proxy_set_header X-Real-PORT $remote_port; rewrite ^/$ /mediawiki-gb/Main_Page;
rewrite ^mediawiki-gb$ /mediawiki-gb/Main_Page;
rewrite ^mediawiki-gb/$ /mediawiki-gb/Main_Page;
}
location /mediawiki-ma/ {
try_files $uri $uri/ @rewrite;
rewrite ^/mediawiki-ma/(.*)$ /mediawiki/index.php?title=$1&$args;
rewrite ^$ /mediawiki-ma/Main_Page;
rewrite ^/$ /mediawiki-ma/Main_Page;
rewrite ^mediawiki-ma$ /mediawiki-ma/Main_Page;
rewrite ^mediawiki-ma/$ /mediawiki-ma/Main_Page;
}
location ^~ /maintenance/ {
return 403;
}
location ~* \.(js|css|png|jpg|jpeg|gif|ico)$ {
try_files $uri /mediawiki/index.php;
expires max;
log_not_found off;
}
location ^~ /cache/ {
deny all;
} }
include letsencrypt.conf; include letsencrypt.conf;

View File

@ -1,13 +0,0 @@
location ~ \.php$ {
try_files $fastcgi_script_name =404;
include fastcgi_params;
fastcgi_pass unix:/run/php-fpm7/php-fpm.sock;
fastcgi_index index.php;
fastcgi_buffers 8 16k;
fastcgi_buffer_size 32k;
fastcgi_param DOCUMENT_ROOT $realpath_root;
fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name;
}

View File

@ -9,7 +9,7 @@ server {
client_max_body_size 5m; client_max_body_size 5m;
client_body_timeout 60; client_body_timeout 60;
include ../conf.d/fastcgi7.config; include ../conf.d/fastcgi.config;
location / { location / {
try_files $uri $uri/ @rewrite; try_files $uri $uri/ @rewrite;

View File

@ -0,0 +1,37 @@
user http;
worker_processes 4;
# Logs
error_log logs/error.log;
error_log logs/error.log notice;
error_log logs/error.log info;
events {
worker_connections 1024;
}
http {
include mime.types;
include fastcgi.conf;
default_type application/octet-stream;
server_tokens off;
sendfile on;
keepalive_timeout 65;
gzip on;
# Redirect all HTTP to HTTPS
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
location / {
return 301 https://$host$request_uri;
}
}
include ../conf.d/*.conf;
}

View File

@ -35,12 +35,13 @@
- name: Copy conf.d - name: Copy conf.d
become: yes become: yes
copy: copy:
src: "conf.d/{{ inventory_hostname }}" src: "conf.d/{{ inventory_hostname }}/"
dest: /opt/openresty/nginx/conf.d dest: /opt/openresty/nginx/conf.d/
owner: http owner: http
group: http group: http
mode: 0660 mode: 0660
directory_mode: 0770 directory_mode: 0770
follow: true
register: confd register: confd
- name: Copy conf - name: Copy conf
@ -51,12 +52,13 @@
owner: http owner: http
group: http group: http
mode: 0660 mode: 0660
follow: true
register: conf register: conf
- name: Populate security config - name: Populate security config
become: yes become: yes
template: template:
src: sec.conf.j2 src: conf/sec.conf.j2
dest: /opt/openresty/nginx/conf/sec.conf dest: /opt/openresty/nginx/conf/sec.conf
owner: http owner: http
group: http group: http