Update 19 files

- /home/.chezmoiscripts/universal/run_onchange_after_40-firefox.tmpl
- /home/dot_config/VirtualBox/VirtualBox.xml.tmpl
- /home/dot_config/docker/templates/healthchecks.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/dashy.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/portainer.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/sonatype.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/statping.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/wazuh.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/wireguard.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/rundeck.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/nginx.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/htpc.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/gitlab.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/code-server.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/instapy.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/josh.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/nextcloud.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/nginx-proxy-manager.docker-stack.yml.tmpl
- /home/dot_config/docker/templates/thelounge.docker-stack.yml.tmpl
This commit is contained in:
Brian Zalewski 2023-02-05 05:55:09 +00:00
parent 2c361fa687
commit 42060c43c3
19 changed files with 830 additions and 11 deletions

View file

@ -5,6 +5,7 @@
{{ includeTemplate "universal/logg" }}
# Firefox plugins: {{ list (.firefoxAddOns | toString | replace "[" "" | replace "]" "") | uniq | join " " }}
#
### Installs the Firefox Profile Connector on Linux systems (Snap / Flatpak installs are not included in this function, but instead inline below)
function installFirefoxProfileConnector() {

View file

@ -1,27 +1,18 @@
<?xml version="1.0"?>
<!--
** DO NOT EDIT THIS FILE.
** If you make changes to this file while any VirtualBox related application
** is running, your changes will be overwritten later, without taking effect.
** Use VBoxManage or the VirtualBox Manager GUI to make changes.
-->
<VirtualBox xmlns="http://www.virtualbox.org/" version="1.12-linux">
<Global>
<ExtraData>
<ExtraDataItem name="GUI/Details/Elements" value="general,system,preview,display,storage,audio,network,usb,sharedFolders,description"/>
<ExtraDataItem name="GUI/GroupDefinitions/" value="n=GLOBAL"/>
<ExtraDataItem name="GUI/LastItemSelected" value="n=GLOBAL"/>
<ExtraDataItem name="GUI/LastWindowPosition" value="341,164,683,404"/>
<ExtraDataItem name="GUI/SplitterSizes" value="226,456"/>
<!-- NOTE(review): the two GUI/Toolbar entries below look like an old/new diff pair whose +/- markers were lost in extraction; a real config should contain exactly one (presumably value="true"). Confirm against the committed file. -->
<ExtraDataItem name="GUI/Toolbar" value="false"/>
<ExtraDataItem name="GUI/Toolbar" value="true"/>
<ExtraDataItem name="GUI/Tools/LastItemsSelected" value="Welcome,Details"/>
<ExtraDataItem name="GUI/UpdateCheckCount" value="2"/>
<ExtraDataItem name="GUI/UpdateDate" value="1 d, 2021-12-15, stable, 6.1.30"/>
</ExtraData>
<MachineRegistry/>
<NetserviceRegistry>
<DHCPServers>
<!-- NOTE(review): likewise, the two vboxnet0 DHCPServer lines (192.168.56.x vs 192.168.55.x) appear to be a before/after pair from the diff — keep only one subnet definition for the same networkName. -->
<DHCPServer networkName="HostInterfaceNetworking-vboxnet0" IPAddress="192.168.56.100" networkMask="255.255.255.0" lowerIP="192.168.56.101" upperIP="192.168.56.254" enabled="1"/>
<DHCPServer networkName="HostInterfaceNetworking-vboxnet0" IPAddress="192.168.55.100" networkMask="255.255.255.0" lowerIP="192.168.55.101" upperIP="192.168.55.254" enabled="1"/>
</DHCPServers>
</NetserviceRegistry>
<SystemProperties defaultMachineFolder="/home/{{ .user.username }}/.local/virtualbox" defaultHardDiskFormat="VDI" VRDEAuthLibrary="VBoxAuth" webServiceAuthLibrary="VBoxAuth" LogHistoryCount="3" proxyMode="0" exclusiveHwVirt="true"/>

View file

@ -0,0 +1,16 @@
---
version: '3.8'

# code-server: VS Code served in the browser (linuxserver image).
services:
  code-server:
    image: linuxserver/code-server
    container_name: CodeServer
    environment:
      # Quoted so YAML keeps them as strings; env vars are strings anyway.
      PUID: "1000"
      PGID: "1000"
      TZ: America/New_York
    volumes:
      - ./config:/config
    ports:
      # HOST:CONTAINER quoted — unquoted digit:digit pairs can hit YAML's
      # base-60 integer trap.
      - "28814:8443"
    restart: unless-stopped

View file

@ -0,0 +1,28 @@
---
version: "3.8"

# Dashy dashboard.
services:
  dashy:
    # To build from source, replace 'image: lissy93/dashy' with 'build: .'
    # build: .
    image: lissy93/dashy
    container_name: Dashy
    # Bind-mount your config file. NOTE(review): the original mapped
    # /root/my-config.yml (upstream's sample HOST path) to the templated
    # user path on the CONTAINER side — i.e. the two sides were swapped.
    # Dashy's published compose example mounts the host config onto
    # /app/public/conf.yml inside the container; confirm against the dashy
    # version in use.
    volumes:
      - {{ .user.home }}/config/dashy/conf.yml:/app/public/conf.yml
    ports:
      - "4000:80"
    # Set any environmental variables
    environment:
      - NODE_ENV=production
      # Specify your user ID and group ID. You can find this by running `id -u` and `id -g`
      # - UID=1000
      # - GID=1000
    # Specify restart policy
    restart: unless-stopped
    # Configure healthchecks
    healthcheck:
      test: ['CMD', 'node', '/app/services/healthcheck']
      interval: 1m30s
      timeout: 10s
      retries: 3
      start_period: 40s

View file

@ -0,0 +1,39 @@
---
version: "3.6"

# GitLab EE plus four replicated runners (swarm stack).
services:
  gitlab:
    image: gitlab/gitlab-ee:latest
    ports:
      - "22:22"     # SSH for git operations
      - "80:80"
      - "443:443"
    volumes:
      - gitlab-data:/var/opt/gitlab
      - gitlab-logs:/var/log/gitlab
      - gitlab-config:/etc/gitlab
    # GitLab's embedded services are shared-memory hungry; default 64m is too small.
    shm_size: '256m'
    environment:
      # Omnibus evaluates this Ruby snippet, which loads the mounted config below.
      GITLAB_OMNIBUS_CONFIG: "from_file('/omnibus_config.rb')"
    configs:
      - source: gitlab
        target: /omnibus_config.rb
    secrets:
      - gitlab_root_password
  gitlab-runner:
    image: gitlab/gitlab-runner:alpine
    deploy:
      mode: replicated
      replicas: 4
# The config and secret are created outside this stack (external: true).
configs:
  gitlab:
    external: true
secrets:
  gitlab_root_password:
    external: true
volumes:
  gitlab-data:
  gitlab-logs:
  gitlab-config:

View file

@ -0,0 +1,105 @@
---
version: '3.7'

# Healthchecks (cron-job monitoring) backed by Postgres; credentials and
# keys are injected via Docker secrets (_FILE variants), never inline.
services:
  healthchecks:
    image: lscr.io/linuxserver/healthchecks:latest
    container_name: Healthchecks
    depends_on:
      - postgres
    networks:
      - healthchecks_network
      - nginx_network   # exposes the UI to the reverse proxy only
    environment:
      ALLOWED_HOSTS: "{{ template.ALLOWED_HOSTS }}"
      APPRISE_ENABLED: 'False'
      DB_HOST: postgres
      DB_NAME_FILE: /run/secrets/healthchecks_db_name
      DB_PASSWORD_FILE: /run/secrets/healthchecks_db_password
      DB_USER_FILE: /run/secrets/healthchecks_db_user
      DEBUG: 'False'
      DEFAULT_FROM_EMAIL: "{{ template.DEFAULT_FROM_EMAIL }}"
      EMAIL_HOST_PASSWORD_FILE: /run/secrets/healthchecks_sendgrid_api_key
      # SendGrid SMTP always authenticates with the literal user "apikey".
      EMAIL_HOST_USER: apikey
      EMAIL_HOST: smtp.sendgrid.net
      EMAIL_PORT: 587
      EMAIL_USE_TLS: 'True'
      PGID: 1000
      PROMETHEUS_ENABLED: 'True'
      PUID: 1000
      REGENERATE_SETTINGS: 'True'
      SECRET_KEY_FILE: /run/secrets/healthchecks_secret_key
      SHELL_ENABLED: 'True'
      SITE_LOGO_URL: "{{ template.SITE_LOGO_URL }}"
      SITE_NAME: "{{ template.SITE_NAME }}"
      SITE_ROOT: "{{ template.SITE_ROOT }}"
      SLACK_CLIENT_ID_FILE: /run/secrets/healthchecks_slack_client_id
      SLACK_CLIENT_SECRET_FILE: /run/secrets/healthchecks_slack_client_secret
      SLACK_ENABLED: 'True'
      SUPERUSER_EMAIL_FILE: /run/secrets/healthchecks_superuser_email
      SUPERUSER_PASSWORD_FILE: /run/secrets/healthchecks_superuser_password
      WEBHOOKS_ENABLED: 'True'
    deploy:
      mode: replicated
      replicas: 1
    volumes:
      - healthchecks_config:/config
    expose:
      - 8000
    # NOTE(review): in swarm mode `deploy.restart_policy` governs restarts;
    # `restart:` is ignored by `docker stack deploy` (harmless, kept for
    # plain docker-compose use).
    restart: unless-stopped
    secrets:
      - healthchecks_db_name
      - healthchecks_db_password
      - healthchecks_db_user
      - healthchecks_secret_key
      - healthchecks_sendgrid_api_key
      - healthchecks_superuser_email
      - healthchecks_superuser_password
  postgres:
    container_name: Postgres
    image: postgres:10
    restart: unless-stopped
    volumes:
      - healthchecks_postgres:/var/lib/postgresql/data
    networks:
      - healthchecks_network   # internal-only; not reachable from the proxy
    environment:
      POSTGRES_PASSWORD_FILE: /run/secrets/healthchecks_db_password
      POSTGRES_USER_FILE: /run/secrets/healthchecks_db_user
      POSTGRES_DB_FILE: /run/secrets/healthchecks_db_name
    deploy:
      mode: replicated
      replicas: 1
    secrets:
      - healthchecks_db_name
      - healthchecks_db_password
      - healthchecks_db_user
networks:
  healthchecks_network:
    driver: overlay
    attachable: true
    internal: true   # no outbound access for app<->db traffic
  nginx_network:
    external: true
secrets:
  healthchecks_db_name:
    external: true
  healthchecks_db_password:
    external: true
  healthchecks_db_user:
    external: true
  healthchecks_secret_key:
    external: true
  healthchecks_sendgrid_api_key:
    external: true
  healthchecks_superuser_email:
    external: true
  healthchecks_superuser_password:
    external: true
volumes:
  healthchecks_config:
  healthchecks_postgres:

View file

@ -0,0 +1,196 @@
---
version: "3"

# HTPC media stack. Every download/indexer service runs with
# `network_mode: "service:wireguard"`, i.e. inside WireGuard's network
# namespace, so its traffic exits through the VPN and its web UI is only
# reachable via the ports published on the wireguard container below.
# Heimdall is the exception — it publishes its own ports directly.
services:
  wireguard:
    container_name: WireGuard
    image: linuxserver/wireguard
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    volumes:
      - ./config/wireguard:/config
    ports:
      # Published on behalf of the services sharing this namespace.
      # Quoted HOST:CONTAINER pairs avoid YAML's base-60 integer trap.
      - "26671:6767"    # bazarr
      - "26673:9117"    # jackett
      - "26674:8080"    # kodi web (presumably) — confirm mapping
      - "26675:9777/udp"
      - "26676:8686"    # lidarr
      - "26678:6789"    # nzbget
      - "26679:3579"    # ombi
      - "26680:81"      # organizr
      - "26681:7878"    # radarr
      - "26682:8989"    # sonarr
      - "26683:8181"    # tautulli
      - "26684:9091"    # transmission UI
      - "26685:51413"   # transmission peer port
      - "26686:51413/udp"
    sysctls:
      - net.ipv4.conf.all.src_valid_mark=1
    restart: unless-stopped
  bazarr:
    container_name: Bazarr
    image: linuxserver/bazarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    network_mode: "service:wireguard"
    restart: unless-stopped
    volumes:
      - ./config/bazarr:/config
      - /mnt/movies:/movies
      - /mnt/tv:/tv
  heimdall:
    container_name: Heimdall
    image: linuxserver/heimdall
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    ports:
      - "29914:443"
      - "29915:80"
    restart: unless-stopped
    volumes:
      - ./config/heimdall:/config
  jackett:
    container_name: Jackett
    image: linuxserver/jackett
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    network_mode: "service:wireguard"
    restart: unless-stopped
    volumes:
      - ./config/jackett:/config
      # NOTE(review): "auxilary" is a misspelling of "auxiliary", but it must
      # match the real host directory — verify before renaming anywhere.
      - /mnt/auxilary/Downloads:/downloads
  kodi-headless:
    container_name: Kodi-Headless
    image: linuxserver/kodi-headless
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    network_mode: "service:wireguard"
    restart: unless-stopped
  lidarr:
    container_name: Lidarr
    image: linuxserver/lidarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
      - UMASK_SET=022
    network_mode: "service:wireguard"
    restart: unless-stopped
    volumes:
      - ./config/lidarr:/config
      - /mnt/auxilary/Music:/music
      - /mnt/auxilary/Downloads:/downloads
  nzbget:
    container_name: NZBGet
    image: linuxserver/nzbget
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    network_mode: "service:wireguard"
    restart: unless-stopped
    volumes:
      - ./config/nzbget:/config
      - /mnt/auxilary/Downloads:/downloads
  ombi:
    container_name: Ombi
    image: linuxserver/ombi
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    network_mode: "service:wireguard"
    restart: unless-stopped
    volumes:
      - ./config/ombi:/config
  organizr:
    container_name: Organizr
    image: linuxserver/organizr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    network_mode: "service:wireguard"
    restart: unless-stopped
    volumes:
      - ./config/organizr:/config
  radarr:
    container_name: Radarr
    image: linuxserver/radarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
      - UMASK_SET=022
    network_mode: "service:wireguard"
    restart: unless-stopped
    volumes:
      - ./config/radarr:/config
      - /mnt/movies:/movies
      - /mnt/auxilary/Downloads:/downloads
  sonarr:
    container_name: Sonarr
    image: linuxserver/sonarr
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
      - UMASK_SET=022
    network_mode: "service:wireguard"
    restart: unless-stopped
    volumes:
      - ./config/sonarr:/config
      - /mnt/tv:/tv
      - /mnt/auxilary/Downloads:/downloads
  tautulli:
    container_name: Tautulli
    image: linuxserver/tautulli
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
    network_mode: "service:wireguard"
    restart: unless-stopped
    volumes:
      - ./config/tautulli:/config
      - ./logs/plex:/logs
  transmission:
    container_name: Transmission
    image: linuxserver/transmission
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
      - USER=admin
      # NOTE(review): hardcoded credential committed to VCS — move PASS to an
      # env file or Docker secret and rotate the password.
      - PASS=password8388**
    network_mode: "service:wireguard"
    restart: unless-stopped
    volumes:
      - ./config/transmission:/config
      - /mnt/auxilary/Downloads:/downloads
      - ./torrents:/watch

View file

@ -0,0 +1,17 @@
---
version: "3"

# InstaPy automation container.
services:
  web:
    image: megabytelabs/instapy:latest
    container_name: InstaPy-megabytelabs
    environment:
      # Python enables unbuffered output for ANY non-empty value, so the
      # original "0" still enabled it; "1" states the intent unambiguously.
      PYTHONUNBUFFERED: 1
      # NOTE(review): placeholder credentials — supply real values via an env
      # file or secrets, not in this committed template.
      INSTAPY_USERNAME: username
      INSTAPY_PASSWORD: password
      INSTAPY_POD: website
      INSTAPY_WORKSPACE: /code/InstaPy
    volumes:
      - instapy-megabytelabs:/code
volumes:
  instapy-megabytelabs:

View file

@ -0,0 +1,28 @@
---
version: '3.7'

# Josh (Just One Single History) git proxy — one instance per upstream host.
services:
  josh-github:
    image: joshproject/josh-proxy:latest
    # Fixed: both services originally declared container_name "Josh", which
    # Docker rejects — container names must be unique per host.
    container_name: Josh-GitHub
    volumes:
      - josh-github:/data/github
    ports:
      - "8141:8080" # @domain github.megabyte.space
    environment:
      JOSH_REMOTE: https://github.com
    restart: unless-stopped
  josh-gitlab:
    image: joshproject/josh-proxy:latest
    container_name: Josh-GitLab
    volumes:
      - josh-gitlab:/data/gitlab
    ports:
      - "8143:8080" # @domain gitlab.megabyte.space
    environment:
      JOSH_REMOTE: https://gitlab.com
    restart: unless-stopped
volumes:
  josh-github:
  josh-gitlab:

View file

@ -0,0 +1,33 @@
---
version: '3.7'

# NextCloud with a MariaDB backend. Both services share the default compose
# network, so NextCloud reaches the database at host "mariadb" on 3306.
services:
  nextcloud:
    image: linuxserver/nextcloud
    container_name: NextCloud
    environment:
      PUID: "1000"
      PGID: "1000"
      TZ: America/New_York
    volumes:
      - ./config/nextcloud:/config
      - /mnt/auxilary/NextCloud:/data
    ports:
      - "26777:443"
    restart: unless-stopped
  mariadb:
    image: linuxserver/mariadb
    container_name: MariaDB
    environment:
      # $VARS are resolved by compose from the shell / .env file at deploy
      # time — credentials stay out of this committed template.
      MYSQL_ROOT_PASSWORD: $MYSQL_ROOT_PASSWORD
      MYSQL_DATABASE: $MYSQL_DATABASE
      MYSQL_USER: $MYSQL_USER
      MYSQL_PASSWORD: $MYSQL_PASSWORD
      PUID: "1000"
      PGID: "1000"
      TZ: America/New_York
    volumes:
      - ./config/mariadb:/config
    expose:
      - 3306   # container-network only; never published to the host
    restart: unless-stopped

View file

@ -0,0 +1,44 @@
---
version: "3"

# Nginx Proxy Manager (jc21) with a MariaDB backend.
services:
  app:
    image: jc21/nginx-proxy-manager:2
    restart: always
    ports:
      # Public HTTP Port:
      - '80:80'
      # Public HTTPS Port:
      - '443:443'
      # Admin Web Port:
      - '81:81'
    environment:
      # Uncomment this if IPv6 is not enabled on your host
      # DISABLE_IPV6: 'true'
    volumes:
      # Make sure this config.json file exists as per instructions above:
      - ./config.json:/app/config/production.json
      - ./data:/data
      - ./letsencrypt:/etc/letsencrypt
    depends_on:
      - db
  db:
    image: jc21/mariadb-aria:10.4
    restart: always
    environment:
      # NOTE(review): live credentials hardcoded and committed to VCS — move
      # to an env file / Docker secrets and ROTATE these values; they must
      # stay in sync with the config.json sample below.
      MYSQL_ROOT_PASSWORD: '2m4arw2dlMfUdpKGbsI1mA8yIGQtMC3EWr1hjPEQp'
      MYSQL_DATABASE: 'npm'
      MYSQL_USER: 'maziithi'
      MYSQL_PASSWORD: 'BYXKerAl5jpJG0HMX8oYi7y9Sqk4XEuM5u1oolAR2'
    volumes:
      - ./data/mysql:/var/lib/mysql
# Sample contents for the ./config.json mounted above:
#{
#  "database": {
#    "engine": "mysql",
#    "host": "db",
#    "name": "npm",
#    "user": "maziithi",
#    "password": "BYXKerAl5jpJG0HMX8oYi7y9Sqk4XEuM5u1oolAR2",
#    "port": 3306
#  }
#}

View file

@ -0,0 +1,22 @@
---
version: '3.8'

# Edge NGINX: runs globally but pinned to manager nodes, attached to the
# shared external nginx_network that other stacks join to be proxied.
services:
  nginx:
    image: megabytelabs/nginx:latest
    container_name: NGINX
    ports:
      - "443:443"
      - "80:80"
    # NOTE(review): the original declared a single empty volume entry (''),
    # which is an invalid mount specification and fails deployment. Removed —
    # re-add a real HOST:CONTAINER mapping here if one was intended.
    networks:
      - nginx_network
    deploy:
      mode: global
      placement:
        constraints: [node.role == manager]
networks:
  nginx_network:
    external: true

View file

@ -0,0 +1,50 @@
---
version: '3.8'

# Portainer CE in swarm mode: one agent per Linux node plus a single
# replicated manager UI on a manager node.
services:
  agent:
    image: portainer/agent:2.14.1
    volumes:
      # Agent needs the node's Docker socket and volume directory.
      - /var/run/docker.sock:/var/run/docker.sock
      - /var/lib/docker/volumes:/var/lib/docker/volumes
    networks:
      - portainer_agent_network
    deploy:
      mode: global
      placement:
        constraints: [node.platform.os == linux]
  portainer:
    image: portainer/portainer-ce:2.14.1
    # Connects to the agents over the overlay network; admin password comes
    # from a Docker secret, branding/templates from the chezmoi template vars.
    command: -H tcp://tasks.agent:9001 --tlsskipverify --admin-password-file /run/secrets/portainer_admin_password --logo {{ template.SITE_LOGO_URL }} --no-analytics true --templates {{ template.TEMPLATES_URL }}
    expose:
      - 9443   # HTTPS UI, reachable via nginx_network only
    ports:
      - "8000:8000"   # edge agent tunnel port
    volumes:
      - portainer_manager_data:/data
    networks:
      - portainer_agent_network
      - nginx_network
    secrets:
      - portainer_admin_password
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints: [node.role == manager]
networks:
  portainer_agent_network:
    driver: overlay
    attachable: true
    internal: true
  nginx_network:
    external: true
secrets:
  portainer_admin_password:
    external: true
volumes:
  portainer_manager_data:

View file

@ -0,0 +1,21 @@
---
version: "2.1"

# Rundeck job scheduler.
services:
  rundeck:
    image: rundeck/rundeck
    container_name: Rundeck
    environment:
      # NOTE(review): PUID/PGID and MENU_VERSION are linuxserver.io-style
      # variables; the official rundeck/rundeck image does not document
      # them — this stanza looks copied from another template. Verify.
      PUID: "1000"
      PGID: "1000"
      MENU_VERSION: "2.0.19"  # optional
    volumes:
      - $HOME/.ssh:/home/rundeck/.ssh
      - rundeck-data:/home/rundeck/server/data
    ports:
      # NOTE(review): rundeck/rundeck serves its UI on 4440 by default; these
      # mappings (3000, 69/udp, 80) don't match that — confirm intent.
      - "3000:3000"
      - "69:69/udp"
      - "26688:80"
    restart: unless-stopped
volumes:
  rundeck-data:

View file

@ -0,0 +1,13 @@
---
version: "3"

# Sonatype Nexus 3 artifact repository; all state lives in the named volume.
services:
  nexus:
    image: sonatype/nexus3
    volumes:
      - "nexus-data:/nexus-data"
    ports:
      - "8081:8081"
volumes:
  nexus-data:

View file

@ -0,0 +1,74 @@
---
version: '3.7'

# StatPing status page backed by Postgres; all credentials arrive via Docker
# secrets (_FILE variants). The UI is published only through nginx_network.
services:
  statup:
    container_name: StatPing
    image: statping/statping:latest
    restart: unless-stopped
    depends_on:
      - postgres
    networks:
      - statping_network
      - nginx_network
    expose:
      - 8080
    volumes:
      - statping_app:/app
    environment:
      VIRTUAL_HOST: localhost
      VIRTUAL_PORT: 8080
      DB_CONN: postgres
      DB_HOST: postgres
      DB_USER_FILE: /run/secrets/statping_database_user
      DB_PASS_FILE: /run/secrets/statping_database_password
      DB_DATABASE_FILE: /run/secrets/statping_database_name
      NAME: "{{ template.NAME }}"
      DESCRIPTION: "{{ template.DESCRIPTION }}"
    deploy:
      mode: replicated
      replicas: 1
    secrets:
      - statping_database_name
      - statping_database_password
      - statping_database_user
  postgres:
    container_name: Postgres
    image: postgres:10
    restart: unless-stopped
    volumes:
      - statping_postgres:/var/lib/postgresql/data
    networks:
      - statping_network   # internal-only; not reachable from the proxy
    environment:
      POSTGRES_PASSWORD_FILE: /run/secrets/statping_database_password
      POSTGRES_USER_FILE: /run/secrets/statping_database_user
      POSTGRES_DB_FILE: /run/secrets/statping_database_name
    deploy:
      mode: replicated
      replicas: 1
    secrets:
      - statping_database_name
      - statping_database_password
      - statping_database_user
networks:
  statping_network:
    driver: overlay
    attachable: true
    internal: true
  nginx_network:
    external: true
secrets:
  statping_database_name:
    external: true
  statping_database_password:
    external: true
  statping_database_user:
    external: true
volumes:
  statping_app:
  statping_postgres:

View file

@ -0,0 +1,103 @@
---
# Wazuh App Copyright (C) 2017, Wazuh Inc. (License GPLv2)
# Single-node Wazuh deployment: manager + OpenSearch-based indexer + dashboard.
# NOTE(review): INDEXER_PASSWORD / API_PASSWORD below are the upstream example
# defaults — change them (and the matching internal_users.yml hashes) before
# exposing this stack.
version: '3.7'
services:
  wazuh.manager:
    image: wazuh/wazuh-manager:4.5.0
    hostname: wazuh.manager
    restart: always
    ports:
      - "1514:1514"
      - "1515:1515"
      - "514:514/udp"
      - "55000:55000"
    environment:
      - INDEXER_URL=https://wazuh.indexer:9200
      - INDEXER_USERNAME=admin
      - INDEXER_PASSWORD=SecretPassword
      - FILEBEAT_SSL_VERIFICATION_MODE=full
      - SSL_CERTIFICATE_AUTHORITIES=/etc/ssl/root-ca.pem
      - SSL_CERTIFICATE=/etc/ssl/filebeat.pem
      - SSL_KEY=/etc/ssl/filebeat.key
      - API_USERNAME=wazuh-wui
      - API_PASSWORD=MyS3cr37P450r.*-
    volumes:
      - wazuh_api_configuration:/var/ossec/api/configuration
      - wazuh_etc:/var/ossec/etc
      - wazuh_logs:/var/ossec/logs
      - wazuh_queue:/var/ossec/queue
      - wazuh_var_multigroups:/var/ossec/var/multigroups
      - wazuh_integrations:/var/ossec/integrations
      - wazuh_active_response:/var/ossec/active-response/bin
      - wazuh_agentless:/var/ossec/agentless
      - wazuh_wodles:/var/ossec/wodles
      - filebeat_etc:/etc/filebeat
      - filebeat_var:/var/lib/filebeat
      # Certificates are generated ahead of time into ./config (wazuh's
      # certs-generation tooling) and mounted read paths expected by filebeat.
      - ./config/wazuh_indexer_ssl_certs/root-ca-manager.pem:/etc/ssl/root-ca.pem
      - ./config/wazuh_indexer_ssl_certs/wazuh.manager.pem:/etc/ssl/filebeat.pem
      - ./config/wazuh_indexer_ssl_certs/wazuh.manager-key.pem:/etc/ssl/filebeat.key
      - ./config/wazuh_cluster/wazuh_manager.conf:/wazuh-config-mount/etc/ossec.conf
  wazuh.indexer:
    image: wazuh/wazuh-indexer:4.5.0
    hostname: wazuh.indexer
    restart: always
    ports:
      - "9200:9200"
    environment:
      - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      # OpenSearch requires unlimited memlock and a high fd ceiling.
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    volumes:
      - wazuh-indexer-data:/var/lib/wazuh-indexer
      - ./config/wazuh_indexer_ssl_certs/root-ca.pem:/usr/share/wazuh-indexer/config/certs/root-ca.pem
      - ./config/wazuh_indexer_ssl_certs/wazuh.indexer-key.pem:/usr/share/wazuh-indexer/config/certs/wazuh.indexer.key
      - ./config/wazuh_indexer_ssl_certs/wazuh.indexer.pem:/usr/share/wazuh-indexer/config/certs/wazuh.indexer.pem
      - ./config/wazuh_indexer_ssl_certs/admin.pem:/usr/share/wazuh-indexer/config/certs/admin.pem
      - ./config/wazuh_indexer_ssl_certs/admin-key.pem:/usr/share/wazuh-indexer/config/certs/admin-key.pem
      - ./config/wazuh_indexer/wazuh.indexer.yml:/usr/share/wazuh-indexer/config/opensearch.yml
      - ./config/wazuh_indexer/internal_users.yml:/usr/share/wazuh-indexer/plugins/opensearch-security/securityconfig/internal_users.yml
  wazuh.dashboard:
    image: wazuh/wazuh-dashboard:4.5.0
    hostname: wazuh.dashboard
    restart: always
    ports:
      - "443:5601"
    environment:
      - INDEXER_USERNAME=admin
      - INDEXER_PASSWORD=SecretPassword
      - WAZUH_API_URL=https://wazuh.manager
      - API_USERNAME=wazuh-wui
      - API_PASSWORD=MyS3cr37P450r.*-
    volumes:
      - ./config/wazuh_indexer_ssl_certs/wazuh.dashboard.pem:/usr/share/wazuh-dashboard/certs/wazuh-dashboard.pem
      - ./config/wazuh_indexer_ssl_certs/wazuh.dashboard-key.pem:/usr/share/wazuh-dashboard/certs/wazuh-dashboard-key.pem
      - ./config/wazuh_indexer_ssl_certs/root-ca.pem:/usr/share/wazuh-dashboard/certs/root-ca.pem
      - ./config/wazuh_dashboard/opensearch_dashboards.yml:/usr/share/wazuh-dashboard/config/opensearch_dashboards.yml
      - ./config/wazuh_dashboard/wazuh.yml:/usr/share/wazuh-dashboard/data/wazuh/config/wazuh.yml
    depends_on:
      - wazuh.indexer
    links:
      - wazuh.indexer:wazuh.indexer
      - wazuh.manager:wazuh.manager
volumes:
  wazuh_api_configuration:
  wazuh_etc:
  wazuh_logs:
  wazuh_queue:
  wazuh_var_multigroups:
  wazuh_integrations:
  wazuh_active_response:
  wazuh_agentless:
  wazuh_wodles:
  filebeat_etc:
  filebeat_var:
  wazuh-indexer-data:

View file

@ -0,0 +1,38 @@
---
version: '3.8'

# WireGuard VPN server (linuxserver image), deployed globally across the swarm.
services:
  wireguard:
    image: lscr.io/linuxserver/wireguard:latest
    container_name: WireGuard
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    environment:
      PUID: "1000"
      PGID: "1000"
      # NOTE(review): other templates in this commit reference variables as
      # {{ .something }} or {{ template.something }}; bare {{ timezone }}
      # would be parsed as a function call by Go templates — confirm a
      # `timezone` template function exists or change to a data reference.
      TZ: "{{ timezone }}"
      SERVERURL: "{{ template.SERVERURL }}"
      SERVERPORT: 51820
      PEERS: 1
      PEERDNS: auto
      INTERNAL_SUBNET: "{{ template.INTERNAL_SUBNET }}"
      ALLOWEDIPS: "{{ template.INTERNAL_SUBNET }}/24"
      LOG_CONFS: "true"
    volumes:
      - /srv/stacks/wireguard:/config
      - /lib/modules:/lib/modules
    ports:
      - "51820:51820/udp"
    sysctls:
      - net.ipv4.conf.all.src_valid_mark=1
    restart: unless-stopped
    deploy:
      mode: global
# NOTE(review): these networks are declared but the service above does not
# attach to either (no `networks:` key on the service) — dead config or a
# missing attachment; confirm intent.
networks:
  agent_network:
    driver: overlay
    attachable: true
  nginx_network:
    external: true