This commit is contained in:
Brian Zalewski 2022-12-24 15:04:59 -05:00
parent cf0132bc7b
commit f42899b107
190 changed files with 25488 additions and 0 deletions

.config/Brewfile Normal file

@ -0,0 +1,221 @@
# Standard Homebrew taps
tap "homebrew/cask"
tap "homebrew/core"
tap "homebrew/bundle"
tap "homebrew/services"
# Homebrew Formulae
# e.g. `brew install <program>`
# @brew [act](https://github.com/nektos/act) - Run GitHub Actions locally
brew "act"
# @brew [appium](https://appium.io/) - A framework focused on native Android/iOS testing
brew "appium"
# @brew [azure-cli](https://docs.microsoft.com/en-us/cli/azure/) - The official CLI for interacting with Microsoft Azure
brew "azure-cli"
# @brew [bat](https://github.com/sharkdp/bat) - Clone of cat with syntax highlighting and Git integration
brew "bat"
# @brew [bitwarden-cli](https://github.com/bitwarden/cli) - Access and manage a Bitwarden instance via CLI
brew "bitwarden-cli"
# @brew [codeclimate](https://github.com/codeclimate/codeclimate) - Interact with CodeClimate via CLI
# tap "codeclimate/formulae"
# brew "codeclimate"
##### ERROR #####
# ==> Installing codeclimate from codeclimate/formulae
# ==> make install
# Last 15 lines from /home/megabyte/.cache/Homebrew/Logs/codeclimate/01.make:
# 2022-03-11 08:34:36 +0000
#
# make
# install
#
# bin/check
# Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
# Unable to run `docker version', the docker daemon may not be running
# Please ensure `docker version' succeeds and try again
# make: *** [Makefile:43: install] Error 1
#
# If reporting this issue please do so at (not Homebrew/brew or Homebrew/core):
# https://github.com/codeclimate/homebrew-formulae/issues
#
# Installing codeclimate has failed!
##### ERROR #####
# @brew [croc](https://github.com/schollz/croc) - A sharing tool that helps transfer files from one computer to another
brew "croc"
# @brew [curl](https://curl.se) - An HTTP command-line tool
brew "curl"
# @brew [dasel](https://github.com/TomWright/dasel) - Select, put, and delete data from JSON, TOML, YAML, XML and CSV files
brew "dasel"
# @brew [direnv](https://github.com/direnv/direnv) - Loads and unloads environment variables based on the directory you are in
brew "direnv"
# @brew [dive](https://github.com/wagoodman/dive) - Tool for exploring each layer in a Docker image
brew "dive"
# @brew [docker](https://www.docker.com/) - A powerful toolchain for developing containerized applications
if OS.linux?
brew "docker"
end
# @brew [docker-slim](https://github.com/docker-slim/docker-slim) - A tool that shrinks Docker images and makes them more secure
brew "docker-slim"
# @brew [Dockle](https://github.com/goodwithtech/dockle) - A container image security scanner
tap "goodwithtech/r"
brew "goodwithtech/r/dockle"
# @brew [exiftool](https://exiftool.org) - A library for reading and writing EXIF data to files
brew "exiftool"
# @brew [ffsend](https://github.com/timvisee/ffsend) - Fully featured Firefox Send client that makes sharing files easy
brew "ffsend"
# @brew [gh](https://github.com/cli/cli) - The official GitHub command line tool
brew "gh"
# @brew [git](https://git-scm.com) - Tool for interacting with git repositories
brew "git"
# @brew [gitlab-runner](https://docs.gitlab.com/runner/) - Test GitLab CI configurations and add self-hosted runners
brew "gitlab-runner"
# @brew [gitleaks](https://github.com/zricethezav/gitleaks) - Scans git repos for secrets
brew "gitleaks"
# @brew [git-subrepo](https://github.com/ingydotnet/git-subrepo) - An alternative to git submodules
brew "git-subrepo"
# @brew [glab](https://glab.readthedocs.io/) - Open-source GitLab CLI
brew "glab"
# @brew [go](https://go.dev) - Open source programming language
brew "go"
# @brew [goofys](https://github.com/kahing/goofys) - High-performance, POSIX-ish Amazon S3 file system written in Go
brew "goofys"
# @brew [grex](https://github.com/pemistahl/grex) - Generate regular expressions by providing target matches
brew "grex"
# @brew [helm](https://helm.sh/) - The self-proclaimed package manager for Kubernetes
brew "helm"
# @brew [htmlq](https://github.com/mgdm/htmlq) - Use CSS to extract content from HTML via a CLI
brew "htmlq"
# @brew [hyperfine](https://github.com/sharkdp/hyperfine) - Command-line benchmarking tool
brew "hyperfine"
# @brew [jo](https://github.com/jpmens/jo) - JSON output from scripts
brew "jo"
# @brew [jq](https://stedolan.github.io/jq/) - Lightweight and flexible command-line JSON processor
brew "jq"
# @brew [kubectx](https://github.com/ahmetb/kubectx) - A tool for switching between Kubernetes clusters and namespaces
brew "kubectx"
# @brew [kubernetes-cli](https://kubernetes.io/docs/reference/kubectl/kubectl/) - The CLI for Kubernetes (also known as kubectl)
brew "kubernetes-cli"
# @brew [mc](https://github.com/minio/mc) - Replacement for ls, cp, and other commands that works with both file systems and S3-compatible object storage
tap "minio/stable"
brew "minio/stable/mc"
# @brew [mkcert](https://github.com/FiloSottile/mkcert) - Simple tool to make locally trusted development certificates
brew "mkcert"
# @brew [node](https://nodejs.org/) - A JavaScript runtime built on the ultra-fast V8 engine
brew "node"
# @brew [openssh](https://www.openssh.com/) - OpenBSD freely-licensed SSH connectivity tools
brew "openssh"
# @brew [ots](https://ots.sniptt.com) - Share end-to-end encrypted secrets with others via a one-time URL
brew "ots"
# @brew [oq](https://blacksmoke16.github.io/oq) - Performant and portable jq wrapper that supports formats other than JSON
brew "oq"
# @brew [php](https://www.php.net/) - General-purpose scripting language
# brew "php", restart_service: false
# @brew [poetry](https://python-poetry.org/) - A Python project package management tool and more
brew "poetry"
# @brew [pup](https://github.com/EricChiang/pup) - Parse HTML with a CLI
brew "pup"
# @brew [python](https://www.python.org/) - Interpreted, interactive, object-oriented programming language
brew "python@3.10"
# @brew [rsync](https://rsync.samba.org/) - Tool to do fast, incremental file transfers
brew "rsync"
# @brew [ruby](https://www.ruby-lang.org/) - Powerful, clean, object-oriented scripting language
brew "ruby"
# @brew [sshpass](https://github.com/hudochenkov/homebrew-sshpass) - Utility that allows Ansible to connect over SSH with a password
tap "hudochenkov/sshpass"
brew "hudochenkov/sshpass/sshpass"
# @brew [sysbench](https://github.com/akopytov/sysbench) - System performance benchmark tool
brew "sysbench"
# @brew [task](https://github.com/go-task/homebrew-tap) - A parallel task runner
tap "go-task/tap"
brew "go-task/tap/go-task"
# @brew [teleport](https://github.com/bbatsche/homebrew-teleport) - An identity-aware SSH client for teams
brew "teleport"
# @brew [terraform](https://www.terraform.io/) - An infrastructure-as-code tool that allows you to define both cloud and on-prem resources
brew "terraform"
# @brew [tokei](https://github.com/XAMPPRocky/tokei) - Count and display the lines of code and the languages used in a project
brew "tokei"
# @brew [trivy](https://aquasecurity.github.io/trivy/v0.18.3/) - Scan images for vulnerabilities
tap "aquasecurity/trivy"
brew "aquasecurity/trivy/trivy"
# @brew [up](https://github.com/akavel/up) - Write Linux pipes with an instant live preview
brew "up"
# @brew [waypoint](https://www.waypointproject.io/) - Tool to build, deploy, and release any application on any platform
tap "hashicorp/tap"
brew "hashicorp/tap/waypoint"
# @brew [wireshark](https://www.wireshark.org) - Graphical network analyzer and capture tool (CLI)
if OS.linux?
brew "wireshark"
end
# @brew [yarn](https://yarnpkg.com/) - JavaScript package manager from Facebook
brew "yarn"
# @brew [yq](https://github.com/mikefarah/yq) - Process and manipulate YAML documents
brew "yq"
# @brew [coreutils](https://www.gnu.org/software/coreutils) - A suite of basic UNIX tools published to improve compatibility between Linux and macOS scripts
if OS.mac?
brew "coreutils"
end
# Homebrew Casks (only available on macOS)
# e.g. `brew install --cask <program>`
# @cask [altair](https://altair.sirmuel.design/) - GraphQL GUI client
cask "altair"
# @cask [balenaetcher](https://balena.io/etcher) - Tool to flash OS images to SD cards & USB drives
cask "balenaetcher"
# @cask [bitwarden](https://bitwarden.com/) - Desktop client for Bitwarden
cask "bitwarden"
# @cask [docker](https://docker.com) - The desktop GUI for Docker, a virtualization platform for containers and microservices
cask "docker"
# @cask [firefox](https://www.mozilla.org/firefox/) - A popular web browser
cask "firefox"
# @cask [gimp](https://www.gimp.org/) - Free and open-source image editor
cask "gimp"
# @cask [google-chrome](https://www.google.com/chrome/) - Sandbox-based web browser published by Google
cask "google-chrome"
# @cask [gcloud](https://cloud.google.com/sdk/gcloud) - The official Google Cloud Platform SDK CLI tool
cask "google-cloud-sdk"
# @cask [iterm2](https://www.iterm2.com/) - An improved terminal for macOS
cask "iterm2"
# @cask [java](https://www.java.com/en/) - Libraries required for running and developing Java applications
cask "java" unless system "/usr/libexec/java_home --failfast"
# @cask [lens](https://k8slens.dev/) - An IDE for Kubernetes
cask "lens"
# @cask [microsoft-teams](https://teams.microsoft.com/downloads) - Meet, chat, call, and collaborate in just one place
cask "microsoft-teams"
# @cask [osxfuse](https://github.com/osxfuse/osxfuse) - Extends macOS by adding support for user space file systems
cask "osxfuse"
# @cask [postman](https://www.postman.com/) - Collaboration platform for API development
cask "postman"
# @cask [slack](https://slack.com/) - Team communication and collaboration software
cask "slack"
# @cask [skype](https://www.skype.com/) - Video chat, voice call, and instant messaging application
cask "skype"
# @cask [teamviewer](https://www.teamviewer.com/) - Remote access and connectivity software focused on security
cask "teamviewer"
# @cask [vagrant](https://www.vagrantup.com/) - Configuration-driven CLI for launching and managing virtualization tools
cask "vagrant"
# @cask [virtualbox](https://www.virtualbox.org/) - A popular virtualization platform for virtual machines
cask "virtualbox"
# @cask [visual-studio-code](https://code.visualstudio.com/) - Open source code editor
cask "visual-studio-code"
# @cask [vmware-fusion](https://www.vmware.com/products/fusion.html) - Create, manage, and run virtual machines
cask "vmware-fusion"
# @cask [wireshark](https://www.wireshark.org) - Graphical network analyzer and capture tool
cask "wireshark"
# Examples below
# 'brew install --with-rtmp', 'brew services restart' on version changes
# brew "denji/nginx/nginx-full", args: ["with-rtmp"], restart_service: :changed
# 'brew install', always 'brew services restart', 'brew link', 'brew unlink mysql' (if it is installed)
# brew "mysql@5.6", restart_service: true, link: true, conflicts_with: ["mysql"]
# 'brew install --cask'
# cask "google-chrome"
# 'brew install --cask --appdir=~/my-apps/Applications'
# cask "firefox", args: { appdir: "~/my-apps/Applications" }
# always upgrade auto-updated or unversioned cask to latest version even if already installed
# cask "opera", greedy: true
# 'brew install --cask' only if '/usr/libexec/java_home --failfast' fails
# cask "java" unless system "/usr/libexec/java_home --failfast"
# 'mas install'
# mas "1Password", id: 443987910
# 'whalebrew install'
# whalebrew "whalebrew/wget"

.config/ansible-lint.yml Normal file

@ -0,0 +1,33 @@
---
enable_list:
- fqcn-builtins
- no-log-password
- no-same-owner
exclude_paths:
- ../.autodoc/
- ../.cache/
- ../.common/
- ../.config/
- ../.git/
- ../.github/
- ../.gitlab/
- ../.husky/
- ../.modules/
- ../.npm/
- ../.pnpm-store/
- ../.shared/
- ../.task/
- ../.venv/
- ../.vscode/
- ../build/
- ../dist/
- ../molecule/
- ../node_modules/
- ../pnpm-lock.yaml
- ../roles/
- ../venv/
offline: true
skip_list: []

.config/bash/try-catch.sh Normal file

@ -0,0 +1,85 @@
#!/usr/bin/env bash
# Try / catch in bash
#
# ````
# #!/bin/bash
# export AnException=100
# export AnotherException=101
#
# # start with a try
# try
# ( # open a subshell !!!
# echo "do something"
# [ someErrorCondition ] && throw $AnException
#
# echo "do something more"
# executeCommandThatMightFail || throw $AnotherException
#
# throwErrors # automatically end the try block if a command's result is non-zero
# echo "now on to something completely different"
# executeCommandThatMightFail
#
# echo "it's a wonder we came so far"
# executeCommandThatFailsForSure || true # ignore a single failing command
#
# ignoreErrors # ignore failures of commands until further notice
# executeCommand1ThatFailsForSure
# result=$(executeCommand2ThatFailsForSure)
# [ "$result" != "expected error" ] && throw $AnException # ok, if it's not an expected error, we want to bail out!
# executeCommand3ThatFailsForSure
#
# # make sure to clear $ex_code, otherwise catch * will run
# # echo "finished" does the trick for this example
# echo "finished"
# )
# # directly after closing the subshell you need to connect a group to the catch using ||
# catch || {
# # now you can handle
# case $ex_code in
# $AnException)
# echo "AnException was thrown"
# ;;
# $AnotherException)
# echo "AnotherException was thrown"
# ;;
# *)
# echo "An unexpected exception was thrown"
# throw $ex_code # you can rethrow the "exception" causing the script to exit if not caught
# ;;
# esac
# }
# ```
# Source: https://stackoverflow.com/a/25180186
# shellcheck disable=SC2034
Logger="$(dirname "${BASH_SOURCE[0]}")/log"
# @description Save the current errexit setting and turn off fail-on-errors mode so the try block can handle failures
function try() {
[[ $- = *e* ]]; SAVED_OPT_E=$?
set +e
}
# @description Exit the current subshell with the given exception code so it can be caught
function throw() {
exit "$1"
}
# @description Capture the exception code from the try subshell and restore the saved errexit setting
function catch() {
export ex_code=$?
# Restore errexit if it was enabled before try()
(( SAVED_OPT_E == 0 )) && set -e
return $ex_code
}
# @description Turn on fail on errors mode
function throwErrors() {
set -e
}
# @description Do not fail on errors mode
function ignoreErrors() {
set +e
}

.config/codeclimate.yml Normal file

@ -0,0 +1,50 @@
---
version: '2'
plugins:
ansible-lint:
enabled: true
editorconfig:
enabled: true
eslint:
enabled: true
jscpd:
enabled: true
shellcheck:
enabled: true
yamllint:
enabled: true
exclude_patterns:
- _generated_/
- .common/
- .config/
- .git/
- .go/
- .modules/
- .npm/
- .pnpm-store/
- .task/
- .travis.yml
- .venv/
- .vscode/
- '*.hbs.yml'
- '**/*_test.go'
- '**/*.d.ts'
- '**/node_modules/'
- '**/spec/'
- '**/test/'
- '**/tests/'
- '**/vendor/'
- build/
- config/
- db/
- deprecated/
- dist/
- features/
- pnpm-lock.yaml
- roles/
- script/
- test-output/
- testdata/
- Tests/

.config/commitlintrc.cjs Normal file

@ -0,0 +1,3 @@
module.exports = {
extends: ['@commitlint/config-conventional']
}


@ -0,0 +1,23 @@
{
"keywords": [
"ansible",
"ansible-playbook",
"archlinux",
"centos",
"debian",
"doctor",
"fedora",
"install",
"installdoc",
"installdoctor",
"macos",
"mblabs",
"megabytelabs",
"molecule",
"playbook",
"professormanhattan",
"ubuntu",
"washingtondc",
"windows"
]
}

.config/cspell.json Normal file

@ -0,0 +1,306 @@
{
"$schema": "https://raw.githubusercontent.com/streetsidesoftware/cspell/master/cspell.schema.json",
"flagWords": [],
"ignorePaths": [
".autodoc/**",
".cache/**",
".common/**",
".config/**",
".git/**",
".github/**",
".gitlab/**",
".husky/**",
".modules/**",
".npm/**",
".pnpm-store/**",
".shared/**",
".task/**",
".venv/**",
".vscode/**",
"build/**",
"dist/**",
"package.json",
"package-lock.json",
"slim.report.json",
"yarn.lock",
"tsconfig.json",
"node_modules/**",
"pnpm-lock.yaml",
"roles/**",
"venv/**"
],
"language": "en",
"version": "0.1",
"words": [
"Kompose",
"Kubuntu",
"Malwarebytes",
"Portainer",
"Privoxy",
"Qubes",
"Remmina",
"Suricata",
"Tizen",
"Wireshark",
"XBMC",
"Zalewski",
"androidsdk",
"androidstudio",
"ansible",
"ansibler",
"appium",
"appnest",
"aptcacherng",
"archlinux",
"argparse",
"autodoc",
"autojump",
"autokey",
"awscli",
"azurecli",
"backdoors",
"balena",
"bandizip",
"bandwhich",
"bcrypt",
"bento",
"bgblack",
"bgblue",
"bgcyan",
"bggreen",
"bgmagenta",
"bgred",
"bgwhite",
"bgyellow",
"bitwarden",
"blazingly",
"boilerplates",
"bravebrowser",
"brewfile",
"broot",
"brotli",
"browserslist",
"buildr",
"caniuse",
"catfs",
"centos",
"certbot",
"chdir",
"choco",
"chokidar",
"circleci",
"cloudflared",
"cmds",
"cocoapods",
"codecov",
"cointop",
"commitizen",
"commitlint",
"commondocs",
"concat",
"consultemplate",
"cosmiconfig",
"cpus",
"debloat",
"defaultbrowser",
"deno",
"deps",
"diffsofancy",
"direnv",
"dmginstall",
"dnsmasq",
"dockerhub",
"dockerpushrm",
"dockerslim",
"donothing",
"dotenv",
"dotfile",
"dotfiles",
"easyengine",
"editorconfig",
"elasticagent",
"enablerepo",
"endlessh",
"epel",
"esbuild",
"esnext",
"exiftool",
"favicons",
"ffsend",
"filebrowser",
"filezilla",
"fontinstall",
"freemium",
"ghorg",
"gitdocker",
"gitextras",
"gitfilterrepo",
"githubbinary",
"githubcli",
"gitlab",
"gitlabrunner",
"gitlfs",
"gitmoji",
"gitomatic",
"gitsecret",
"gitsome",
"gitstats",
"goofys",
"googleassistant",
"googlecloudsdk",
"googler",
"gping",
"gvisor",
"heyhey",
"hostnames",
"httpie",
"hyperv",
"hyperv",
"idempotence",
"inkscape",
"installdoc",
"installdoctor",
"intellij",
"iterm",
"jenv",
"jetpack",
"jsdoc",
"koalaman",
"kodi",
"kubernetes",
"leasot",
"ledgerlive",
"libc",
"liquidjs",
"lpass",
"lxdc",
"mailspring",
"makepkg",
"mblabs",
"mcfly",
"megabytelabs",
"microsoftedge",
"minikube",
"minipass",
"mjml",
"mkdir",
"modifyvm",
"monero",
"motrix",
"multipass",
"mvdan",
"natdnshostresolver",
"netaddr",
"netdata",
"nextcloud",
"nmap",
"noconfirm",
"noqa",
"noqa",
"normit",
"onionshare",
"opencollective",
"optionator",
"pacman",
"pacman",
"pagespeed",
"pandoc",
"peco",
"pfsense",
"pgcli",
"pihole",
"pino",
"pino",
"pipx",
"platformtools",
"plex",
"pnpm",
"pnpx",
"portout",
"posix",
"postcss",
"prebuild",
"preload",
"prepended",
"prfssr",
"professormanhattan",
"proselint",
"pushrm",
"pyenv",
"pypi",
"pywinrm",
"qbittorrent",
"qlplugins",
"qubes",
"rclone",
"readlink",
"recoverpy",
"remotedesktop",
"restic",
"rimraf",
"ripgrep",
"rkhunter",
"sandboxed",
"sbin",
"scrcpy",
"screencast",
"screencasts",
"sdkman",
"sdkmanage",
"seconion",
"serializers",
"sharex",
"shdoc",
"shellcheck",
"shfmt",
"shotcut",
"shotwell",
"signale",
"sindresorhus",
"slackterm",
"sleekfast",
"sshtarpit",
"sshvault",
"submodule",
"submodules",
"switchhosts",
"symlinking",
"sysdig",
"taskfile",
"taskfiles",
"teamviewer",
"terminalizer",
"tfenv",
"tflint",
"tmpfs",
"transpiled",
"trec",
"tsdoc",
"typedoc",
"typeof",
"unsanitized",
"untracked",
"venv",
"vfile",
"videoblobs",
"virtualbox",
"washingtondc",
"wazuh",
"webp",
"windowsadmincenter",
"windowspowertoys",
"wireshark",
"wkhtmltopdf",
"wpcli",
"xmlbuilder",
"xrdp",
"yamllint",
"yargs",
"yarnhook",
"yocto",
"youtubedl",
"zalewski",
"zorin",
"zoxide"
]
}


@ -0,0 +1 @@
FROM megabytelabs/devcontainer:latest


@ -0,0 +1,109 @@
{
"build": {
"args": {
"DOCKER_VERSION": "latest",
"ENABLE_NONROOT_DOCKER": "true",
"INSTALL_ZSH": "true",
"UPGRADE_PACKAGES": "true",
"USERNAME": "megabyte",
"USE_MOBY": "true"
}
},
"dockerFile": "Dockerfile",
"extensions": [
"Angular.ng-template",
"attilabuti.vscode-mjml",
"bierner.markdown-emoji",
"ChakrounAnas.turbo-console-log",
"ChFlick.firecode",
"chrmarti.regex",
"CoenraadS.bracket-pair-colorizer",
"cweijan.vscode-mysql-client2",
"DavidAnson.vscode-markdownlint",
"dbaeumer.vscode-eslint",
"denoland.vscode-deno",
"dracula-theme.theme-dracula",
"drewbourne.vscode-remark-lint",
"eamodio.gitlens",
"EditorConfig.EditorConfig",
"esbenp.prettier-vscode",
"ericadamski.carbon-now-sh",
"firefox-devtools.vscode-firefox-debug",
"firsttris.vscode-jest-runner",
"formulahendry.auto-rename-tag",
"formulahendry.code-runner",
"foxundermoon.shell-format",
"GitHub.vscode-pull-request-github",
"GitLab.gitlab-workflow",
"GoogleCloudTools.cloudcode",
"golang.Go",
"HashiCorp.terraform",
"hediet.vscode-drawio",
"IBM.output-colorizer",
"johnpapa.vscode-peacock",
"Kelvin.vscode-sshfs",
"KnisterPeter.vscode-commitizen",
"kruemelkatze.vscode-dashboard",
"mads-hartmann.bash-ide-vscode",
"mechatroner.rainbow-csv",
"msjsdiag.debugger-for-chrome",
"msjsdiag.debugger-for-edge",
"ms-azuretools.vscode-docker",
"ms-kubernetes-tools.vscode-kubernetes-tools",
"ms-vscode-remote.remote-containers",
"ms-vscode-remote.remote-ssh",
"ms-vscode-remote.remote-wsl",
"ms-python.python",
"ms-vscode.PowerShell",
"ms-vscode.vscode-typescript-tslint-plugin",
"MS-vsliveshare.vsliveshare",
"MS-vsliveshare.vsliveshare-audio",
"njpwerner.autodocstring",
"nrwl.angular-console",
"paulvarache.vscode-taskfile",
"philnash.ngrok-for-vscode",
"PKief.material-icon-theme",
"pnp.polacode",
"pranaygp.vscode-css-peek",
"quicktype.quicktype",
"RandomFractalsInc.vscode-data-preview",
"rbbit.typescript-hero",
"redhat.ansible",
"redhat.vscode-yaml",
"richie5um2.vscode-sort-json",
"Rubymaniac.vscode-paste-and-indent",
"salbert.comment-ts",
"shd101wyy.markdown-preview-enhanced",
"snipsnapdev.snipsnap-vscode",
"softwaredotcom.swdc-vscode",
"steoates.autoimport",
"stylelint.vscode-stylelint",
"TabNine.tabnine-vscode",
"timonwong.shellcheck",
"toba.vsfire",
"tyriar.sort-lines",
"usernamehw.errorlens",
"valentjn.vscode-ltex",
"VisualStudioExptTeam.vscodeintellicode",
"vsciot-vscode.vscode-arduino",
"vsls-contrib.codetour",
"vsls-contrib.gistfs",
"WallabyJs.quokka-vscode",
"wayou.vscode-todo-highlight",
"wix.vscode-import-cost",
"yatki.vscode-surround"
],
"forwardPorts": [2222, 5901, 6080, 8001, 8014],
"hostRequirements": {
"cpus": 2,
"memory": "8gb",
"storage": "16gb"
},
"mounts": ["source=dind-var-lib-docker,target=/var/lib/docker,type=volume"],
"name": "Megabyte Labs DevContainer Code Rocket Pack",
"overrideCommand": false,
"postCreateCommand": "task start",
"remoteUser": "megabyte",
"runArgs": ["--init", "--privileged", "--shm-size=4g"],
"settings": {}
}

.config/docs/README.md Normal file

@ -0,0 +1,80 @@
<div align="center">
<center>
<a href="https://gitlab.com/megabyte-labs/documentation">
<img width="140" height="140" alt="Documentation logo" src="https://gitlab.com/megabyte-labs/documentation/shared/-/raw/master/logo.png" />
</a>
</center>
</div>
<div align="center">
<center><h1>Common Documentation</h1></center>
<center><h4 style="color: #18c3d1;">Documentation partials and JSON variables for generating sweet READMEs and templated files for hundreds of repositories</h4></center>
</div>
[![-----------------------------------------------------](https://gitlab.com/megabyte-labs/assets/-/raw/master/png/aqua-divider.png)](#table-of-contents)
## Table of Contents
- [➤ Summary](#summary)
- [➤ Repository Types](#repository-types)
- [➤ Repository Pipeline Order](#repository-pipeline-order)
- [➤ Flow Summary](#flow-summary)
- [➤ `common.json`](#common-json)
[![-----------------------------------------------------](https://gitlab.com/megabyte-labs/assets/-/raw/master/png/aqua-divider.png)](#summary)
## Summary
In all of our projects, we strive to maintain useful and informative documentation. However, with hundreds of projects and limited manpower, it can be tricky. To solve this problem, we re-use documentation partials to generate the documentation in each of our repositories.
There are two repositories responsible for generating the documentation for each project:
1. **[Shared documentation repository](https://gitlab.com/megabyte-labs/documentation/shared):** This repository contains documentation partials that are used throughout all of our repositories.
2. **Project-type documentation repository:** This repository is where we store documentation that is specific to the type of the downstream project. For example, if the downstream project is an Ansible role, then the repositories used to generate the documentation will be the shared documentation repository and the [Ansible documentation repository](https://gitlab.com/megabyte-labs/documentation/ansible).
[![-----------------------------------------------------](https://gitlab.com/megabyte-labs/assets/-/raw/master/png/aqua-divider.png)](#repository-types)
## Repository Types
We currently use this method to scaffold our projects of the following types:
1. [Angular](https://gitlab.com/megabyte-labs/documentation/angular)
2. [Ansible](https://gitlab.com/megabyte-labs/documentation/ansible)
3. [Dockerfile](https://gitlab.com/megabyte-labs/documentation/dockerfile)
4. [Go](https://gitlab.com/megabyte-labs/documentation/go)
5. [NPM](https://gitlab.com/megabyte-labs/documentation/npm)
6. [Packer](https://gitlab.com/megabyte-labs/documentation/packer)
7. [Python](https://gitlab.com/megabyte-labs/documentation/python)
[![-----------------------------------------------------](https://gitlab.com/megabyte-labs/assets/-/raw/master/png/aqua-divider.png)](#repository-pipeline-order)
## Repository Pipeline Order
Whenever a change is made to the shared documentation repository, the pipeline for the project-specific repositories will trigger (unless it is configured not to do so). Part of that pipeline includes cloning the shared documentation repository into the project-specific repository. When this happens, the `common/` folder in the shared repository is copied over to the project-specific repository.
After the `common/` folder is copied over, the project-specific repository will trigger the pipeline for the project-specific common files repository (e.g. the [Ansible common files repository](https://gitlab.com/megabyte-labs/common/ansible)). When this happens, the project-specific documentation repository is added to the project-specific common files repository in the `docs/` folder.
Finally, after the project-specific common files repository is up-to-date, the files it contains are propagated out to the individual projects that all of these repositories serve. This whole process allows us to propagate, say, a spelling-error fix in the documentation to every project in our ecosystem without any repetition.
[![-----------------------------------------------------](https://gitlab.com/megabyte-labs/assets/-/raw/master/png/aqua-divider.png)](#flow-summary)
## Flow Summary
To summarize, the order of the flow is:
1. [Shared documentation repository](https://gitlab.com/megabyte-labs/documentation/shared)
2. Project-specific documentation repository (e.g. [Ansible documentation](https://gitlab.com/megabyte-labs/documentation/ansible))
3. Project-specific common files repository (e.g. [Ansible common files](https://gitlab.com/megabyte-labs/common/ansible))
4. Individual project repository (e.g. [Ansible role for Android Studio](https://gitlab.com/megabyte-labs/ansible-roles/androidstudio))
So, with synchronization turned on, a change to the shared documentation repository would trigger updates for the most repositories since it is the highest upstream repository.
[![-----------------------------------------------------](https://gitlab.com/megabyte-labs/assets/-/raw/master/png/aqua-divider.png)](#common-json)
## `common.json`
In both the shared documentation repository and the project-specific documentation repositories, there is a file called `common.json` in the root of the project. These files contain variables that are dynamically injected into the documentation and other files. The `common.json` files in both repositories are merged whenever there are updates to create the `variables.json` file found in each project-specific documentation repository. During this process, the variables in the project-specific `common.json` file take precedence over the variables in the shared `common.json` file. A few additional steps produce the final version of the `.variables.json` file that each project uses to generate documentation and other files. In order of precedence, the variables are acquired from:
1. The variables in the `"blueprint"` section of the `package.json` file that is located in each downstream project
2. The variables stored in the `common.{{ project_subgroup }}.json` file stored in the common files repository for each project type (e.g. the [Android Studio Ansible project](https://gitlab.com/megabyte-labs/ansible-roles/androidstudio) uses the `common.role.json` file in the [Ansible common files repository](https://gitlab.com/megabyte-labs/common/ansible) since the project subtype is a role)
3. The `common.json` file in the project-type-specific documentation repository (e.g. for the Android Studio Ansible role this would be the [Ansible documentation repository](https://gitlab.com/megabyte-labs/documentation/ansible))
4. The `common.json` file in the [shared documentation repository](https://gitlab.com/megabyte-labs/documentation/shared)
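To make the merge concrete, here is a hypothetical, simplified example (shown as YAML for readability, although the actual files are JSON; `company` and `badge_style` are real template variables, but the values here are illustrative only):

```yaml
---
# (4) Shared documentation repository common.json (lowest precedence)
company: Megabyte Labs
badge_style: flat
---
# (3) Project-type documentation repository common.json
badge_style: for-the-badge
---
# Merged result: the project-type value wins for badge_style,
# while company passes through from the shared file
company: Megabyte Labs
badge_style: for-the-badge
```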


@ -0,0 +1,14 @@
{{ load:.config/docs/common/contributing/header.md }}
{{ template:toc }}
{{ load:.config/docs/common/contributing/code-of-conduct.md }}
{{ load:.config/docs/contributing/philosophy.md }}
{{ load:.config/docs/contributing/supported-os.md }}
{{ load:.config/docs/contributing/dev-environment.md }}
{{ load:.config/docs/contributing/pull-requests.md }}
{{ load:.config/docs/contributing/code-format.md }}
{{ load:.config/docs/contributing/code-style.md }}
{{ load:.config/docs/contributing/commenting.md }}
{{ load:.config/docs/contributing/docs.md }}
{{ load:.config/docs/contributing/testing.md }}
{{ load:.config/docs/contributing/linting.md }}
{{ load:.config/docs/common/contributing/troubleshooting.md }}


@ -0,0 +1,15 @@
{{ load:.config/docs/common/readme/header.md }}
{{ load:.config/docs/readme-playbook/subheader.md }}
{{ load:.config/docs/readme-playbook/quick-description.md }}
{{ template:toc }}
{{ load:.config/docs/readme-playbook/introduction.md }}
{{ load:.config/docs/readme-playbook/quick-start.md }}
{{ load:.config/docs/readme-playbook/supported-os.md }}
{{ load:.config/docs/readme-playbook/dependencies.md }}
{{ load:.config/docs/readme-playbook/software.md }}
{{ load:.config/docs/readme-playbook/web-apps.md }}
{{ load:.config/docs/readme-playbook/philosophy.md }}
{{ load:.config/docs/readme-playbook/architecture.md }}
{{ load:.config/docs/readme-playbook/managing-environments.md }}
{{ load:.config/docs/common/readme/contribute.md }}
{{ load:.config/docs/common/readme/license.md }}


@ -0,0 +1,16 @@
{{ load:.config/docs/common/readme/header.md }}
{{ load:.config/docs/readme-role/subheader.md }}
{{ load:.config/docs/readme-role/quick-description.md }}
{{ template:toc }}
{{ load:.config/docs/readme-role/overview.md }}
{{ load:.autodoc/ansible_actions.md }}
{{ load:.config/docs/readme-role/quick-start.md }}
{{ load:.autodoc/ansible_variables.md }}
{{ load:.config/docs/readme-role/supported-os.md }}
{{ load:.config/docs/readme-role/dependencies.md }}
{{ load:.autodoc/collection_dependencies.md }}
{{ load:.config/docs/readme-role/example.md }}
{{ load:.autodoc/ansible_tags.md }}
{{ load:.config/docs/common/readme/contribute.md }}
{{ load:.autodoc/ansible_todo.md }}
{{ load:.config/docs/common/readme/license.md }}


@ -0,0 +1,3 @@
## Code of Conduct
This project and everyone participating in it are governed by the [Code of Conduct]({{ repository.github }}{{ repository.location.conduct.github }}). By participating, you are expected to uphold this code. Please report unacceptable behavior to [{{ email.help }}](mailto:{{ email.help }}).


@ -0,0 +1,5 @@
## Contributors
Thank you so much to our contributors!
{{ contributors_list }}


@ -0,0 +1,5 @@
<div align="center">
<center><h1 align="center">Contributing Guide</h1></center>
</div>
First of all, thanks for visiting this page 😊 ❤️! We are *stoked* that you may be considering contributing to this project. You should read this guide if you are considering creating a pull request or plan to modify the code for your own purposes.


@ -0,0 +1,19 @@
## Style Guides
All code projects have their own style. Coding style will vary from coder to coder. Although we do not have a strict style guide for each project, we do require that you be well-versed in what coding style is most acceptable and _best_. To do this, you should read through style guides that are made available by organizations that have put a lot of effort into studying the reason for coding one way or another.
### Recommended Style Guides
Style guides are generally written for a specific language, but a great place to start learning about the best coding practices is [Google Style Guides](https://google.github.io/styleguide/). Follow the link and you will see style guides for most popular languages. We also recommend that you look through the following style guides, depending on what language you are coding in:
* [Airbnb JavaScript Style Guide](https://github.com/airbnb/javascript)
* [Angular Style Guide](https://angular.io/guide/styleguide)
* [Effective Go](https://go.dev/doc/effective_go)
* [PEP 8 Python Style Guide](https://www.python.org/dev/peps/pep-0008/)
* [Git Style Guide](https://github.com/agis/git-style-guide)
For more informative links, refer to the [GitHub Awesome Guidelines List](https://github.com/Kristories/awesome-guidelines).
### Strict Linting
One way we enforce code style is by including the best standard linters into our projects. We normally keep the settings pretty strict. Although it may seem pointless and annoying at first, these linters will make you a better coder since you will learn to adapt your style to the style of the group of people who spent countless hours creating the linter in the first place.


@ -0,0 +1,39 @@
## Contributing
Contributions, issues, and feature requests are welcome! Feel free to check the [issues page]({{ repository.github }}{{ repository.location.issues.github }}). If you would like to contribute, please take a look at the [contributing guide]({{ repository.github }}{{ repository.location.contributing.github }}).
<details>
<summary><b>Sponsorship</b></summary>
<br/>
<blockquote>
<br/>
Dear Awesome Person,<br/><br/>
{{ sponsorship.text }}
<br/><br/>Sincerely,<br/><br/>
**_{{ sponsorship.author }}_**<br/><br/>
</blockquote>
<a title="Support us on Open Collective" href="{{ profile_link.opencollective }}/{{ profile.opencollective }}" target="_blank">
<img alt="Open Collective sponsors" src="https://img.shields.io/opencollective/sponsors/megabytelabs?logo=opencollective&label=OpenCollective&logoColor=white&style={{ badge_style }}" />
</a>
<a title="Support us on GitHub" href="{{ profile_link.github }}/{{ profile.github }}" target="_blank">
<img alt="GitHub sponsors" src="https://img.shields.io/github/sponsors/{{ profile.github }}?label=GitHub%20sponsors&logo=github&style={{ badge_style }}" />
</a>
<a href="{{ profile_link.patreon }}/{{ profile.patreon }}" title="Support us on Patreon" target="_blank">
<img alt="Patreon" src="https://img.shields.io/badge/Patreon-Support-052d49?logo=patreon&logoColor=white&style={{ badge_style }}" />
</a>
### Affiliates
Below you will find a list of services we leverage that offer special incentives for signing up through our referral links:
<a href="http://eepurl.com/h3aEdX" title="Sign up for $30 in MailChimp credits" target="_blank">
<img alt="MailChimp" src="https://cdn-images.mailchimp.com/monkey_rewards/grow-business-banner-2.png" />
</a>
<a href="https://www.digitalocean.com/?refcode=751743d45e36&utm_campaign=Referral_Invite&utm_medium=Referral_Program&utm_source=badge">
<img src="https://web-platforms.sfo2.digitaloceanspaces.com/WWW/Badge%203.svg" alt="DigitalOcean Referral Badge" />
</a>
</details>


@ -0,0 +1,11 @@
<div align="center">
<center>
<a href="{{ repository.github }}">
<img width="148" height="148" alt="{{ name }} logo" src="{{ repository.gitlab }}{{ repository.location.logo.gitlab }}" />
</a>
</center>
</div>
<div align="center">
<center><h1 align="center">{{ docs.header_title_pre }}{{ title }}{{ docs.header_title_post }}</h1></center>
<center><h4 style="color: #18c3d1;">{{ docs.header_description_pre }}<a href="{{ link.home }}" target="_blank">{{ organization }}</a></h4>{{ docs.header_description_post }}</center>
</div>


@ -0,0 +1,3 @@
## License
Copyright © {{ copyright }} [{{ company }}]({{ link.home }}). This project is [{{ license }}]({{ repository.gitlab }}{{ repository.location.license.gitlab }}) licensed.


@ -0,0 +1,48 @@
## Code Format
We try to structure our Ansible task and variable files consistently across all [our Ansible projects]({{ repository.group.ansible_roles }}). This allows us to do things like use RegEx to make ecosystem-wide changes. A good way of making sure that your code follows the format we are using is to:
1. Clone the [main playbook repository]({{ project.playbooks }}) (a.k.a. [Install Doctor]({{ link.installdoctor }}))
2. Use Visual Studio Code to search for code examples of how we are performing similar tasks
For example:
- All of our roles use a similar pattern for the `tasks/main.yml` file
- The file names and variable names are consistent across our roles
- Contributors automatically format some parts of their code by leveraging our pre-commit hook (which is installed when you run `bash .start.sh` in the root of a project)
### Code Format Example
To dive a little deeper, take the following block of code that was retrieved from the `tasks/main.yml` file in the [Android Studio role](https://github.com/InstallDoc/androidstudio) as an example:
```yaml
---
- name: Include variables based on the operating system
include_vars: '{{ ansible_os_family }}.yml'
- name: Include tasks based on the operating system
become: true
block:
- include_tasks: 'install-{{ ansible_os_family }}.yml'
```
If you compare the block of code above to other `tasks/main.yml` files in other roles (which you can find in our [Ansible Roles group]({{ repository.group.ansible_roles }}) or our [main playbook]({{ project.playbooks }}), a.k.a. [Install Doctor]({{ link.installdoctor }})), you will see that the files are either identical or nearly identical. There is one exception: some roles will exclude the first task, titled "Include variables based on the operating system", when variables are not required for the role. Our goal is to be consistent but not to the point where we are degrading the functionality of our code or including code that is unnecessary.
In general, it is up to the developer to browse through our projects to get a feel for the code format we use. A good idea is to clone [Install Doctor]({{ project.playbooks }}), search for how Ansible modules are used, and then mimic the format. For instance, if you are adding a task that installs a snap package, then you would search for `community.general.snap:` in the main playbook to see the format we are using so you can mimic the style.
### Platform-Specific Roles
If you have a role that only installs software made for Windows 10, then ensure that the tasks are only run when the system is a Windows system by using `when:` in the `tasks/main.yml` file. Take the following `main.yml` as an example:
```yaml
---
- name: Include variables based on the operating system
include_vars: '{{ ansible_os_family }}.yml'
when: ansible_os_family == 'Windows'
- name: Include tasks based on the operating system
become: true
block:
- include_tasks: 'install-{{ ansible_os_family }}.yml'
when: ansible_os_family == 'Windows'
```


@ -0,0 +1,105 @@
## Code Style
We try to follow the same code style across all our Ansible repositories. If something is done one way somewhere, then it should be done the same way elsewhere. It is up to you to [browse through our roles]({{ repository.group.ansible_roles }}) to get a feel for how everything should be styled. You should clone [the main playbooks repository]({{ project.playbooks }}) (a.k.a. [Install Doctor]({{ link.installdoctor }})), initialize all the submodules either via `bash .start.sh` or `git submodule update --init --recursive`, and search through the code base to see how we are _styling_ different task types. Below are some examples:
### Arrays
When there is only one parameter, then you should inline it.
**❌ BAD**
```yaml
when:
- install_minikube
```
**✅ GOOD**
```yaml
when: install_minikube
```
**✅ ALSO GOOD**
```yaml
when:
- install_minikube
- install_hyperv_plugin
```
### Alphabetical Order
Anywhere an array/list is used, the list should be ordered alphabetically (if possible).
**❌ BAD**
```yaml
autokey_dependencies:
- pkg-config
- make
- git
```
**✅ GOOD**
```yaml
autokey_dependencies:
- git
- make
- pkg-config
```
### Dependency Variables
In many cases, a role will require that specific software package dependencies are met before running. These dependencies are usually an array of packages that need to be installed.
Say the application being installed is Android Studio. The dependency array should be assigned to a variable titled `androidstudio_dependencies` (where "androidstudio" is retrieved from the `.galaxy_info.role_name` field in the `meta/main.yml` file) and placed in `vars/main.yml`.
**✅ GOOD example of defining the variable in the `vars/main.yml` file**
```yaml
---
androidstudio_dependencies:
- ffmpeg
- coolpackage
- anotherpackage
```
**❌ BAD example of integrating the variable into a task file:**
```yaml
- name: "Ensure {{ {{ app_name }} }}'s dependencies are installed"
community.general.pacman:
name: '{{ android_studio_deps }}'
state: present
```
**✅ GOOD example of integrating the variable into a task file:**
```yaml
- name: "Ensure {{ {{ app_name }} }}'s dependencies are installed"
community.general.pacman:
name: '{{ androidstudio_dependencies }}'
state: present
```
If there are dependencies that are specific to a certain OS, then the dependency variable should be titled `{{ .galaxy_info.role_name }}_dependencies_{{ os_family }}`. For Android Studio, a Fedora-specific dependency list should be named `androidstudio_dependencies_fedora`. In practice, this would look like:
```yaml
- name: "Ensure {{ {{ app_name }} }}'s dependencies are installed (Fedora)"
dnf:
name: '{{ androidstudio_dependencies_fedora }}'
state: present
when: ansible_distribution == 'Fedora'
```
### DRY
DRY stands for "Don't Repeat Yourself." Whenever there is code that is duplicated across multiple task files, you should separate it into a different file and then include it, as in the examples below:
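**❌ BAD** (a hypothetical sketch in which the same generic task is copy-pasted into every OS-specific file; the path and task are illustrative only):
```yaml
# Duplicated verbatim in both install-Debian.yml AND install-Fedora.yml
- name: Ensure the configuration directory exists
  file:
    path: /etc/example-app # illustrative path
    state: directory
    mode: '0755'
```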
**✅ GOOD**
```yaml
- name: Run generic Linux tasks
include_tasks: install-Linux.yml
```


@ -0,0 +1,140 @@
## Commenting
We strive to make our roles easy to understand. Commenting is a major part of making our roles easier to grasp. Several types of comments are supported in such a way that they are extracted and injected into our documentation. This project uses [mod-ansible-autodoc]({{ link.mod_ansible_autodoc }}) (a pet project of ours and a fork of [ansible-autodoc](https://pypi.org/project/ansible-autodoc/)) to scan through specially marked up comments and generate documentation out of them. The module also allows the use of markdown in comments so feel free to **bold**, _italicize_, and `code_block` as necessary. Although it is perfectly acceptable to use regular comments, in most cases you should use one of the following types of _special_ comments:
- [Variable comments](#variable-comments)
- [Action comments](#action-comments)
- [TODO comments](#todo-comments)
### Variable Comments
It is usually not necessary to add full-fledged comments to anything in the `vars/` folder but the `defaults/main.yml` file is a different story. The `defaults/main.yml` file must be fully commented since it is where we store all the variables that our users can customize. **`defaults/main.yml` is the only place where comments using the following format should be present.**
Each variable in `defaults/main.yml` should be added and documented using the following format:
<!-- prettier-ignore-start -->
```yaml
# @var variable_name: default_value
# The description of the variable which should be no longer than 160 characters per line.
# You can separate the description into new lines so you do not pass the 160 character
# limit
variable_name: default_value
```
<!-- prettier-ignore-end -->
There may be cases where an example is helpful. In these cases, use the following format:
<!-- prettier-ignore-start -->
```yaml
# @var variable_name: []
# The description of the variable which should be no longer than 160 characters per line.
# You can separate the description into new lines so you do not pass the 160 character
# limit
variable_name: []
# @example #
# variable_name:
# - name: jimmy
# param: henry
# - name: albert
# @end
```
<!-- prettier-ignore-end -->
Each variable-comment block in `defaults/main.yml` should be separated by a line return. You can see an example of a `defaults/main.yml` file using this special [variable syntax in the Docker role]({{ link.docker_role }}/blob/master/defaults/main.yml).
### Action Comments
Action comments allow us to describe what the role does. Each action comment should include an action group as well as a description of the feature or "action". Most of the action comments should probably be added to the `tasks/main.yml` file although there could be cases where an action comment is added in a specific task file (like `install-Darwin.yml`, for instance). Action comments allow us to group similar tasks into lists under the action comment's group.
#### Example Action Comment Implementation
The following is an example of the implementation of action comments. You can find the [source here]({{ link.docker_role }}/blob/master/tasks/main.yml) as well as an example of why and how you would include an [action comment outside of the `tasks/main.yml` file here]({{ link.docker_role }}/blob/master/tasks/compose-Darwin.yml).
<!-- prettier-ignore-start -->
```yaml
# @action Ensures Docker is installed
# Installs Docker on the target machine.
# @action Ensures Docker is installed
# Ensures Docker is started on boot.
- name: Include tasks based on the operating system
block:
- include_tasks: 'install-{{ ansible_os_family }}.yml'
when: not docker_snap_install
# @action Ensures Docker is installed
# If the target Docker host is a Linux machine and the `docker_snap_install` variable
# is set to true, then Docker will be installed as a snap package.
- name: Install Docker via snap
community.general.snap:
name: docker
when:
- ansible_os_family not in ('Windows', 'Darwin')
- docker_snap_install
# @action Installs Docker Compose
# Installs Docker Compose if the `docker_install_compose` variable is set to true.
- name: Install Docker Compose (based on OS)
block:
- include_tasks: 'compose-{{ ansible_os_family }}.yml'
when: docker_install_compose | bool
```
<!-- prettier-ignore-end -->
#### Example Action Comment Generated Output
The block of code above will generate markdown that would look similar to this:
**Ensures Docker is installed**
- Installs Docker on the target machine.
- Ensures Docker is started on boot.
- If the target Docker host is a Linux machine and the `docker_snap_install` variable is set to true, then Docker will be installed as a snap package.
**Installs Docker Compose**
- Installs Docker Compose if the `docker_install_compose` variable is set to true.
#### Action Comment Guidelines
- The wording of each action should be in active tense, describing a capability of the role. So instead of calling an action "Generate TLS certificates," we would call it, "Generates TLS certificates."
- The bulk of the action comments should be placed in the `tasks/main.yml` file. However, there may be use cases for putting an action comment in another file. For instance, if the business logic is different for Windows hosts, then we might add action comments to the `install-Windows.yml` file explaining the different logic.
- The goal of action comments is to present our users with easy-to-understand bullet points about exactly what the role does and to elaborate on some of the higher-level technical details.
### TODO Comments
TODO comments are similar to action comments in the sense that through automation similar comments will be grouped together. You should use them anytime you find a bug, think of an improvement, spot something that needs testing, or realize there is a desirable feature missing. Take the following as an example:
#### Example TODO Comment Implementation
<!-- prettier-ignore-start -->
```yaml
# @todo Bug: bug description
# @todo improvement: improvement description
# @todo Bug: another bug description
```
<!-- prettier-ignore-end -->
#### Example TODO Comment Generated Output
The above code will output something that looks like this:
**Bug**
- bug description
- another bug description
**improvement**
- improvement description
Notice how the title for _improvement_ is not capitalized. It should be capitalized, so make sure you pay attention to that detail.
#### TODO Comment Guidelines
- A TODO comment can be placed anywhere as long as no lines pass the limit of 160 characters.
- Try using similar TODO comment groups. Nothing is set in stone yet but try to use the following categories unless you really believe we need a new category:
- Bug
- Feature
- Improvement
- Test
- Ensure you capitalize the category


@ -0,0 +1,45 @@
## Setting Up Development Environment
Before contributing to this project, you will have to make sure you have the tools that are utilized. We have made it incredibly easy to get started: just run `bash .start.sh` in the root of the repository. Most of the requirements (listed below) will automatically install (rootlessly) if they are missing from your system when you initialize the project.
### Requirements
- **[Task](https://github.com/ProfessorManhattan/ansible-task)**
- **[Python 3](https://github.com/ProfessorManhattan/ansible-python)**, along with the `python3-netaddr` and `python3-pip` libraries (i.e. `sudo apt-get install python3 python3-netaddr python3-pip`)
- **[Docker](https://github.com/ProfessorManhattan/ansible-docker)**
- **[Node.js](https://github.com/ProfessorManhattan/ansible-nodejs)** >=12 which is used for the development environment which includes a pre-commit hook
- **[VirtualBox](https://github.com/ProfessorManhattan/ansible-virtualbox)** which is used for running Molecule tests
Docker and VirtualBox must be installed with root privileges. If they are missing from your system, running `bash .start.sh` will prompt you for your password and automatically install them. Otherwise, you can follow the official [directions for installing Docker](https://docs.docker.com/get-docker/) and [directions for installing VirtualBox](https://www.virtualbox.org/manual/ch02.html).
### Getting Started
With all the requirements installed, navigate to the root directory and run the following command to set up the development environment which includes installing the Python dependencies and installing the Ansible Galaxy dependencies:
```shell
bash .start.sh
```
This will install all the dependencies and automatically register a pre-commit hook. More specifically, `bash .start.sh` will:
1. Install Task which provides an easy-to-use interface for performing common tasks while leveraging parallel execution
2. Install missing development tools like Node.js and Python
3. Install the Node.js development environment dependencies
4. Install a pre-commit hook using [husky]({{ misc.husky }})
5. Ensure that meta files and documentation are up-to-date
6. Install the Python 3 requirements
7. Install the Ansible Galaxy requirements
8. Re-generate documentation using the latest sources
9. Perform other miscellaneous tasks depending on the project type
### Tasks Available
With the dependencies installed, you can see a list of the available commands by running `task --list`. This will log a help menu to the console informing you about the available commands and what they do. After running the command, you will see something that looks like this:
```shell
task --list
{{ task_list_output }}
```
Using the information provided above by running `task --list`, we can see that the `task lint:all` command will lint the project with all the available linters. You can see exactly what each command is doing by checking out the `Taskfile.yml` file (and following the imports). You can also get a detailed summary of any task reported by `task --list` by running `task group:task-name --summary`.
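For example, to inspect the `lint:ansible` task referenced in the linting guide:
```shell
task lint:ansible --summary
```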


@ -0,0 +1,39 @@
## Updating Meta Files and Documentation
Since we have hundreds of Ansible roles to maintain, the majority of the files inside each role are shared across all our Ansible projects. We synchronize these common files across all our repositories with various build tools. When you clone a new repository, the first command you should run is `bash .start.sh`. This will install missing software requirements, run the full update sequence, and ensure everything is up-to-date. To synchronize the project at a later point in time, you can run `task common:update` which runs most of the logic executed by running `bash .start.sh`.
### The `"blueprint" package.json` Field and `@appnest/readme`
In the root of all of our Ansible repositories, we include a file named `package.json`. In the key named `"blueprint"`, there are variables that are used by our build tools. Most of the variables stored in `"blueprint"` are used for generating documentation. All of our documentation is generated using variables and document partials that we feed into a project called [`@appnest/readme`]({{ misc.appnest }}) (which is in charge of generating the final README/CONTRIBUTING guides). When `@appnest/readme` is run, it includes the variables stored in `"blueprint"` in the context that it uses to inject variables into the documentation. You can view the documentation partials by checking out the `./.common` folder, which is a submodule that is shared across all of our Ansible projects.
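As a rough sketch, the layout looks something like this (the keys inside `"blueprint"` are illustrative only; the chart below lists the actual required fields):

```json
{
  "name": "ansible-androidstudio",
  "blueprint": {
    "name": "Android Studio",
    "overview": "A short description that gets injected into the README"
  }
}
```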
For every role included in our ecosystem, we require certain fields to be filled out in the `"blueprint"` section of the `package.json` file. Lucky for you, most of the fields in the file are auto-generated. The fields that need to be filled out, as well as descriptions of what they should contain, are listed in the chart below:
{{ blueprint_requirements }}
### `meta/main.yml` Description
The most important piece of text in each of our Ansible projects is the [Ansible Galaxy]({{ profile_link.galaxy }}) description located in `meta/main.yml`. This text is used in search results on Ansible Galaxy and GitHub. It is also spun to generate multiple variants, so it has to be worded in a way that reads well in each of them. Take the following as an example:
**The `meta/main.yml` description example:**
- Installs Android Studio and sets up Android SDKs on nearly any OS
**Gets spun and used by our automated documentation framework in the following formats:**
- Installs Android Studio and sets up Android SDKs on nearly any OS
- An Ansible role that _installs Android Studio and sets up Android SDKs on nearly any OS_
- This repository is the home of an Ansible role that _installs Android Studio and sets up Android SDKs on nearly any OS_.
It is important that all three variants of the `meta/main.yml` description make sense and be proper English. The `meta/main.yml` description should succinctly describe what the role does and possibly even describe what the product does if it is not well-known like Android Studio. An example of a description that includes an overview of the product would be something like, "Installs HTTPie (a user-friendly, command-line HTTP client) on nearly any platform," for the [HTTPie role](https://github.com/ProfessorManhattan/ansible-httpie) or "Installs Packer (an automation tool for building machine images) on nearly any platform" for the [Packer role](https://github.com/ProfessorManhattan/ansible-packer).
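For reference, this description lives under `galaxy_info` in `meta/main.yml` (a minimal sketch using the Android Studio example above):

```yaml
---
galaxy_info:
  role_name: androidstudio
  description: Installs Android Studio and sets up Android SDKs on nearly any OS
```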
### `logo.png`
We include a `logo.png` file in all of our Ansible projects. This image is automatically integrated with GitLab so that a thumbnail appears next to the project. It is also shown in the README to give the user a better idea of what the role does. All roles should include the `logo.png` file. When adding a `logo.png` file please _strictly_ adhere to the steps below:
1. Use Google image search to find a logo that best represents the product. Ensure the image is a `.png` file and that it has a transparent background, if possible. Ideally, the image should be the official logo for software that the Ansible role/project installs. The image should be at least 200x200 pixels.
2. After downloading the image, ensure you have the sharp-cli installed by running `npm install -g sharp-cli`.
3. Resize the image to 200x200 pixels by running `sharp -i file_location.png -o logo.png resize 200 200`.
4. Compress the resized image by dragging and dropping the resized image into the [TinyPNG web application]({{ misc.tinypng }}).
5. Download the compressed image and add it to the root of the Ansible project. Make sure it is named `logo.png`.
Alternatively, you can use our pre-commit hook to automatically take care of steps 2-5 when the `logo.png` file is staged with git.
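If you are running the manual steps yourself, the sequence looks roughly like this (`downloaded-logo.png` is a placeholder for whatever file you saved):

```shell
# Install the resizing tool, then resize the downloaded image to 200x200
npm install -g sharp-cli
sharp -i downloaded-logo.png -o logo.png resize 200 200
# Compress logo.png with the TinyPNG web app, then place it in the project root
```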

View file

@ -0,0 +1,36 @@
## Linting
The process of running linters is mostly automated. Molecule is configured to lint so you will see linting errors when you run `molecule test` (note that not all Molecule scenarios include automatic linting). There is also a pre-commit hook that lints your code and performs other validations before allowing a `git commit` to go through. If you followed the [Setting Up Development Environment](#setting-up-development-environment) section, you should be all set to have your code automatically linted before pushing changes to the repository.
**Please note that before creating a pull request, all lint errors should be resolved.** If you would like to view all the steps we take to ensure great code then check out `.husky/pre-commit` and the other files in the `.husky/` folder.
### Fixing Ansible Lint Errors
You can manually run Ansible Lint by executing the following command in the project's root:
```shell
task lint:ansible
```
Most errors will be self-explanatory and simple to fix. Other errors might require testing and research. Below are some tips on fixing the trickier errors.
#### [208] File permissions unset or incorrect
If you get this error, do research to figure out the minimum permissions necessary for the file. After you change the permission, test the role (since changing permissions can easily break things).
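For example, explicitly declaring `mode:` on tasks that create files clears the error. A minimal sketch (the file names and paths are placeholders):

```yaml
- name: Copy the application configuration file
  copy:
    src: app.conf
    dest: /etc/app/app.conf
    mode: '0644' # An explicit mode satisfies lint rule 208
```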
#### [301] Command should not change things if nothing needs doing
This error can be solved by telling Ansible what files the command creates or deletes. When you specify what file a `command:` or `shell:` creates and/or deletes, Ansible will check for the presence or absence of the file to determine if the system is already in the desired state. If it is in the desired state, then Ansible skips the task. Refer to the [documentation for ansible.builtin.command](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/command_module.html) for further details.
Here is an example of code that will remove the error:
```yaml
- name: Run command if /path/to/database does not exist
  command: /usr/bin/make_database.sh db_user db_name
  args:
    creates: /path/to/database # If the command deletes something, swap `creates` for `removes`
```
#### [305] Use shell only when shell functionality is required
Only use the Ansible `shell:` task when absolutely necessary. If you get this error, test whether replacing `shell:` with `command:` resolves it. If that does not work and you cannot figure out how to properly configure the environment for `command:`, then you can add `# noqa 305` at the end of the line that includes the `name:` property. The same is true for other linting errors - `# noqa` followed by the reported lint error code will instruct `ansible-lint` to ignore the error.
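For instance, if a task genuinely needs shell features like piping, it might look like the following sketch (the command itself is only an illustration):

```yaml
- name: Determine the latest release tag # noqa 305
  shell: curl -s https://example.com/releases | tail -n 1
  register: latest_release
  changed_when: false # Reading data does not change the system
```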

View file

@ -0,0 +1,11 @@
## Philosophy
When you are working with one of our Ansible projects, try asking yourself, "**How can this be improved?**" For example, in the case of the [Android Studio role](https://github.com/ProfessorManhattan/ansible-androidstudio), the role installs Android Studio but there may be additional tasks that should be automated. Consider the following examples:
- _The software is installed but is asking for a license key._ - In this case, we should provide an option for automatically installing the license key using a CLI command.
- _The software supports plugins_ - We should provide an option for specifying the plugins that are automatically installed.
- _In the case of Android Studio, many users have to install SDKs before using the software._ - We should offer the capability to automatically install user-specified SDKs.
- _The software has configuration files with commonly tweaked settings._ - We should provide the ability to change these settings via variables stored in `defaults/main.yml`.
- _The software has the capability to integrate with another piece of software in the [main playbook]({{ repository.playbooks }})_. - This integration should be automated.
Ideally, you should use the software installed by the [main playbook]({{ repository.playbooks }}). This is really the only way of testing whether or not the software was installed properly and has all the common settings automated. The software installed by the main playbook is all widely-acclaimed, cross-platform software that many people find useful.

View file

@ -0,0 +1,3 @@
## Pull Requests
All pull requests should be associated with issues on [the GitLab repository issues board]({{ repository.gitlab }}{{ repository.location.issues.gitlab }}). Although not strictly required, pull requests should also be opened on GitLab instead of on the [GitHub mirror repository]({{ repository.github }}{{ repository.location.issues.github }}). This is because we use GitLab as our primary repository and mirror the changes to GitHub for the community.

View file

@ -0,0 +1,44 @@
## Supported Operating Systems
All of our roles should run without error on the following operating systems:
- Archlinux (Latest)
- CentOS 7 and 8
- Debian 9 and 10
- Fedora (Latest)
- Ubuntu (16.04, 18.04, 20.04, and Latest)
- Mac OS X (Latest)
- Windows 10 (Latest)
### Other Operating Systems
Although we do not have a timeline set up, we are considering adding support for the following operating systems:
- **Qubes**
- Elementary OS
- Zorin
- OpenSUSE
- Manjaro
- FreeBSD
- Mint
### Code Style for Platform-Specific Roles
If you have a role that only installs software made for Windows 10, ensure that the tasks are only run on Windows systems by using `when:` in the `tasks/main.yml` file. Take the following `main.yml` as an example:
```yaml
---
- name: Include variables based on the operating system
  include_vars: '{{ ansible_os_family }}.yml'
  when: ansible_os_family == 'Windows'

- name: Include tasks based on the operating system
  become: true
  block:
    - include_tasks: 'install-{{ ansible_os_family }}.yml'
  when: ansible_os_family == 'Windows'
```
### Preferred Installation Method for Mac OS X
We currently support installing applications with both [Homebrew](https://brew.sh/) casks and [mas](https://github.com/mas-cli/mas). Since mas does not allow automated logins to the App Store (and requires that the application was already installed by the account signed into the App Store GUI), we prefer Homebrew casks for installing applications.
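In practice, a cask-based install in a role might look like this minimal sketch using the `community.general.homebrew_cask` module (the cask name is just an example):

```yaml
- name: Ensure Visual Studio Code is installed via a Homebrew cask
  community.general.homebrew_cask:
    name: visual-studio-code
    state: present
  when: ansible_os_family == 'Darwin' # Only run on macOS targets
```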

View file

@ -0,0 +1,62 @@
## Testing
You can test all of the operating systems we support by running the following command in the root of the project:
```shell
molecule test
```
The command `molecule test` will spin up VirtualBox VMs for all the OSes we support and run the role(s). _Do this before committing code._ If you are committing code for only one OS and cannot create the fix or feature for the other operating systems then please, at the very minimum, [file an issue]({{ repository.gitlab }}{{ repository.location.issues.gitlab }}) so someone else can pick it up.
### Idempotence
It is important to note that `molecule test` tests for idempotence. To pass the idempotence test means that if you run the role twice in a row then Ansible should not report any changes the second time around.
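If you want to run the idempotence check on its own, Molecule ships with a dedicated subcommand that re-runs the converge playbook and fails if any task reports a change:

```shell
molecule converge
molecule idempotence
```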
### Debugging
If you would like to shell into a container for debugging, you can do that by running:
```shell
task common:shell
```
### Molecule Documentation
For more information about Ansible Molecule, check out [the docs](https://molecule.readthedocs.io/en/latest/).
### Testing Desktop Environments
Some of our roles include applications like [Android Studio](https://github.com/ProfessorManhattan/ansible-androidstudio). You cannot fully test Android Studio from a Docker command line. In cases like this, you should use our desktop scenarios to provision a desktop GUI-enabled VM to test things like:
- Making sure the Android Studio shortcut is in the applications menu
- Opening Android Studio to make sure it is behaving as expected
- Seeing if there is anything we can automate (e.g. if there is a "Terms of Usage" prompt that you have to click OK on, then we should automate that process if possible)
You can specify which scenario you want to test by passing the `-s` flag with the name of the scenario you want to run. For instance, if you wanted to test on Ubuntu Desktop, you would run the following command:
```shell
molecule test -s ubuntu-desktop
```
This would run the Molecule test on Ubuntu Desktop.
By default, the `molecule test` command will destroy the VM after the test is complete. To run the Ubuntu Desktop test and then open the desktop GUI you would have to:
1. Run `molecule converge -s ubuntu-desktop`
2. Open the VM through the VirtualBox UI (the username and password are both _vagrant_)
You can obtain a list of all possible scenarios by looking in the `molecule/` folder. The `molecule/default/` folder is run when you do not pass a scenario. All the other scenarios can be run by manually specifying the scenario (e.g. `molecule test -s ubuntu-desktop` will run the test using the scenario in `molecule/ubuntu-desktop/`).
### Molecule Scenario Descriptions
The chart below provides a list of the scenarios we include in all of our Ansible projects along with a brief description of what they are included for.
{{ molecule_descriptions }}
### Continuous Integration (CI)
You might have noticed that there are no CI tests in the chart above for macOS and Windows. Due to the limitations of Docker, we use other methods to test macOS and Windows automatically with CI. After a project has passed various linting tests on GitLab CI, the following methods are used to test the Ansible play:
- Linux platforms are tested using Molecule and Docker on GitLab CI in parallel. ([Link to GitLab CI configuration]({{ repository.group.ci }}/-/blob/master/test/molecule.gitlab-ci.yml))
- Windows is tested using GitLab CI without Molecule. ([Link to GitLab CI configuration]({{ repository.group.ci }}/-/blob/master/test/windows-ansible-test.gitlab-ci.yml))
- macOS is tested using GitHub Actions after the code is automatically synchronized between GitLab and GitHub. ([Link to the macOS GitHub Action configuration]({{ repository.github }}/blob/master/.github/workflows/macOS.yml))

1
.config/docs/local/package-lock.json generated Normal file

File diff suppressed because one or more lines are too long

1246
.config/docs/local/yarn.lock Normal file

File diff suppressed because it is too large

View file

@ -0,0 +1,3 @@
## Architecture
You can find a high-level overview of what each folder and file does in the [ARCHITECTURE.md](docs/ARCHITECTURE.md) file.

View file

@ -0,0 +1,36 @@
## Requirements
- **[Python >=3.7](https://www.python.org/)**
- **[Ansible >=2.9](https://www.ansible.com/)**
- Ansible controller should be a macOS/Linux environment (WSL/Docker can be used on Windows)
### Host Requirements
There are Python and Ansible package requirements that need to be installed by running the following command (or equivalent) in the root of this repository:
```shell
pip3 install -r .config/requirements.txt
ansible-galaxy install -r requirements.yml
```
#### Easier Method of Installing the Host Requirements
You can also run `bash start.sh` if you do not mind development dependencies being installed as well. This method will even handle installing Python 3 and Ansible.
### Operating System
**This playbook is built and tested to run on fresh installs of Windows, Mac OS X, Ubuntu, Fedora, Debian, CentOS, Archlinux, and Qubes**. It may still be possible to run the playbook on your current machine. However, running the playbook on a fresh install is the only scenario we actively support. That said, if you come across an issue with an environment that already has configurations and software present, please do not hesitate to [open an issue]({{ repository.gitlab }}{{ repository.location.issues.gitlab }}).
### Connection
SSH (or WinRM in the case of Windows) and Python should be available on the target systems you would like to provision. If you are attempting to provision a Windows machine, you can ensure that WinRM is enabled and configured so that you can remotely provision the Windows target by running the following command with PowerShell:
```powershell
Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://install.doctor/windows-client'))
```
### MAS on Mac OS X
We use [mas](https://github.com/mas-cli/mas) to install apps from the App Store in some of our roles. Sadly, automatically signing into the App Store is not possible on OS X 10.13+ via mas. This is because [mas no longer supports login functionality on OS X 10.13+](https://github.com/mas-cli/mas/issues/164).
There is another caveat with mas. In order to install an application using mas, the application has to have already been added via the App Store GUI. This means that the first time around you will have to install the apps via the App Store GUI so they are associated with your App Store account.
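With those caveats in mind, a mas-based install in a role might look like this sketch using the `community.general.mas` module (the App Store ID shown is Xcode's, included only as an example):

```yaml
- name: Ensure Xcode is installed from the App Store
  community.general.mas:
    id: 497799835 # Xcode's App Store ID - the app must already be tied to your account
    state: present
```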

View file

@ -0,0 +1,16 @@
## Introduction
Welcome to a new way of doing things. Born out of complete paranoia and a relentless pursuit of the best of GitHub Awesome lists, Gas Station aims to make it possible to completely wipe whole networks and restore them on a regular basis. It takes a unique approach to network provisioning because it supports desktop provisioning as a first-class citizen. By default, without much configuration, it is meant to provision and maintain the state of a network that includes development workstations and servers. One type of user that might benefit from this project is a web developer who wants to start saving the state of their desktop as code. Another type of user is one who wants to start hosting RAM-intensive web applications in their home-lab environment to save huge amounts on cloud costs. This project is also meant to be maintainable by a single person. Granted, if you look through our ecosystem you will see we are well-equipped for supporting entire teams as well.
Gas Station is a collection of Ansible playbooks, configurations, scripts, and roles meant to provision computers and networks with the "best of GitHub". By leveraging Ansible, you can provision your whole network relatively quickly in the event of a disaster or a scheduled network reset. This project is also intended to increase the security of your network by allowing you to frequently wipe, reinstall, and re-provision your network, bringing it back to its original state. This is done by backing up container storage volumes (like database files and Docker volumes) to encrypted S3 buckets, storing configurations in encrypted git repositories, and leveraging GitHub-sourced power tools to make the job easy-peasy.
This project started when a certain somebody changed their desktop wallpaper to a _cute_ picture of a cat 🐱 when, all of a sudden, their computer meowed. Well, it actually started before that but no one believes someone who claims that time travelers hacked them on a regular basis. *Tip: If you are stuck in spiritual darkness involving time travelers, save yourself some headaches by adopting an other-people first mentality that may include volunteering, tithing, and surrendering to Jesus Christ.* Anyway, enough preaching!
Gas Station is:
- Highly configurable - most roles come with optional variables that you can configure to change the behavior of the role
- Highly configured - in-depth research is done to ensure each software component is configured with bash completions, plugins that are well-received by the community, and integrated with other software used in the playbook
- Compatible with all major operating systems (i.e. Windows, Mac OS X, Ubuntu, Fedora, CentOS, Debian, and even Archlinux)
- The product of a team of experts
- An amazing way to learn about developer tools that many would consider to be "the best of GitHub"
- Open to new ideas - feel free to [open an issue]({{ repository.gitlab }}{{ repository.location.issues.gitlab }}) or [contribute]({{ repository.github }}{{ repository.location.contributing.github }}) with a [pull request]({{ repository.github }}{{ repository.location.issues.github }})!

View file

@ -0,0 +1,17 @@
## Managing Environments
We accomplish managing different environments by symlinking all the folders that should be unique to each network environment (e.g. `host_vars/`, `group_vars/`, `inventories/`, `files/vpn/`, and `files/ssh/`). In the `environments/` folder, you will see multiple folders. In our case, `environments/dev/` contains sensible configurations for testing the playbook and its roles. The production environment is a separate git submodule that links to a private git repository containing our Ansible-vaulted API keys and passwords. When you are ready to set up your production configurations, you can use this method of storing your environment-specific folders in the `environments/` folder as well. But if you are just starting off, you do not have to worry about this since, by default, this playbook is configured to run with the settings included in the `environments/dev/` folder.
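To illustrate the symlink layout (the links below are examples only; the project's build tools manage the real ones, so you normally do not create them by hand):

```shell
# Point the top-level folders at the active environment
ln -sfn environments/dev/host_vars host_vars
ln -sfn environments/dev/group_vars group_vars
ln -sfn environments/dev/inventories inventories
```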
### Switching Between Environments
If you already have the project bootstrapped (i.e. already ran `bash .config/scripts/start.sh`), you can switch environments with an interactive prompt by running:
```shell
task ansible:playbook:environment
```
Alternatively, you can run the following if you would like to bypass the prompt:
```shell
task ansible:playbook:environment -- environmentName
```

View file

@ -0,0 +1,20 @@
## Philosophy
The philosophy of this project basically boils down to "**_automate everything_**" and include the best development tools that might be useful without over-bloating the machine with services. Automating everything should include tasks like automatically accepting software terms in advance or pre-populating Portainer with certificates of all the Docker hosts you would like to control. One problem we face is that there are so many great tools offered on GitHub. A lot of research has to go into what to include and what to pass on. The decision of whether or not to include a piece of software in the default playbook basically boils down to:
- **Project popularity** - If one project has 10k stars and a similar alternative has 500 stars then 9 times out of 10 the more popular project is selected.
- **Last commit date** - We prefer software that is being actively maintained, for obvious reasons.
- **Cross platform** - Our playbook supports the majority of popular operating systems so we opt for cross-platform software. However, in some cases, we will include software that has limited cross-platform support like Xcode (which is only available on Mac OS X). If a piece of software is too good to pass up, it is added and only installed on the system(s) that support it.
- **Usefulness** - If a tool could potentially improve developer effectiveness then we are more likely to include it.
- **System Impact** - Software that can be run with a small RAM footprint and software that does not need a service to load on boot is much more likely to be included.
One of the goals of this project is to be able to re-provision a network with the click of a button. This might not be feasible since consumer-grade hardware usually does not include features like IPMI (a feature included in high-end motherboards that lets you control the power state remotely). However, we aim to reduce the amount of interaction required when re-provisioning an entire network down to the bare minimum. In the worst-case scenario, you will have to reformat, reinstall the operating system, and ensure that OpenSSH (or WinRM in the case of Windows) is running on each of the computers in your network. The long-term goal, however, is to let you reformat and reinstall the operating system on your Ansible host using an automated USB installer and then automatically re-provision everything else on the network by utilizing IPMI.
You might ask, "But how can I retain application-level configurations?" We currently handle this by:
- Pre-defining dotfiles in a customizable Git repository
- Backing up to encrypted S3 buckets
- Syncing files to private git repositories
- Utilizing tools that synchronize settings like [mackup](https://github.com/lra/mackup) or [macprefs](https://github.com/clintmod/macprefs) in the case of macOS
However, we intentionally keep this synchronization to a minimum (i.e. only back up what is necessary). After all, one of the goals of this project is to be able to regularly flush the bad stuff off a system. By keeping what we back up to a minimum, we reduce the attack surface.

View file

@ -0,0 +1 @@
> <br/><h4 align="center">**A no-stone-unturned Ansible playbook you can use to set up the ultimate home lab or on-premise addition to your cloud!**</h4><br/><br/>

View file

@ -0,0 +1,35 @@
## Quick Start
The easiest way to run the entire playbook, outlined in the `main.yml` file, is to run the appropriate command listed below. Each command runs the playbook on the machine you execute it on. This is probably the best way to get your feet wet before you decide to give us a ⭐ and customize the playbook for your own needs. Ideally, this command should be run on the machine that you plan on using with Ansible to provision the other computers on your network. It is only guaranteed to work on fresh installs, so testing it out with [Vagrant](https://www.vagrantup.com/) is highly encouraged.
### Vagrant (Recommended)
To test it out with Vagrant, you can run the following commands which will open up an interactive dialog where you can pick which operating system and virtualization provider you wish to test the installation with:
```shell
bash start.sh && task ansible:test:vagrant
```
### macOS/Linux
```shell
curl -sSL https://install.doctor/quickstart > ./setup.sh && bash ./setup.sh
```
### Windows
In an administrative PowerShell session, run:
```powershell
iex ((New-Object System.Net.WebClient).DownloadString('https://install.doctor/windows-quickstart'))
```
### Qubes
Our playbooks include a specially crafted playbook for Qubes. It will load your VMs with sensible defaults. For more details, check out the [Qubes playbook](https://gitlab.com/megabyte-labs/gas-station/-/blob/master/playbooks/qubes.yml) and [Qubes variables](https://gitlab.com/megabyte-labs/gas-station/-/blob/master/environments/prod/group_vars/qubes). Perhaps most importantly, the ["quickstart" inventory file](https://gitlab.com/megabyte-labs/gas-station/-/blob/master/environments/prod/inventories/quickstart.yml) details the VM structure that the provisioning script adds to the target system.
To set up Qubes, run the following on a fresh install in dom0:
```shell
qvm-run --pass-io sys-firewall "curl -sSL https://install.doctor/qubes" > ./setup.sh && bash ./setup.sh
```

View file

@ -0,0 +1,82 @@
## Software
This project breaks down software into a role (found in the subdirectories of the `roles/` folder) if the software requires anything other than being added to the `PATH` variable. Below is a quick description of what each role does. Browsing through this list, along with the conditions laid out in `main.yml`, you will be able to get a better picture of what software will be installed by the default `main.yml` playbook.
### Role-Based Software
{{ role_var_chart }}
We encourage you to browse through the repositories that are linked to in the table above to learn about the configuration options they support. Some of the roles are included as roles because they support configurations that rely on user-specific variables like API keys.
### Binaries
A lot of the nifty software we install by default does not require any configuration other than being added to the `PATH` or being installed with an installer like `brew`. For this kind of software that requires no configuration, we list the software we would like installed by the playbook as a variable in `group_vars/` or `host_vars/` as an array of keys assigned to the `software` variable ([example here](environments/prod/group_vars/desktop/vars.yml)). With those keys, we install the software using the [`professormanhattan.genericinstaller`](https://galaxy.ansible.com/professormanhattan/genericinstaller) role which determines how to install the binaries by looking up the keys against the `software_package` object ([example here](environments/prod/group_vars/all/software.yml)).
**NOTE:** The binary packages listed in these charts will be installed with the system package manager when possible, then from source (if that option is available), before resorting to less desirable methods like downloading the binary from GitHub releases. The order of installation-method preference that the [`professormanhattan.genericinstaller`](https://galaxy.ansible.com/professormanhattan/genericinstaller) role attempts to use is defined in the `INSERT_VARIABLE_NAME` variable. The default order is as follows (a sketch of the variable layout appears after the list):
1. System package managers
2. Compiling from source (via Go, Rust, etc.)
3. Installing via Homebrew
4. Downloading the pre-compiled assets from GitHub releases
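To make the shape of these variables concrete, here is a hypothetical sketch; the real schema lives in the `software.yml` file linked above and may differ:

```yaml
# group_vars excerpt (hypothetical) - the list of software keys to install
software:
  - bat
  - croc

# software_package excerpt (hypothetical) - maps each key to its install methods
software_package:
  bat:
    apt: bat
    brew: bat
    github: https://github.com/sharkdp/bat
  croc:
    brew: croc
    github: https://github.com/schollz/croc
```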
For your convenience, we have split the long list of single-binary software into two lists - one for CLIs and one for desktop applications:
#### Binary Desktop Applications
{{ binaryapp_var_chart }}
#### Binary CLIs / TUIs
{{ binarycli_var_chart }}
### NPM Packages
NPM provides a huge catalog of useful CLIs and libraries so we also include a useful and interesting default set of NPM-hosted CLIs for hosts in the `desktop` group ([defined here](environments/prod/group_vars/desktop/npm-packages.yml), for example):
{{ npm_var_chart }}
### Python Packages
In a similar fashion to the NPM packages, we include a great set of default Python packages that are included by default for the `desktop` group ([defined here](environments/prod/group_vars/desktop/pip-packages.yml)):
{{ pypi_var_chart }}
### Ruby Gems
A handful of Ruby gems are also installed on targets in the `desktop` group ([defined here](environments/prod/group_vars/desktop/ruby-gems.yml)):
{{ gem_var_chart }}
### Visual Studio Code Extensions
A considerable amount of effort has gone into researching and finding the "best" VS Code extensions. They are [defined here](environments/prod/group_vars/desktop/vscode-extensions.yml) and Gas Station also installs a good baseline configuration which includes settings for these extensions:
{{ vscode_var_chart }}
### Chrome Extensions
To reduce the amount of time it takes to configure Chromium-based browsers like Brave, Chromium, and Chrome, we also include the capability of automatically installing Chromium-based browser extensions (via a variable [defined here](environments/prod/group_vars/desktop/chrome-extensions.yml)):
{{ chrome_var_chart }}
### Firefox Add-Ons
Below you can find the Firefox extensions that the base configuration of this playbook will automatically install:
{{ firefox_var_chart }}
### Homebrew Formulae (macOS and Linux only)
Although most of the `brew` installs are handled by the [Binaries](#binaries) installer, some `brew` packages are also installed using [this configuration](environments/prod/group_vars/desktop/homebrew.yml). The default Homebrew formulae include:
{{ brew_var_chart }}
### Homebrew Casks (macOS only)
On macOS, some software is installed using Homebrew casks. These include:
{{ cask_var_chart }}
### Go, Rust, and System-Specific Packages
Go packages, Rust crates, and system-specific packages like `.deb` and `.rpm` bundles are all handled by the [`professormanhattan.genericinstaller`](https://galaxy.ansible.com/professormanhattan/genericinstaller) role described above in the [Binaries](#binaries) section. There are also ways of installing Go and Rust packages directly by using configuration options provided by their corresponding roles outlined in the [Role-Based Software](#role-based-software) section.

View file

@ -0,0 +1,38 @@
<div align="center">
<a href="{{ link.home }}" title="{{ organization }} homepage" target="_blank">
<img alt="Homepage" src="https://img.shields.io/website?down_color=%23FF4136&down_message=Down&label=Homepage&logo=home-assistant&logoColor=white&up_color=%232ECC40&up_message=Up&url=https%3A%2F%2Fmegabyte.space&style={{ badge_style }}" />
</a>
<a href="{{ repository.github }}{{ repository.location.contributing.github }}" title="Learn about contributing" target="_blank">
<img alt="Contributing" src="https://img.shields.io/badge/Contributing-Guide-0074D9?logo=github-sponsors&logoColor=white&style={{ badge_style }}" />
</a>
<a href="{{ link.chat }}" title="Chat with us on Slack" target="_blank">
<img alt="Slack" src="https://img.shields.io/badge/Slack-Chat-e01e5a?logo=slack&logoColor=white&style={{ badge_style }}" />
</a>
<a href="{{ link.gitter }}" title="Chat with the community on Gitter" target="_blank">
<img alt="Gitter" src="https://img.shields.io/gitter/room/megabyte-labs/community?logo=gitter&logoColor=white&style={{ badge_style }}" />
</a>
<a href="{{ repository.github }}" title="GitHub mirror" target="_blank">
<img alt="GitHub" src="https://img.shields.io/badge/Mirror-GitHub-333333?logo=github&style={{ badge_style }}" />
</a>
<a href="{{ repository.gitlab }}" title="GitLab repository" target="_blank">
<img alt="GitLab" src="https://img.shields.io/badge/Repo-GitLab-fc6d26?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgAQMAAABJtOi3AAAABlBMVEUAAAD///+l2Z/dAAAAAXRSTlMAQObYZgAAAHJJREFUCNdNxKENwzAQQNEfWU1ZPUF1cxR5lYxQqQMkLEsUdIxCM7PMkMgLGB6wopxkYvAeI0xdHkqXgCLL0Beiqy2CmUIdeYs+WioqVF9C6/RlZvblRNZD8etRuKe843KKkBPw2azX13r+rdvPctEaFi4NVzAN2FhJMQAAAABJRU5ErkJggg==&style={{ badge_style }}" />
</a>
</div>
<br/>
<div align="center">
<a title="Version: {{ pkg.version }}" href="{{ repository.github }}" target="_blank">
<img alt="Version: {{ pkg.version }}" src="https://img.shields.io/badge/version-{{ pkg.version }}-blue.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgAQMAAABJtOi3AAAABlBMVEUAAAD///+l2Z/dAAAAAXRSTlMAQObYZgAAACNJREFUCNdjIACY//+BEp9hhM3hAzYQwoBIAqEDYQrCZLwAAGlFKxU1nF9cAAAAAElFTkSuQmCC&cacheSeconds=2592000&style={{ alt_badge_style }}" />
</a>
<a title="Build status" href="{{ repository.gitlab }}{{ repository.location.commits.gitlab }}" target="_blank">
<img alt="Build status" src="https://img.shields.io/gitlab/pipeline-status/{{ playbook_path }}?branch=master&label=build&logo=gitlab&style={{ alt_badge_style }}">
</a>
<a title="E2E test status for all operating systems" href="{{ repository.gitlab }}{{ repository.location.commits.gitlab_e2e }}" target="_blank">
<img alt="E2E test status" src="https://img.shields.io/gitlab/pipeline-status/{{ playbook_path }}?branch=e2e&label=e2e%20test&logo=virtualbox&style={{ alt_badge_style }}">
</a>
<a title="Documentation" href="{{ link.docs }}/{{ group }}" target="_blank">
<img alt="Documentation" src="https://img.shields.io/badge/documentation-yes-brightgreen.svg?logo=readthedocs&style={{ alt_badge_style }}" />
</a>
<a title="License: {{ license }}" href="{{ repository.github }}{{ repository.location.license.github }}" target="_blank">
<img alt="License: {{ license }}" src="https://img.shields.io/badge/license-{{ license }}-yellow.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgAQMAAABJtOi3AAAABlBMVEUAAAD///+l2Z/dAAAAAXRSTlMAQObYZgAAAHpJREFUCNdjYOD/wMDAUP+PgYHxhzwDA/MB5gMM7AwMDxj4GBgKGGQYGCyAEEgbMDDwAAWAwmk8958xpIOI5zKH2RmOyhxmZjguAiKmgIgtQOIYmFgCIp4AlaQ9OczGkJYCJEAGgI0CGwo2HmwR2Eqw5SBnNIAdBHYaAJb6KLM15W/CAAAAAElFTkSuQmCC&style={{ alt_badge_style }}" />
</a>
</div>

View file

@ -0,0 +1,5 @@
## Supported Operating Systems
The following chart shows the operating systems that have been tested for compatibility using the `environments/dev/` environment. This chart is automatically generated using the Ansible Molecule tests you can view in the `molecule/default/` folder. We currently have logic in place to automatically handle the testing of Windows, Mac OS X, Ubuntu, Fedora, CentOS, Debian, Archlinux, and, of course, Qubes. If your operating system is not listed but is a variant of one of the systems we test (i.e. a Debian-flavored system or a RedHat-flavored system) then it might still work.
{{ compatibility_matrix }}

View file

@ -0,0 +1,35 @@
## Web Applications
This playbook does a bit more than just install software. It can also optionally set up web applications. If you choose to deploy the default Gas Station web applications on your network, you should probably do it on a computer/server that has a lot of RAM (e.g. 64GB+).
Although a production environment will always be more stable and performant if it is hosted with a major cloud provider, sometimes it makes more sense to locally host web applications. Some applications have abnormally large RAM requirements that could potentially cost thousands per month to host with a legit cloud provider.
We use Kubernetes as the provider for the majority of the applications. It is a production-grade system and, although it has a steeper learning curve, it is well worth it. Each application we install is packaged as a Helm chart. All of the data is backed up regularly to an encrypted cloud S3 bucket of your choice.
### Helm Charts
The Helm charts that this playbook can fully set up are listed below.
{{ helm_var_chart }}
### Host Applications
Several apps are installed on each computer provisioned with the default settings of Gas Station. Docker Compose is used to manage the deployment. These apps include:
{{ hostapp_var_chart }}
You can, of course, disable deploying these apps. However, we include them because they have a small footprint and include useful features. You can also customize the list of apps you wish to include on each host.
#### HTPC
We do not maintain any of the host applications except the ones listed above. However, we do provide the capability of marking a computer as an HTPC during provisioning. Doing this will include a suite of web applications with powerful auto-downloading, organizing, tagging, and media-serving capabilities. Since most people will probably be stepping outside the confines of the law for this, it is not recommended. If you still want to experiment, you can find descriptions of the applications below. The applications are intended to be hosted on a single computer via Docker Compose. The backend for Kodi is included, but you should still use the regular installation method for Plex and the front-end of Kodi to view your media collection.
{{ htpc_var_chart }}
### Online Services
Certain parts of the stack rely on cloud-based service providers. All of the providers can be used for free. The providers are generally chosen because their settings need to persist or the functionality that they provide would benefit from a security-hardened SaaS offering.
You can, of course, swap these services out for alternatives. However, our scripts integrate these specific services so if you want to swap them out then some leg work will be necessary.
{{ saas_var_chart }}

View file

@ -0,0 +1,24 @@
## Dependencies
Most of our roles rely on [Ansible Galaxy]({{ profile_link.galaxy }}) collections. Some of our projects are also dependent on other roles and collections that are published on Ansible Galaxy. Before you run this role, you will need to install the collection and role dependencies, as well as the Python requirements, by running:
```shell
if type poetry &> /dev/null; then poetry install --no-root; else pip3 install -r .config/assets/requirements.txt; fi
ansible-galaxy install -r requirements.yml
```
Alternatively, you can simply run `bash .config/scripts/start.sh` if you are new to Ansible and do not mind the development requirements also being installed. This is the easy way of making sure that everything works properly.
### Python
Although the only tool necessary to run this play on a standard machine is Ansible (a Python package), we include several other Python dependencies that are required for specialized use cases and development. The table below details these packages:
{{ python_role_dependencies }}
### Galaxy Roles
Although most of our roles do not have dependencies, there are some cases where another role has to be installed before the logic can continue. At the beginning of the play, the Ansible Galaxy role dependencies listed in `meta/main.yml` will run. These dependencies are configured to only run once per playbook. If you include more than one of our roles in your playbook that have dependencies in common then the dependency installation will be skipped after the first run. Some of our roles also utilize helper roles directly from the task files which helps keep our [main playbook (Gas Station)]({{ repository.playbooks }}) DRY.
The `requirements.yml` file contains a full list of the Ansible Galaxy dependencies required by this role (i.e. `meta/main.yml` role dependencies, helper roles, collections, etc.). For your convenience, a list of the role dependencies along with quick descriptions is below:
{{ role_dependencies }}
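For reference, a `requirements.yml` that combines role and collection dependencies generally takes this form (the entries below are placeholders):

```yaml
---
roles:
  - name: professormanhattan.docker
collections:
  - name: community.general
  - name: chocolatey.chocolatey
```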

View file

@ -0,0 +1,15 @@
## Example Playbook
With the dependencies installed, all you have to do is add the role to your main playbook. The role handles the `become` behavior so you can simply add the role to your playbook without having to worry about commands that should not be run as root:
```yaml
- hosts: all
roles:
- {{ galaxy_info.namespace }}.{{ galaxy_info.role_name }}
```
If you are incorporating this role into a pre-existing playbook, then it might be prudent to copy the requirements outlined in `pyproject.toml` and `requirements.yml` to their corresponding files in the root of your playbook so you only have to worry about installing one set of requirements during future use. Note that the dependencies in `pyproject.toml` can be moved to the more traditional `requirements.txt`, if that is what you are currently using to track Python dependencies.
### Real World Example
You can find an example of a playbook that incorporates this role in our main playbook (a.k.a. [Gas Station]({{ repository.project.playbooks }})). The playbook is an excellent example for someone learning how to use Ansible. It also incorporates a lot of well-thought-out build tools that more advanced Ansible users can appreciate. Even people who could not care less about Ansible can benefit from it because it allows you to more or less turn your computer (and network) into the ultimate development environment. The bottom line is that it is an awesome project that developers should know about!

View file

@ -0,0 +1,3 @@
## Overview
{{ alternative_description }}. {{ overview }}

View file

@ -0,0 +1,3 @@
> <br/><h4 align="center">**{{ subheader_description }}**</h4><br/>
<!--TERMINALIZE![{{ terminalizer_title }}]({{ repository.group.ansible_roles }}/{{ galaxy_info.role_name }}{{ repository.location.demo }})TERMINALIZE-->

View file

@ -0,0 +1,19 @@
## Quick Start
Looking to install {{ name }} without having to deal with [Ansible](https://www.ansible.com/)? Simply run the command below that corresponds to your operating system:
**Linux/macOS:**
```shell
curl -sS {{ link.installdoctor }}/{{ galaxy_info.role_name }} | bash
```
**Windows:**
```powershell
Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://install.doctor/{{ galaxy_info.role_name }}?os=win'))
```
**Important Note:** _Before running the commands above you should probably directly access the URL to make sure the code is legit. We already know it is safe but, before running any script on your computer, you should inspect it._
You can also check out **[Install Doctor]({{ link.installdoctor }})**. It is an app we created that can install any Ansible role with a one-liner. It has some other nifty features too like the ability to install binaries on-the-fly without requiring a password. However, if you would like to incorporate this role into an Ansible playbook (and customize settings) then please continue reading below.

View file

@ -0,0 +1,53 @@
<div align="center">
<a href="{{ link.home }}" title="{{ organization }} homepage" target="_blank">
<img alt="Homepage" src="https://img.shields.io/website?down_color=%23FF4136&down_message=Down&label=Homepage&logo=home-assistant&logoColor=white&up_color=%232ECC40&up_message=Up&url=https%3A%2F%2Fmegabyte.space&style={{ badge_style }}" />
</a>
<a href="{{ repository.github }}{{ repository.location.contributing.github }}" title="Learn about contributing" target="_blank">
<img alt="Contributing" src="https://img.shields.io/badge/Contributing-Guide-0074D9?logo=github-sponsors&logoColor=white&style={{ badge_style }}" />
</a>
<a href="{{ link.chat }}" title="Chat with us on Slack" target="_blank">
<img alt="Slack" src="https://img.shields.io/badge/Slack-Chat-e01e5a?logo=slack&logoColor=white&style={{ badge_style }}" />
</a>
<a href="{{ link.gitter }}" title="Chat with the community on Gitter" target="_blank">
<img alt="Gitter" src="https://img.shields.io/gitter/room/megabyte-labs/community?logo=gitter&logoColor=white&style={{ badge_style }}" />
</a>
<a href="{{ repository.github }}" title="GitHub mirror" target="_blank">
<img alt="GitHub" src="https://img.shields.io/badge/Mirror-GitHub-333333?logo=github&style={{ badge_style }}" />
</a>
<a href="{{ repository.gitlab }}" title="GitLab repository" target="_blank">
<img alt="GitLab" src="https://img.shields.io/badge/Repo-GitLab-fc6d26?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgAQMAAABJtOi3AAAABlBMVEUAAAD///+l2Z/dAAAAAXRSTlMAQObYZgAAAHJJREFUCNdNxKENwzAQQNEfWU1ZPUF1cxR5lYxQqQMkLEsUdIxCM7PMkMgLGB6wopxkYvAeI0xdHkqXgCLL0Beiqy2CmUIdeYs+WioqVF9C6/RlZvblRNZD8etRuKe843KKkBPw2azX13r+rdvPctEaFi4NVzAN2FhJMQAAAABJRU5ErkJggg==&style={{ badge_style }}" />
</a>
</div>
<br/>
<div align="center">
<a title="Ansible Galaxy role: {{ profile.galaxy }}.{{ galaxy_info.role_name }}" href="{{ profile_link.galaxy }}/{{ profile.galaxy }}/{{ galaxy_info.role_name }}" target="_blank">
<img alt="Ansible Galaxy role: {{ profile.galaxy }}.{{ galaxy_info.role_name }}" src="https://img.shields.io/ansible/role/{{ ansible_galaxy_project_id }}?logo=ansible&style={{ alt_badge_style }}" />
</a>
<a title="Version: {{ pkg.version }}" href="{{ repository.github }}" target="_blank">
<img alt="Version: {{ pkg.version }}" src="https://img.shields.io/badge/version-{{ pkg.version }}-blue.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgAQMAAABJtOi3AAAABlBMVEUAAAD///+l2Z/dAAAAAXRSTlMAQObYZgAAACNJREFUCNdjIACY//+BEp9hhM3hAzYQwoBIAqEDYQrCZLwAAGlFKxU1nF9cAAAAAElFTkSuQmCC&cacheSeconds=2592000&style={{ alt_badge_style }}" />
</a>
<a title="GitLab build status" href="{{ repository.gitlab }}{{ repository.location.commits.gitlab }}" target="_blank">
<img alt="Build status" src="https://img.shields.io/gitlab/pipeline-status/{{ repository.group.ansible_roles_path }}/{{ galaxy_info.role_name }}?branch=master&label=build&logo=gitlab&logoColor=white&style={{ alt_badge_style }}">
</a>
<a title="Windows 11 test status on GitHub" href="{{ repository.github }}/actions/workflows/Windows.yml" target="_blank">
<img alt="Windows 11 test status" src="https://img.shields.io/github/workflow/status/{{ profile.github }}/{{ repository.prefix.github }}{{ galaxy_info.role_name }}/Windows%20Ansible%20Role%20Test/master?color=cyan&label=windows&logo=windows&style={{ alt_badge_style }}">
</a>
<a title="macOS test status on GitLab" href="{{ repository.gitlab }}{{ repository.location.commits.gitlab }}" target="_blank">
<img alt="macOS test status" src="https://img.shields.io/gitlab/pipeline-status/{{ repository.group.ansible_roles_path }}/{{ galaxy_info.role_name }}?branch=test%2Fdarwin&label=osx&logo=apple&style={{ alt_badge_style }}">
</a>
<a title="Linux Molecule test status on GitLab" href="{{ repository.gitlab }}{{ repository.location.commits.gitlab }}" target="_blank">
<img alt="Linux Molecule test status" src="https://img.shields.io/gitlab/pipeline-status/{{ repository.group.ansible_roles_path }}/{{ galaxy_info.role_name }}?branch=test%2Flinux&label=linux&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgBAMAAACBVGfHAAAAElBMVEUAAAAwPEEuOEIxOzswPj7///91+pI+AAAABXRSTlMANRkNJejDPNcAAAB+SURBVCjPddHBDYAgDIXhGtMRHMG7S3hvTP79VxFIQVq1wOVLm7wU8QIJpSThC2wGwwJoPQFKRdiAAIhGsAykZNSZAOVNMx4BMjwtpySgr6CDJdB/MAdJwAvSiFoE5aABHUb0ch0WHNQq+KPAOgCgrbEnbjAHArjGz3jr3hpumrQpvwi66rkAAAAASUVORK5CYII=&style={{ alt_badge_style }}">
</a>
<a title="Ansible Galaxy quality score (out of 5)" href="{{ profile_link.galaxy }}/{{ profile.galaxy }}/{{ galaxy_info.role_name }}" target="_blank">
<img alt="Ansible Galaxy quality score" src="https://img.shields.io/ansible/quality/{{ ansible_galaxy_project_id }}?logo=ansible&style={{ alt_badge_style }}" />
</a>
<a title="Ansible Galaxy download count" href="{{ profile_link.galaxy }}/{{ profile.galaxy }}/{{ galaxy_info.role_name }}" target="_blank">
<img alt="Ansible Galaxy download count" src="https://img.shields.io/ansible/role/d/{{ ansible_galaxy_project_id }}?logo=ansible&label=downloads&style={{ alt_badge_style }}">
</a>
<a title="Documentation" href="{{ link.docs }}/{{ group }}" target="_blank">
<img alt="Documentation" src="https://img.shields.io/badge/documentation-yes-brightgreen.svg?logo=readthedocs&logoColor=white&style={{ alt_badge_style }}" />
</a>
<a title="License: {{ license }}" href="{{ repository.github }}{{ repository.location.license.github }}" target="_blank">
<img alt="License: {{ license }}" src="https://img.shields.io/badge/license-{{ license }}-yellow.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgAQMAAABJtOi3AAAABlBMVEUAAAD///+l2Z/dAAAAAXRSTlMAQObYZgAAAHpJREFUCNdjYOD/wMDAUP+PgYHxhzwDA/MB5gMM7AwMDxj4GBgKGGQYGCyAEEgbMDDwAAWAwmk8958xpIOI5zKH2RmOyhxmZjguAiKmgIgtQOIYmFgCIp4AlaQ9OczGkJYCJEAGgI0CGwo2HmwR2Eqw5SBnNIAdBHYaAJb6KLM15W/CAAAAAElFTkSuQmCC&style={{ alt_badge_style }}" />
</a>
</div>

View file

@ -0,0 +1,9 @@
## Supported Operating Systems
The chart below shows the operating systems that we have tested this role on. It is automatically generated using the Ansible Molecule tests located in the `molecule/` folder. There is CI logic in place to automatically handle the testing of Windows, macOS, Ubuntu, Fedora, CentOS, Debian, and Archlinux. If your operating system is not listed but is a variant of one of the systems we test (i.e. a Debian-flavored system or a RedHat-flavored system) then it is possible that the role will still work.
{{ compatibility_matrix }}
**_What does idempotent mean?_** Idempotent means that if you run this role twice in a row then there will be no changes to the system the second time around.
We spent a lot of time perfecting our CI configurations and build tools. If you are interested in learning more about how we perfected our process then you might find our [Ansible common files](https://gitlab.com/megabyte-labs/common/ansible) and [Ansible documentation](https://gitlab.com/megabyte-labs/documentation/ansible) repositories interesting. See the [CONTRIBUTING.md](docs/CONTRIBUTING.md) guide for more details.

592
.config/docs/variables.json Normal file
View file

@ -0,0 +1,592 @@
{
"SPACE": "",
"alt_badge_style": "flat-square",
"author": {
"email": "brian@megabyte.space",
"name": "Brian Zalewski"
},
"autodoc_actions_description": "",
"autodoc_tags_description": "",
"autodoc_todo_description": "",
"autodoc_variables_description": "",
"badge_style": "for-the-badge",
"blueprint_requirements": [
["Variable Name", "Variable Description"],
[
"`description`",
"Short description of the role, worded in such a way that it makes sense by itself and with 'An Ansible role that ' prepended to it"
],
["`group`", "This should always be set to 'ansible' for Ansible roles"],
[
"`name`",
"This should be the official name for the product that the role installs/configures. It is used in the title of the repository and throughout the documentation to refer to the product."
],
[
"`overview`",
"This variable should be a description of what the role installs. You can usually find a good description by Googling, \"What is Android Studio,\" for example if you were populating this variable for the [Android Studio role]({{ repository.group.ansible_roles }}/androidstudio). This text is shown at the top of the README, right below the header section and before the table of contents. Whenever possible, key products/terms should be linked to using markdown. You can see an example of us hyperlinking in this variable by checking out the [Android Studio role]({{ repository.group.ansible_roles }}/androidstudio). The idea is to make it as easy as possible for our users to figure out exactly what the role does."
],
["`repository.github`", "The HTTPS URL of the GitHub mirror"],
["`repository.gitlab`", "The HTTPS URL of the GitLab repository"],
[
"`slug`",
"This should generally be the ending slug of the GitHub mirror. It is used for things like filling in the package.json name."
],
["`subgroup`", "This should always be set to 'role' for Ansible roles"],
["`title`", "The title of the README.md"]
],
"commit_help_url": "https://megabyte.space/docs/contributing/commit-guidelines",
"company": "Megabyte LLC",
"copyright": "2020-2021",
"description_emojis": "👨🏻‍💻 🩺",
"docker_label_authors": "Brian Zalewski <brian@megabyte.space>",
"docs": {
"header_description_post": "<i></i>",
"header_description_pre": "<i></i>",
"header_title_post": "<i></i>",
"header_title_pre": "<i></i>",
"link": "https://megabyte.space/docs/common/ansible"
},
"downloadLinks": {
"fedora": "https://download.fedoraproject.org/pub/fedora/linux/releases/35/Workstation/x86_64/iso/Fedora-Workstation-Live-x86_64-35-1.2.iso",
"kali": "https://cdimage.kali.org/kali-2022.1/kali-linux-2022.1-installer-amd64.iso",
"qubes": "https://ftp.qubes-os.org/iso/Qubes-R4.1.0-x86_64.iso",
"tails": "https://ftp.osuosl.org/pub/tails/stable/tails-amd64-4.29/tails-amd64-4.29.iso",
"ubuntu": "https://mirror.arizona.edu/ubuntu-releases/21.10/ubuntu-21.10-desktop-amd64.iso",
"windows": "https://software.download.prss.microsoft.com/db/Win11_English_x64.iso?t=c15e0cba-9c8d-4984-b30f-43c42425733d&e=1650582458&h=960d57d2c6a0243e32d3106c0d8f82387966ddf9a9bfce82f89e66866457c014"
},
"email": {
"help": "help@megabyte.space"
},
"emoji_beginnings": ["🚀 ", "🔥👉 ", "👉 ", "😉 ", "🆓 ", "🐴 ", "👀 ", "🎉 ", "", "", "", "", "", "", "", ""],
"emoji_endings": [" 🚀", " 🔥🔥🔥", " 👏", " 😉", " 🐙", " 🐴", " 👀", " 🎟", " 🎉🎉", "", "", "", "", "", "", ""],
"github_prefix": "<i></i>",
"gitlab_pipelines": [
{
"active": true,
"cron": "0 5 1 * *",
"description": "Monthly Repository Update",
"ref": "synchronize",
"variable": {
"REPOSITORY_UPDATE": true
}
}
],
"gomodProxy": true,
"group": "ansible",
"groups": {
"angular": ["app", "website"],
"ansible": ["playbook", "role"],
"docker": ["ansible-molecule", "app", "ci-pipeline", "codeclimate", "software"],
"go": ["cli", "library"],
"npm": ["app", "cli", "config", "library", "plugin"],
"packer": ["desktop", "server"],
"python": ["cli", "library"]
},
"homebrew": {
"folder": "Formula",
"name": "homebrew-tap",
"owner": "installdoc"
},
"hostapp_var_chart": [
["App", "Description", "GitHub&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"],
[
"**[Authelia](https://www.authelia.com/)**",
"An authentication portal that supports SSO and 2FA (_[Homepage](https://www.authelia.com/) | [Documentation](https://www.authelia.com/docs/)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/authelia/authelia?style=social)](https://github.com/authelia/authelia)"
],
[
"**[Homer](https://github.com/bastienwirtz/homer)**",
"A very simple homepage which is customized by the playbook to automatically include links to the Docker containers you choose to host on the computer (_[Demo](https://homer-demo.netlify.app/)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/bastienwirtz/homer?style=social)](https://github.com/bastienwirtz/homer)"
],
[
"**[Portainer](https://www.portainer.io/)**",
"A Docker management tool (_[Homepage](https://www.portainer.io/) | [Documentation](https://docs.portainer.io/) | [Demo](https://github.com/portainer/portainer#demo)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/portainer/portainer?style=social)](https://github.com/portainer/portainer)"
],
[
"**[Serve](https://github.com/vercel/serve)**",
"Simple interface for viewing files located or symlinked to in the `/var/www/` folder of the machine",
"[![GitHub Repo stars](https://img.shields.io/github/stars/vercel/serve?style=social)](https://github.com/vercel/serve)"
]
],
"htpc_var_chart": [
["App", "Description", "GitHub&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"],
[
"**[WireGuard](https://docs.linuxserver.io/images/docker-wireguard)**",
"Dedicated WireGuard VPN for the HTPC applications which is configured in *our docker-compose.yml* file to be used as the internet connection for all the containers (_[Homepage](https://www.wireguard.com/)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/linuxserver/docker-wireguard?style=social)](https://github.com/linuxserver/docker-wireguard)"
],
[
"**[Bazarr](https://docs.linuxserver.io/images/docker-bazarr)**",
"Manages and automatically downloads subtitles (_[Homepage](https://www.bazarr.media/) | [Documentation](https://wiki.bazarr.media/)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/morpheus65535/bazarr?style=social)](https://github.com/morpheus65535/bazarr)"
],
[
"**[Heimdall](https://docs.linuxserver.io/images/docker-heimdall)**",
"Simple start page for all the HTPC apps (_[Homepage](https://heimdall.site/)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/linuxserver/Heimdall?style=social)](https://github.com/linuxserver/Heimdall)"
],
[
"**[Jackett](https://docs.linuxserver.io/images/docker-jackett)**",
"Request proxy server for Radarr and Sonarr which helps speed things up",
"[![GitHub Repo stars](https://img.shields.io/github/stars/Jackett/Jackett?style=social)](https://github.com/Jackett/Jackett)"
],
[
"**[Kodi Headless](https://hub.docker.com/r/linuxserver/kodi-headless)**",
"Backend for Kodi used to host a centralized database for Kodi instances (_[Homepage](https://kodi.tv/) | [Documentation](https://kodi.wiki/view/Main_Page)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/xbmc/xbmc?style=social)](https://github.com/xbmc/xbmc)"
],
[
"**[Lidarr](https://docs.linuxserver.io/images/docker-lidarr)**",
"Music collection manager that automatically downloads from BitTorrent and Usenet (_[Homepage](https://lidarr.audio/) | [Documentation](https://wiki.servarr.com/en/lidarr)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/Lidarr/Lidarr?style=social)](https://github.com/Lidarr/Lidarr)"
],
[
"**[NZBGet](https://docs.linuxserver.io/images/docker-nzbget)**",
"NZBGet is a Usenet download manager used to download from NewsGroups which are supposedly more secure than torrents. **NOTE: Viruses are still prevalent on both NewsGroups and torrents - make sure you don't run anything with admin / sudo privileges.** (_[Homepage](https://nzbget.net/) | [Documentation](https://nzbget.net/documentation)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/nzbget/nzbget?style=social)](https://github.com/nzbget/nzbget)"
],
[
"**[Ombi](https://docs.linuxserver.io/images/docker-ombi)**",
"Plex media request and user management system which can be used to allow users who use your HTPC server to request movies, TV shows, and other media (_[Homepage](https://ombi.io/) | [Documentation](https://docs.ombi.app/) | [Demo](https://app.ombi.io/)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/Ombi-app/Ombi?style=social)](https://github.com/Ombi-app/Ombi)"
],
[
"**[Organizr](https://docs.linuxserver.io/images/docker-htpcmanager)**",
"Front end for HTPC web applications with a full-featured user interface that is full of eye candy (_[Homepage](https://organizr.app/) | [Documentation](https://docs.organizr.app/) | [Demo](https://docs.organizr.app/)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/causefx/Organizr?style=social)](https://github.com/causefx/Organizr)"
],
[
"**[Radarr](https://docs.linuxserver.io/images/docker-radarr)**",
"Automatic movie downloader that can even be configured to download lists including the Top 250 IMBD movies (_[Homepage](https://radarr.video/) | [Documentation](https://wiki.servarr.com/radarr)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/Radarr/Radarr?style=social)](https://github.com/Radarr/Radarr)"
],
[
"**[Sonarr](https://docs.linuxserver.io/images/docker-sonarr)**",
"Automatic TV show downloader with tons of ways to easily and automatically download TV shows (_[Homepage](https://sonarr.tv/) | [Documentation](https://wiki.servarr.com/en/sonarr)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/Sonarr/Sonarr?style=social)](https://github.com/Sonarr/Sonarr)"
],
[
"**[Tautulli](https://docs.linuxserver.io/images/docker-tautulli)**",
"Metrics and monitoring dashboard for Plex (_[Homepage](https://tautulli.com/) | [Documentation](https://wiki.bazarr.media/)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/Tautulli/Tautulli?style=social)](https://github.com/Tautulli/Tautulli)"
],
[
"**[Transmission](https://docs.linuxserver.io/images/docker-transmission)**",
"BitTorrent client that can be used in conjunction with or as an alternative to using NewsGroups via NZBGet (_[Homepage](https://transmissionbt.com/) | [Documentation](https://github.com/transmission/transmission/blob/main/docs/README.md)_)",
"[![GitHub Repo stars](https://img.shields.io/github/stars/transmission/transmission?style=social)](https://github.com/transmission/transmission)"
]
],
"idPost": "megabyte.space",
"json_top_keys": "",
"license": "MIT",
"link": {
"chat": "https://app.slack.com/client/T01ABCG4NK1/C01NN74H0LW/details/",
"docker_role": "https://github.com/ProfessorManhattan/ansible-docker",
"docs": "https://megabyte.space/docs",
"gitter": "https://gitter.im/megabyte-labs/community",
"home": "https://megabyte.space",
"installdoctor": "https://install.doctor",
"mod_ansible_autodoc": "https://pypi.org/project/mod-ansible-autodoc/",
"privacy": "https://megabyte.space/privacy",
"shield": "https://shields.io",
"terms": "https://megabyte.space/terms"
},
"misc": {
"appnest": "https://github.com/andreasbm/readme",
"husky": "https://www.npmjs.com/package/husky",
"tinypng": "https://tinypng.com/"
},
"molecule_descriptions": [
["Scenario", "Description"],
["`default`", "Uses VirtualBox to run tests for all platforms in parallel."],
["`docker`", "Uses Docker to run tests for all Linux platforms and versions in parallel."],
[
"`docker-snap`",
"The same as the `docker` scenario except it excludes platforms that have trouble installing snap packages on Docker."
],
["`archlinux-desktop`", "Runs the test on the latest version of Archlinux desktop using VirtualBox."],
["`centos-desktop`", "Runs the test on the latest version of CentOS desktop using VirtualBox."],
["`debian-desktop`", "Runs the test on the latest version of Debian desktop using VirtualBox."],
["`fedora-desktop`", "Runs the test on the latest version of Fedora desktop using VirtualBox."],
["`macos-desktop`", "Runs the test on the latest version of macOS desktop using VirtualBox."],
["`ubuntu-desktop`", "Runs the test on the latest version of Ubuntu desktop using VirtualBox."],
["`windows-desktop`", "Runs the test on the latest version of Windows desktop using VirtualBox."],
["`ci-docker-archlinux`", "Uses Docker to test Archlinux."],
["`ci-docker-centos`", "Uses Docker to test multiple versions of CentOS."],
["`ci-docker-debian`", "Uses Docker to test multiple versions of Debian."],
[
"`ci-docker-debian-snap`",
"Uses Docker to test Debian just like `ci-docker-debian` except it excludes versions that cannot install snap packages."
],
["`ci-docker-fedora`", "Uses Docker to test multiple versions of Fedora."],
["`ci-docker-ubuntu`", "Uses Docker to test multiple versions of Ubuntu."]
],
"name": "[[ package.json .blueprint.name - See CONTRIBUTING.md ]]",
"newProjectTemplates": {
"angular-app": {
"group": "angular",
"subgroup": "app",
"title": "Angular App"
},
"angular-website": {
"group": "angular",
"subgroup": "website",
"title": "Angular Website"
},
"ansible-role": {
"group": "ansible",
"subgroup": "role",
"title": "Ansible Role"
},
"docker-app": {
"group": "docker",
"subgroup": "app",
"title": "Dockerfile (App)"
},
"docker-ci-pipeline": {
"group": "docker",
"subgroup": "ci-pipeline",
"title": "CI/CD Pipeline Dockerfile"
},
"docker-codeclimate": {
"group": "docker",
"subgroup": "codeclimate",
"title": "CodeClimate Engine / Linter Dockerfile"
},
"docker-docker-compose": {
"group": "docker",
"subgroup": "docker-compose",
"title": "Docker Compose"
},
"go-cli": {
"group": "go",
"subgroup": "cli",
"title": "Go CLI"
},
"go-library": {
"group": "go",
"subgroup": "library",
"title": "Go Library"
},
"misc": {
"group": "misc",
"subgroup": "misc",
"title": "Miscellaneous project"
},
"npm-app": {
"group": "npm",
"subgroup": "app",
"title": "Node.js App"
},
"npm-cli": {
"group": "npm",
"subgroup": "cli",
"title": "Node.js CLI"
},
"npm-configs": {
"group": "npm",
"subgroup": "configs",
"title": "NPM Config Package"
},
"npm-library": {
"group": "npm",
"subgroup": "library",
"title": "Node.js Library"
},
"npm-plugin": {
"group": "npm",
"subgroup": "plugin",
"title": "NPM Plugin"
},
"npm-web-component": {
"group": "npm",
"subgroup": "web-component",
"title": "Web Component"
},
"packer-desktop": {
"group": "packer",
"subgroup": "desktop",
"title": "Packer (Desktop)"
},
"packer-server": {
"group": "packer",
"subgroup": "server",
"title": "Packer (Server)"
},
"python-cli": {
"group": "python",
"subgroup": "cli",
"title": "Python CLI"
},
"python-library": {
"group": "python",
"subgroup": "library",
"title": "Python Library"
},
"website": {
"group": "npm",
"subgroup": "website",
"title": "Website"
}
},
"npm_publish_config_access": "public",
"npm_standard_version_prerelease": "git add --all",
"npm_type": "module",
"organization": "Megabyte Labs",
"overview": "[[ This is a new repository without the details filled in yet. Look for the section about blueprint data in the CONTRIBUTING.md to set up the project. ]]",
"playbook_path": "megabyte-labs/gas-station",
"profile": {
"dockerHubUser": "professormanhattan",
"dockerhub": "megabytelabs",
"galaxy": "professormanhattan",
"github": "ProfessorManhattan",
"githubOrg": "megabyte-labs",
"linkedin": "blzalewski",
"npmjs": "thisismyfirstday",
"npmjs_organization": "installdoc",
"opencollective": "megabytelabs",
"patreon": "ProfessorManhattan",
"pypi": "ProfessorManhattan",
"replit": "ProfessorMegaby",
"stackblitz": "ProfessorManhattan",
"twitter": "MegabyteLabs",
"vagrant": "ProfessorManhattan"
},
"profile_link": {
"dockerhub": "https://hub.docker.com/u",
"galaxy": "https://galaxy.ansible.com",
"github": "https://github.com",
"linkedin": "https://www.linkedin.com/in/",
"npmjs": "https://www.npmjs.com/~",
"opencollective": "https://opencollective.com",
"patreon": "https://www.patreon.com",
"pypi": "https://pypi.org/user",
"replit": "https://repl.it/@",
"stackblitz": "https://stackblitz.com/@",
"twitter": "MegabyteLabs",
"vagrant": "https://app.vagrantup.com"
},
"python_role_dependencies": [
["Package", "Description", "Required"],
[
"<b><a href=\"https://pypi.org/project/ansible/\" title=\"ansible on pypi.org\" target=\"_blank\">ansible</a></b>",
"A configuration management system that can remotely configure computers",
"<div align=\"center\">✔️</div>"
],
[
"<b><a href=\"https://pypi.org/project/docker/\" title=\"docker on pypi.org\" target=\"_blank\">docker</a></b>",
"Enables the capability of provisioning Docker containers with Ansible",
"<div align=\"center\">✔️</div>"
],
[
"<b><a href=\"https://pypi.org/project/python-vagrant/\" title=\"python-vagrant on pypi.org\" target=\"_blank\">python-vagrant</a></b>",
"Required for provisioning Vagrant VMs",
"<div align=\"center\">✔️</div>"
],
[
"<b><a href=\"https://pypi.org/project/pywinrm/\" title=\"pywinrm on pypi.org\" target=\"_blank\">pywinrm</a></b>",
"Required for provisioning Windows machines that are using WinRM",
"<div align=\"center\">✔️</div>"
],
[
"<b><a href=\"https://pypi.org/project/ansible-lint/\" title=\"ansible-lint on pypi.org\" target=\"_blank\">ansible-lint</a></b>",
"Linting tool for Ansible files",
""
],
[
"<b><a href=\"https://pypi.org/project/ansibler/\" title=\"ansibler on pypi.org\" target=\"_blank\">ansibler</a></b>",
"Custom tool used to generate advanced documentation (e.g. it generates the compatibility chart and some other charts)",
""
],
[
"<b><a href=\"https://pypi.org/project/black/\" title=\"black on pypi.org\" target=\"_blank\">black</a></b>",
"Python file auto-formatter included in case project utilizes Python test scripts",
""
],
[
"<b><a href=\"https://pypi.org/project/blocklint/\" title=\"blocklint on pypi.org\" target=\"_blank\">blocklint</a></b>",
"Linting tool that prevents certain words from entering the code base",
""
],
[
"<b><a href=\"https://pypi.org/project/flake8/\" title=\"flake8 on pypi.org\" target=\"_blank\">flake8</a></b>",
"Python linter that reports Python syntax and style errors",
""
],
[
"<b><a href=\"https://pypi.org/project/mod-ansible-autodoc/\" title=\"mod-ansible-autodoc on pypi.org\" target=\"_blank\">mod-ansible-autodoc</a></b>",
"Custom fork of [ansible-autodoc](https://pypi.org/project/ansible-autodoc/0.5.1.1/) which allows us to auto-generate documentation based on comments in the role's YAML files",
""
],
[
"<b><a href=\"https://pypi.org/project/molecule/\" title=\"molecule on pypi.org\" target=\"_blank\">molecule</a></b>",
"Test framework for Ansible",
""
],
[
"<b><a href=\"https://pypi.org/project/molecule-docker/\" title=\"molecule-docker on pypi.org\" target=\"_blank\">molecule-docker</a></b>",
"Molecule plugin for provisioning Docker containers",
""
],
[
"<b><a href=\"https://pypi.org/project/molecule-vagrant/\" title=\"molecule-vagrant on pypi.org\" target=\"_blank\">molecule-vagrant</a></b>",
"Molecule plugin for provisioning Vagrant VMs",
""
],
[
"<b><a href=\"https://pypi.org/project/pre-commit-hooks/\" title=\"pre-commit-hooks on pypi.org\" target=\"_blank\">pre-commit-hooks</a></b>",
"Suite of tools useful for linting",
""
],
[
"<b><a href=\"https://pypi.org/project/proselint/\" title=\"proselint on pypi.org\" target=\"_blank\">proselint</a></b>",
"Linter used to generate English-language improvements (used to improve documentation)",
""
],
[
"<b><a href=\"https://pypi.org/project/yamllint/\" title=\"yamllint on pypi.org\" target=\"_blank\">yamllint</a></b>",
"Linter for YAML files that ensures proper syntax and styling is used",
""
]
],
"redditApplicationId": "O3UxD7HlPpcN88gpEkPIXQ",
"redditUsername": "tsgangster",
"repository": {
"github": "",
"gitlab": "",
"gitlabBaseUrl": "https://gitlab.com/megabyte-labs",
"group": {
"ansible_roles": "https://gitlab.com/megabyte-labs/ansible-roles",
"ansible_roles_path": "megabyte-labs/ansible-roles",
"apps": "https://gitlab.com/megabyte-labs/apps",
"apps_path": "megabyte-labs/apps",
"ci": "https://gitlab.com/megabyte-labs/ci",
"ci_path": "megabyte-labs/ci",
"cloud": "https://gitlab.com/megabyte-labs/cloud",
"cloud_path": "megabyte-labs/cloud",
"common": "https://gitlab.com/megabyte-labs/common",
"common_path": "megabyte-labs/common",
"cryptocurrency": "https://gitlab.com/megabyte-labs/cryptocurrency",
"cryptocurrency_path": "megabyte-labs/cryptocurrency",
"docker_compose": "https://gitlab.com/megabyte-labs/docker-compose",
"docker_compose_path": "megabyte-labs/docker-compose",
"dockerfile": "https://gitlab.com/megabyte-labs/docker",
"dockerfile_path": "megabyte-labs/docker",
"documentation": "https://gitlab.com/megabyte-labs/documentation",
"documentation_path": "megabyte-labs/documentation",
"go": "https://gitlab.com/megabyte-labs/go",
"go_path": "megabyte-labs/go",
"kubernetes": "https://gitlab.com/megabyte-labs/kubernetes",
"kubernetes_path": "megabyte-labs/kubernetes_path",
"npm": "https://gitlab.com/megabyte-labs/npm",
"npm_path": "megabyte-labs/npm",
"packer": "https://gitlab.com/megabyte-labs/packer",
"packer_path": "megabyte-labs/packer",
"python": "https://gitlab.com/megabyte-labs/python",
"python_cli_path": "megabyte-labs/python/cli",
"python_path": "megabyte-labs/python",
"software": "https://gitlab.com/megabyte-labs/software",
"software_path": "megabyte-labs/software",
"web_components": "https://gitlab.com/megabyte-labs/web-components",
"web_components_path": "megabyte-labs/web-components"
},
"location": {
"commits": {
"github": "/commits/master",
"gitlab": "/-/commits/master",
"gitlab_e2e": "/-/commits/e2e"
},
"conduct": {
"github": "/blob/master/docs/CODE_OF_CONDUCT.md",
"gitlab": "/-/blob/master/docs/CODE_OF_CONDUCT.md"
},
"contributing": {
"github": "/blob/master/docs/CONTRIBUTING.md",
"gitlab": "/-/blob/master/docs/CONTRIBUTING.md"
},
"demo": {
"github": "/raw/master/docs/demo.gif",
"gitlab": "/-/raw/master/docs/demo.gif"
},
"issues": {
"github": "/issues",
"gitlab": "/-/issues"
},
"license": {
"github": "/blob/master/LICENSE",
"gitlab": "/-/blob/master/LICENSE"
},
"logo": {
"github": "/raw/master/logo.png",
"gitlab": "/-/raw/master/logo.png"
},
"readme": {
"github": "/blob/master/README.md",
"gitlab": "/-/blob/master/README.md"
}
},
"namespace": "",
"prefix": "ansible-",
"project": {
"assets": "https://gitlab.com/megabyte-labs/assets/-/raw/master",
"autodoc": "https://github.com/AndresBott/ansible-autodoc",
"playbooks": "https://github.com/ProfessorManhattan/Gas-Station",
"wrangler": "https://gitlab.com/megabyte-labs/wrangler"
}
},
"saas_var_chart": [
["Service", "Description", "Price"],
[
"**[CloudFlare](https://www.cloudflare.com/)**",
"CloudFlare is a DNS provider, edge network, and much more. Some day it might be able to replace all the services in this list but until then CloudFlare is the preferred provider for anything it offers a product for. In our configurations, CloudFlare is used for DNS, encrypted tunnels via [cloudflared](https://github.com/cloudflare/cloudflared), [CloudFlare WARP](https://1.1.1.1/), and [CloudFlare Teams](https://blog.cloudflare.com/introducing-cloudflare-for-teams/). On top of that, CloudFlare provides some other great features that can be utilized to make lightning-fast web apps. (_[Documentation](https://developers.cloudflare.com/docs/)_)",
"**Free** for the services we integrate"
],
[
"**[Digital Ocean](https://m.do.co/c/751743d45e36)**",
"Digital Ocean is a cloud hosting provider. Anytime CloudFlare's offerings are not enough to satisfy requirements, Digital Ocean is used. The service has a clean and simple web UI, a wide variety of CLIs/SDKs available on GitHub, and the company has been around since 2011. Digital Ocean is primarily used by our stack to host Kubernetes, S3 buckets, and cheap virtual private servers. (_[Documentation](https://docs.digitalocean.com/)_)",
"**~$40/month** for a Kubernetes cluster, S3 bucket, and a general-purpose VPS"
],
[
"**[Wasabi](https://wasabi.com/)**",
"Wasabi is the cheapest S3 bucket provider available. It is used as a secondary backup for any data that is backed up / saved to an S3 bucket. (_[Documentation](https://wasabi.com/help/docs/)_)",
"**$5.99/month** for S3 bucket"
],
[
"**[Ory](https://www.ory.sh/)**",
"Ory is the only identity platform that can scale indefinitely and is based entirely on open source. Ory is leveraged to provide a feature-rich and programmable single sign-on platform. It includes support for hardware-based tokens. (_[Documentation](https://www.ory.sh/docs/welcome)_)",
"**Free** for the developer edition"
],
[
"**[Proton](https://proton.me/)**",
"Proton Mail is an end-to-end encrypted email service founded in 2013 in Geneva, Switzerland. Proton Mail and ProtonVPN are used in our stack to provide secure e-mail and configure VPN profiles using ProtonVPN's unique security features. With the Business plan, you can get custom domain branded e-mail and enough VPN connections to configure your router / VPN profiles on each of your devices. (_[Documentation](https://proton.me/support)_)",
"**$12.99/month** for the Business edition"
],
[
"**[GMail](https://mail.google.com)**",
"GMail is a free e-mail service offered by Google. In some cases, we leverage GMail's SMTP capabilities to send notification e-mails. (_[Documentation](https://support.google.com/mail/?hl=en#topic=7065107)_)",
"**Free**"
]
],
"scriptsBuild": "task donothing",
"scriptsHelp": "task --menu",
"scriptsPrepare": "npm run start && (test -f Taskfile.yml && task common:husky) || true",
"scriptsReplaceThis": "\"",
"scriptsReplaceWith": "\\\"",
"scriptsStart": "bash start.sh",
"scriptsTest": "task donothing",
"sharp_instructions": [],
"slackNotificationChannel": "#misc",
"slackNotificationIcon": "https://gitlab.com/megabyte-labs/misc/assets/-/raw/master/logo/megabytelabs-color-icon-350x350.png",
"slackNotificationUsername": "Megabyte Labs Release Notification Bot",
"sponsorship": {
"author": "Brian Zalewski",
"text": "I create open source projects out of love. Although I have a job, shelter, and as much fast food as I can handle, it would still be pretty cool to be appreciated by the community for something I have spent a lot of time and money on. Please consider sponsoring me! Who knows? Maybe I will be able to quit my job and publish open source full time."
},
"subgroup": "[[ Needs to be setup. Set this in the common repository for this type of project ]]",
"teamsNotificationColor": "#1DA1F2",
"teamsNotificationIcon": "https://gitlab.com/megabyte-labs/misc/assets/-/raw/master/logo/megabytelabs-color-icon-350x350.png",
"title": "[[ package.json .blueprint.title - See CONTRIBUTING.md ]]",
"version": "0.0.1"
}

4
.config/flake8.toml Normal file
View file

@ -0,0 +1,4 @@
[flake8]
exclude = .autodoc, .cache, .common, .config, .git, .github, .gitlab, .husky, .modules, .npm, .pnpm-store, .shared, .task, .venv, .vscode, build, dist, node_modules, roles, venv
ignore = E402
max-line-length = 120

8
.config/hadolint.yml Normal file
View file

@ -0,0 +1,8 @@
---
ignored:
# Last USER should not be root
- DL3002
# Do not use sudo
- DL3004
# Do not use `latest` images
- DL3007

70
.config/hbs.cjs Normal file
View file

@ -0,0 +1,70 @@
const fs = require('fs')
const { execSync } = require('child_process')
function getTaskIncludeKey(path) {
return path
.replace('.config/taskfiles/', '')
.replace('local/', '')
.replace('/Taskfile-', ':')
.replace('/Taskfile.yml', '')
.replace('Taskfile-', '')
.replace('.yml', '')
}
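// Illustrative mapping (hypothetical paths) produced by the chain of replacements above:
//   '.config/taskfiles/ansible/Taskfile-molecule.yml' -> 'ansible:molecule'
//   'local/Taskfile-custom.yml' -> 'custom'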
module.exports.register = function (Handlebars) {
/**
* Import [handlebars-helpers](https://github.com/helpers/handlebars-helpers)
*/
require('handlebars-helpers')({
handlebars: Handlebars
})
/**
* Used to generate the includes: section of the main Taskfile.yml
* in the root of every repository
*/
Handlebars.registerHelper('bodegaIncludes', (pattern, options) => {
const readdir = Handlebars.helpers.readdir
const files = readdir('.config/taskfiles/')
const tasks = Handlebars.helpers.each([...files, './local'], {
fn: (file) => {
if (fs.lstatSync(file).isDirectory()) {
return readdir(file).filter((taskfile) => taskfile.match(/.*Taskfile.*\.yml/gu))
} else {
return []
}
}
})
return tasks
.replaceAll('.config/taskfiles/', ',.config/taskfiles/')
.replaceAll('local/', ',local/')
.split(',')
.map((path) => ({
key: getTaskIncludeKey(path),
taskPath: './' + path,
optional: path.includes('local/Taskfile-')
}))
.filter((x) => !!x.key)
.sort((a, b) => a.key.localeCompare(b.key))
})
/**
* Used for returning input from synchronous commands (i.e. bash commands)
*/
Handlebars.registerHelper('execSync', function (input, options) {
const output = execSync(input)
return output
})
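// Illustrative template usage (assuming a `git` binary is available in the environment):
//   {{execSync "git rev-parse --short HEAD"}} embeds the current short commit hash into the output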
/**
* Used for generating Homebrew resource stanzas for Python packages.
* For more information, see: https://github.com/tdsmith/homebrew-pypi-poet
*/
Handlebars.registerHelper('poet', function (input, options) {
const formulae = execSync('poetry run poet -f ' + input)
return formulae
})
}

1
.config/husky/.gitignore vendored Normal file
View file

@ -0,0 +1 @@
_

34
.config/husky/commit-msg Executable file
View file

@ -0,0 +1,34 @@
#!/bin/sh
# shellcheck disable=SC1090,SC1091,SC2016
# @file .config/husky/commit-msg
# @brief A git hook script for the `commit-msg` hook
# @arg $1 Path to a temporary file that contains the commit message written by the developer (e.g. .git/COMMIT_EDITMSG)
[ -f .config/log ] && chmod +x .config/log
if [ -f "$(dirname "$0")/_/husky.sh" ]; then
. "$(dirname "$0")/_/husky.sh"
# Attempt to register Bodega/Task from common places if it is not in PATH
if ! type task > /dev/null; then
PATH="$PATH:$HOME/.local/go/bin:$HOME/.local/bin:$HOME/bin:$HOME/go/bin:$HOME/.asdf/shims"
if ! type task > /dev/null; then
for DOTFILE in .profile .bashrc .bash_profile .zshrc; do
. "$HOME/$DOTFILE"
if type task > /dev/null; then
break
fi
done
fi
fi
# Show warning if Bodega/Task is still not registered/installed, else proceed with hook
if ! type task > /dev/null; then
.config/log warn 'Bodega `task` does not appear to be installed or is not registered in the `PATH` variable - please manually include it'
.config/log info 'Get Bodega here -> `https://github.com/megabyte-labs/Bodega`'
else
task git:hook:commit-msg -- "$1"
fi
else
.config/log warn 'Husky pre-commit hooks are currently not properly setup.'
fi

36
.config/husky/post-checkout Executable file
View file

@ -0,0 +1,36 @@
#!/bin/sh
# shellcheck disable=SC1090,SC1091,SC2016
# @file .config/husky/post-checkout
# @brief A git hook script for the `post-checkout` hook
# @arg $1 The ref of the previous HEAD (e.g. f693bc50756b490f7ad067eb455338b634d01036)
# @arg $2 The ref of the new HEAD
# @arg $3 Equal to 1 if changing branches
[ -f .config/log ] && chmod +x .config/log
if [ -f "$(dirname "$0")/_/husky.sh" ]; then
. "$(dirname "$0")/_/husky.sh"
# Attempt to register Bodega/Task from common places if it is not in PATH
if ! type task > /dev/null; then
PATH="$PATH:$HOME/.local/go/bin:$HOME/.local/bin:$HOME/bin:$HOME/go/bin:$HOME/.asdf/shims"
if ! type task > /dev/null; then
for DOTFILE in .profile .bashrc .bash_profile .zshrc; do
. "$HOME/$DOTFILE"
if type task > /dev/null; then
break
fi
done
fi
fi
# Show warning if Bodega/Task is still not registered/installed, else proceed with hook
if ! type task > /dev/null; then
.config/log warn 'Bodega `task` does not appear to be installed or is not registered in the `PATH` variable - please manually include it'
.config/log info 'Get Bodega here -> `https://github.com/megabyte-labs/Bodega`'
else
task git:hook:post-checkout
fi
else
.config/log warn 'Husky pre-commit hooks are currently not properly setup.'
fi

34
.config/husky/post-commit Executable file
View file

@ -0,0 +1,34 @@
#!/bin/sh
# shellcheck disable=SC1090,SC1091,SC2016
# @file .config/husky/post-commit
# @brief A git hook script for the `post-commit` hook. There are no parameters but you can easily get the
# last commit by running `git log -1 HEAD`. Generally, this script is used for notifications or something similar.
[ -f .config/log ] && chmod +x .config/log
if [ -f "$(dirname "$0")/_/husky.sh" ]; then
. "$(dirname "$0")/_/husky.sh"
# Attempt to register Bodega/Task from common places if it is not in PATH
if ! type task > /dev/null; then
PATH="$PATH:$HOME/.local/go/bin:$HOME/.local/bin:$HOME/bin:$HOME/go/bin:$HOME/.asdf/shims"
if ! type task > /dev/null; then
for DOTFILE in .profile .bashrc .bash_profile .zshrc; do
. "$HOME/$DOTFILE"
if type task > /dev/null; then
break
fi
done
fi
fi
# Show warning if Bodega/Task is still not registered/installed, else proceed with hook
if ! type task > /dev/null; then
.config/log warn 'Bodega `task` does not appear to be installed or is not registered in the `PATH` variable - please manually include it'
.config/log info 'Get Bodega here -> `https://github.com/megabyte-labs/Bodega`'
else
task git:hook:post-commit
fi
else
.config/log warn 'Husky pre-commit hooks are currently not properly setup.'
fi

34
.config/husky/post-merge Executable file
View file

@ -0,0 +1,34 @@
#!/bin/sh
# shellcheck disable=SC1090,SC1091,SC2016
# @file .config/husky/post-merge
# @brief A git hook script for the `post-merge` hook
# @arg $1 A status flag specifying whether or not the merge being done was a squash merge
[ -f .config/log ] && chmod +x .config/log
if [ -f "$(dirname "$0")/_/husky.sh" ]; then
. "$(dirname "$0")/_/husky.sh"
# Attempt to register Bodega/Task from common places if it is not in PATH
if ! type task > /dev/null; then
PATH="$PATH:$HOME/.local/go/bin:$HOME/.local/bin:$HOME/bin:$HOME/go/bin:$HOME/.asdf/shims"
if ! type task > /dev/null; then
for DOTFILE in .profile .bashrc .bash_profile .zshrc; do
. "$HOME/$DOTFILE"
if type task > /dev/null; then
break
fi
done
fi
fi
# Show warning if Bodega/Task is still not registered/installed, else proceed with hook
if ! type task > /dev/null; then
.config/log warn 'Bodega `task` does not appear to be installed or is not registered in the `PATH` variable - please manually include it'
.config/log info 'Get Bodega here -> `https://github.com/megabyte-labs/Bodega`'
else
task git:hook:post-merge
fi
else
.config/log warn 'Husky pre-commit hooks are currently not properly setup.'
fi

35
.config/husky/post-rewrite Executable file
View file

@ -0,0 +1,35 @@
#!/bin/sh
# shellcheck disable=SC1090,SC1091,SC2016
# @file .config/husky/post-rewrite
# @brief A git hook script for the `post-rewrite` hook. It is called when running commands
# that rewrite commits (e.g. git pull origin master --rebase)
# @arg $1 Denotes the command it was invoked by: currently one of amend or rebase
[ -f .config/log ] && chmod +x .config/log
if [ -f "$(dirname "$0")/_/husky.sh" ]; then
. "$(dirname "$0")/_/husky.sh"
# Attempt to register Bodega/Task from common places if it is not in PATH
if ! type task > /dev/null; then
PATH="$PATH:$HOME/.local/go/bin:$HOME/.local/bin:$HOME/bin:$HOME/go/bin:$HOME/.asdf/shims"
if ! type task > /dev/null; then
for DOTFILE in .profile .bashrc .bash_profile .zshrc; do
. "$HOME/$DOTFILE"
if type task > /dev/null; then
break
fi
done
fi
fi
# Show warning if Bodega/Task is still not registered/installed, else proceed with hook
if ! type task > /dev/null; then
.config/log warn 'Bodega `task` does not appear to be installed or is not registered in the `PATH` variable - please manually include it'
.config/log info 'Get Bodega here -> `https://github.com/megabyte-labs/Bodega`'
else
task git:hook:post-rewrite
fi
else
.config/log warn 'Husky pre-commit hooks are currently not properly setup.'
fi

49
.config/husky/pre-commit Executable file
View file

@ -0,0 +1,49 @@
#!/bin/sh
# shellcheck disable=SC1090,SC1091,SC2016
# @file .config/husky/pre-commit
# @brief A git hook script for the `pre-commit` hook
[ -f .config/log ] && chmod +x .config/log
if [ -f "$(dirname "$0")/_/husky.sh" ]; then
. "$(dirname "$0")/_/husky.sh"
# Attempt to register Bodega/Task from common places if it is not in PATH
if ! type task > /dev/null; then
PATH="$PATH:$HOME/.local/go/bin:$HOME/.local/bin:$HOME/bin:$HOME/go/bin:$HOME/.asdf/shims"
if ! type task > /dev/null; then
for DOTFILE in .profile .bashrc .bash_profile .zshrc; do
. "$HOME/$DOTFILE"
if type task > /dev/null; then
break
fi
done
fi
fi
# Show warning if Bodega/Task is still not registered/installed, else proceed with hook
if ! type task > /dev/null; then
.config/log warn 'Bodega `task` does not appear to be installed or is not registered in the `PATH` variable - please manually include it'
.config/log info 'Get Bodega here -> `https://github.com/megabyte-labs/Bodega`'
else
.config/log info "Performing various pre-commit tasks on staged files (like autofixing, detecting private keys, etc.)"
STAGED_FILES=$(git diff --cached --name-only)
for FILE in $STAGED_FILES; do
if [ -f "$FILE" ]; then
task git:hook:pre-commit -- "$FILE" &
fi
done
wait
.config/log info 'Linting and fixing the staged files with `lint-staged`'
task lint:lint-staged
.config/log info 'Reporting possible spelling errors in the staged files with `cspell`'
task lint:spelling
.config/log success 'Pre-commit validation complete!'
fi
else
.config/log warn 'Husky pre-commit hooks are currently not properly setup.'
fi

35
.config/husky/pre-push Executable file
View file

@ -0,0 +1,35 @@
#!/bin/sh
# shellcheck disable=SC1090,SC1091,SC2016
# @file .config/husky/pre-push
# @brief A git hook script for the `pre-push` hook
# @arg $1 The name of the remote (e.g. origin)
# @arg $2 The location of the remote (e.g. git@gitlab.com:megabyte-labs/common/angular.git)
[ -f .config/log ] && chmod +x .config/log
if [ -f "$(dirname "$0")/_/husky.sh" ]; then
. "$(dirname "$0")/_/husky.sh"
# Attempt to register Bodega/Task from common places if it is not in PATH
if ! type task > /dev/null; then
PATH="$PATH:$HOME/.local/go/bin:$HOME/.local/bin:$HOME/bin:$HOME/go/bin:$HOME/.asdf/shims"
if ! type task > /dev/null; then
for DOTFILE in .profile .bashrc .bash_profile .zshrc; do
. "$HOME/$DOTFILE"
if type task > /dev/null; then
break
fi
done
fi
fi
# Show warning if Bodega/Task is still not registered/installed, else proceed with hook
if ! type task > /dev/null; then
.config/log warn 'Bodega `task` does not appear to be installed or is not registered in the `PATH` variable - please manually include it'
.config/log info 'Get Bodega here -> `https://github.com/megabyte-labs/Bodega`'
else
task git:hook:pre-push -- "$1 $2"
fi
else
.config/log warn 'Husky pre-commit hooks are currently not properly setup.'
fi

28
.config/husky/prepare-commit-msg Executable file
View file

@ -0,0 +1,28 @@
#!/bin/sh
# shellcheck disable=SC1091,SC2016
# @file .config/husky/prepare-commit-msg
# @brief A git hook script for the `prepare-commit-msg` hook. Add the `-n` flag to bypass.
# @arg $1 string The full path to the MERGE_MSG
# @arg $2 string The type of the `prepare-commit-msg` event. For a `git pull origin master`
# command, the event type is 'merge'.
[ -f .config/log ] && chmod +x .config/log
if [ -f "$(dirname "$0")/_/husky.sh" ]; then
. "$(dirname "$0")/_/husky.sh"
if [ "$2" != 'merge' ]; then
.config/log info 'This git hook is configured to run even when --no-verify is used. In order to bypass this prompt, use the -n flag instead.'
.config/log info 'Opening a `git commit` dialog'
if ! type pnpx > /dev/null && type npm > /dev/null; then
npm install -g pnpm
elif ! type pnpx > /dev/null; then
.config/log error '`pnpm` or `npm` must be installed'
fi
if ! type git-cz > /dev/null 2>&1; then
pnpm install -g commitizen
fi
exec < /dev/tty && (git-cz --hook || true)
fi
else
.config/log warn 'Husky pre-commit hooks are currently not properly setup.'
fi

393
.config/log Executable file
View file

@ -0,0 +1,393 @@
#!/usr/bin/env bash
# @file .config/log
# @brief Logger / prompt library that logs pretty console messages and provides several prompt methods
# @description
# This file contains several functions that log content in different formats. It also provides an
# interface for [Gum](https://github.com/charmbracelet/gum) based prompts. The available methods include:
#
# * `choose` - Prompt user with choices
# * `confirm` - Fancy Yes/No confirmation prompt
# * `error` - Logs an error message
# * `filter` - Filterable list of choices (with choices passed in as a line-return separated file)
# * `info` - Logs a regular message
# * `input` - Prompt for a text input
# * `md` - Render a markdown file with [Glow](https://github.com/charmbracelet/glow)
# * `password` - Prompt for text that is masked by the prompt
# * `prompt` - Log a description for a prompt that follows
# * `spin` - Show a spinner while background job completes
# * `star` - Logs a message with a star icon at the beginning
# * `start` - Log a job start message
# * `success` - Logs a success message
# * `warn` - Logs a warning message
# * `write` - Multi-line input prompt
#
# Unless the `container` environment variable is set to `docker`, the script / library will ensure both Gum and Glow are installed.
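# @example
#   # A few illustrative invocations (assuming this file has been made executable):
#   .config/log info 'Installing dependencies..'
#   RESPONSE="$(.config/log confirm 'Proceed with the installation?')"
#   FLAVOR="$(.config/log choose vanilla chocolate strawberry)"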
# @description Installs glow (a markdown renderer) from GitHub releases
# @example installGlow
installGlow() {
# TODO: Add support for other architecture types
if [ -d '/Applications' ] && [ -d '/Library' ] && [ -d '/Users' ]; then
GLOW_DOWNLOAD_URL="https://github.com/charmbracelet/glow/releases/download/v1.4.1/glow_1.4.1_Darwin_x86_64.tar.gz"
elif [ -f '/etc/ubuntu-release' ] || [ -f '/etc/debian_version' ] || [ -f '/etc/redhat-release' ] || [ -f '/etc/SuSE-release' ] || [ -f '/etc/arch-release' ] || [ -f '/etc/alpine-release' ]; then
GLOW_DOWNLOAD_URL="https://github.com/charmbracelet/glow/releases/download/v1.4.1/glow_1.4.1_linux_x86_64.tar.gz"
fi
if type curl &> /dev/null; then
if { [ -d '/Applications' ] && [ -d '/Library' ] && [ -d '/Users' ]; } || [ -f '/etc/ubuntu-release' ] || [ -f '/etc/debian_version' ] || [ -f '/etc/redhat-release' ] || [ -f '/etc/SuSE-release' ] || [ -f '/etc/arch-release' ] || [ -f '/etc/alpine-release' ]; then
TMP="$(mktemp)"
TMP_DIR="$(dirname "$TMP")"
curl -sSL "$GLOW_DOWNLOAD_URL" > "$TMP"
tar -xzf "$TMP" -C "$TMP_DIR"
if [ -n "$HOME" ]; then
if mkdir -p "$HOME/.local/bin" && mv "$TMP_DIR/glow" "$HOME/.local/bin/glow"; then
GLOW_PATH="$HOME/.local/bin/glow"
else
GLOW_PATH="$(dirname "${BASH_SOURCE[0]}")/glow"
mv "$TMP_DIR/gum" "$GLOW_PATH"
fi
chmod +x "$GLOW_PATH"
else
echo "WARNING: The HOME environment variable is not set! (Glow)"
fi
else
echo "WARNING: Unable to detect system type. (Glow)"
fi
fi
}
# @description Installs gum (a logging CLI) from GitHub releases
# @example installGum
installGum() {
# TODO: Add support for other architecture types
if [ -d '/Applications' ] && [ -d '/Library' ] && [ -d '/Users' ]; then
GUM_DOWNLOAD_URL="https://github.com/charmbracelet/gum/releases/download/v0.4.0/gum_0.4.0_Darwin_x86_64.tar.gz"
elif [ -f '/etc/ubuntu-release' ] || [ -f '/etc/debian_version' ] || [ -f '/etc/redhat-release' ] || [ -f '/etc/SuSE-release' ] || [ -f '/etc/arch-release' ] || [ -f '/etc/alpine-release' ]; then
GUM_DOWNLOAD_URL="https://github.com/charmbracelet/gum/releases/download/v0.4.0/gum_0.4.0_linux_x86_64.tar.gz"
fi
if type curl &> /dev/null; then
if { [ -d '/Applications' ] && [ -d '/Library' ] && [ -d '/Users' ]; } || [ -f '/etc/ubuntu-release' ] || [ -f '/etc/debian_version' ] || [ -f '/etc/redhat-release' ] || [ -f '/etc/SuSE-release' ] || [ -f '/etc/arch-release' ] || [ -f '/etc/alpine-release' ]; then
TMP="$(mktemp)"
TMP_DIR="$(dirname "$TMP")"
curl -sSL "$GUM_DOWNLOAD_URL" > "$TMP"
tar -xzf "$TMP" -C "$TMP_DIR"
if [ -n "$HOME" ]; then
if mkdir -p "$HOME/.local/bin" && mv "$TMP_DIR/gum" "$HOME/.local/bin/gum"; then
GUM_PATH="$HOME/.local/bin/gum"
else
GUM_PATH="$(dirname "${BASH_SOURCE[0]}")/gum"
mv "$TMP_DIR/gum" "$GLOW_PATH"
fi
chmod +x "$GUM_PATH"
else
echo "WARNING: The HOME environment variable is not set! (Gum)"
fi
else
echo "WARNING: Unable to detect system type. (Gum)"
fi
fi
}
# @description Configure the logger to use echo or gum
if [ "${container:=}" != 'docker' ]; then
# Acquire gum's path or attempt to install it
if type gum &> /dev/null; then
GUM_PATH="$(which gum)"
elif [ -f "$HOME/.local/bin/gum" ]; then
GUM_PATH="$HOME/.local/bin/gum"
elif [ -f "$(dirname "${BASH_SOURCE[0]}")/gum" ]; then
GUM_PATH="$(dirname "${BASH_SOURCE[0]}")/gum"
elif type brew &> /dev/null; then
brew install gum
GUM_PATH="$(which gum)"
else
installGum
fi
# If gum's path was set, then turn on enhanced logging
if [ -n "$GUM_PATH" ]; then
chmod +x "$GUM_PATH"
ENHANCED_LOGGING=true
fi
fi
# @description Disable logging for Semantic Release because it tries to parse it as JSON
if [ -n "$SEMANTIC_RELEASE" ]; then
NO_LOGGING=true
fi
# @description Logs a styled message using Gum
# @example logger info "An informative log"
logger() {
if [ "$1" == 'error' ]; then
"$GUM_PATH" style --border="thick" "$("$GUM_PATH" style --foreground="#ff0000" "✖") $("$GUM_PATH" style --bold --background="#ff0000" --foreground="#ffffff" " ERROR ") $("$GUM_PATH" style --bold "$(format "$2")")"
elif [ "$1" == 'info' ]; then
"$GUM_PATH" style " $("$GUM_PATH" style --foreground="#00ffff" "○") $2"
elif [ "$1" == 'md' ]; then
# @description Ensure glow is installed
if [ "${container:=}" != 'docker' ]; then
if type glow &> /dev/null; then
GLOW_PATH="$(which glow)"
elif [ -f "$HOME/.local/bin/glow" ]; then
GLOW_PATH="$HOME/.local/bin/glow"
elif [ -f "$(dirname "${BASH_SOURCE[0]}")/glow" ]; then
GLOW_PATH="$(dirname "${BASH_SOURCE[0]}")/glow"
elif type brew &> /dev/null; then
brew install glow
GLOW_PATH="$(which glow)"
else
installGlow
fi
if [ -n "$GLOW_PATH" ]; then
chmod +x "$GLOW_PATH"
ENHANCED_LOGGING=true
fi
fi
"$GLOW_PATH" "$2"
elif [ "$1" == 'prompt' ]; then
"$GUM_PATH" style " $("$GUM_PATH" style --foreground="#00008b" "▶") $("$GUM_PATH" style --bold "$(format "$2")")"
elif [ "$1" == 'star' ]; then
"$GUM_PATH" style " $("$GUM_PATH" style --foreground="#d1d100" "◆") $("$GUM_PATH" style --bold --underline "$(format "$2")")"
elif [ "$1" == 'start' ]; then
"$GUM_PATH" style " $("$GUM_PATH" style --foreground="#00ff00" "▶") $("$GUM_PATH" style --bold "$(format "$2")")"
elif [ "$1" == 'success' ]; then
"$GUM_PATH" style "$("$GUM_PATH" style --foreground="#00ff00" "✔") $("$GUM_PATH" style --bold "$(format "$2")")"
elif [ "$1" == 'warn' ]; then
"$GUM_PATH" style " $("$GUM_PATH" style --foreground="#d1d100" "◆") $("$GUM_PATH" style --bold --background="#ffff00" --foreground="#000000" " WARNING ") $("$GUM_PATH" style --bold --italic "$(format "$2")")"
else
echo "WARNING: Unknown log type"
echo "$2"
fi
}
format() {
# shellcheck disable=SC2001,SC2016
ANSI_STR="$(echo "$1" | sed 's/^\([^`]*\)`\([^`]*\)`/\1\\u001b[47;1;30m \2 \\e[0;39m/')"
if [[ $ANSI_STR == *'`'*'`'* ]]; then
ANSI_STR="$(format "$ANSI_STR")"
fi
echo -e "$ANSI_STR"
}
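# @example
#   # Illustrative: format 'Run `task start` first' prints the message with the
#   # backtick-quoted segment rendered on an inverted (highlighted) background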
# @description Display prompt that allows you to choose between options
# @example RESPONSE="$(.config/log choose "file.png" "another-file.jpg")"
choose() {
if type gum &> /dev/null; then
CHOOSE_ARGS="gum choose"
for CURRENT_VAR in "$@"; do
CHOOSE_ARGS="$CHOOSE_ARGS \"$CURRENT_VAR\""
done
eval $CHOOSE_ARGS
else
echo "ERROR: gum is not installed!"
fi
}
# @description Display a confirmation prompt that returns an exit code if "No" is selected
# @example RESPONSE="$(.config/log confirm "Are you sure?" "Yeah" "Naa")"
confirm() {
if type gum &> /dev/null; then
# Build the option list as an array so values containing spaces are passed to gum intact
GUM_OPTS=()
if [ -n "$2" ]; then
GUM_OPTS+=("--affirmative=$2")
fi
if [ -n "$3" ]; then
GUM_OPTS+=("--negative=$3")
fi
if [ -n "$1" ]; then
gum confirm "$1" "${GUM_OPTS[@]}"
else
gum confirm
fi
else
echo "ERROR: gum is not installed!"
fi
}
# @description Logs an error message
# @example .config/log error "Something happened!"
error() {
if [ -z "$NO_LOGGING" ]; then
if [ -n "$ENHANCED_LOGGING" ]; then
logger error "$1"
else
echo -e "\e[1;41m ERROR \e[0m $(format "$1")\e[0;39m"
fi
fi
}
# @description Display a filterable prompt that is populated with options from a text file
# @example echo Strawberry >> flavors.txt && echo Banana >> flavors.txt && RESPONSE="$(.config/log filter flavors.txt)"
filter() {
if type gum &> /dev/null; then
TMP="$(mktemp)"
gum filter < "$1"
else
echo "ERROR: gum is not installed!"
fi
}
# @description Logs an info message
# @example .config/log info "Here is some information"
info() {
if [ -z "$NO_LOGGING" ]; then
if [ -n "$ENHANCED_LOGGING" ]; then
logger info "$1"
else
echo -e "\e[1;46m INFO \e[0m $(format "$1")\e[0;39m"
fi
fi
}
# @description Displays a prompt for text input
# @example INPUT="$(.config/log input 'Enter the value..')"
input() {
if type gum &> /dev/null; then
if [ -n "$1" ]; then
gum input --placeholder="$1"
else
gum input
fi
else
echo "ERROR: gum is not installed!"
fi
}
# @description Logs a message written in markdown
# @example .config/log md "[styled_link](https://google.com)"
# @example .config/log md mymarkdown/file.md
md() {
if [ ! -f "$1" ]; then
echo "ERROR: A markdown file must be passed in as the parameter" && exit 1
fi
if [ -n "$ENHANCED_LOGGING" ]; then
logger md "$1"
fi
}
# @description Displays an input with masked characters
# @example PASSWORD="$(.config/log password 'Enter the Ansible vault password')"
password() {
if type gum &> /dev/null; then
if [ -n "$1" ]; then
gum input --password --placeholder="$1"
else
gum input --password
fi
else
echo "ERROR: gum is not installed!"
fi
}
# @description Logs a message that describes a prompt
# @example .config/log prompt "Enter text into the following prompt"
prompt() {
if [ -z "$NO_LOGGING" ]; then
if [ -n "$ENHANCED_LOGGING" ]; then
logger prompt "$1"
else
echo -e "\e[1;104m PROMPT \e[0m $(format "$1")\e[0;39m"
fi
fi
}
# @description Display a spinner that stays until a command is completed
# @example .config/log spin "brew install yq" "Installing yq.."
spin() {
if type gum &> /dev/null; then
# The command is run through `sh -c` so a quoted command string works as expected
if [ -n "$1" ] && [ -n "$2" ]; then
gum spin --title="$2" -- sh -c "$1"
elif [ -n "$1" ]; then
gum spin -- sh -c "$1"
else
echo "ERROR: A command must be passed in as the first parameter"
fi
else
echo "ERROR: gum is not installed!"
fi
}
# @description Logs a message that starts with a star emoji
# @example .config/log star "Congratulations"
star() {
if [ -z "$NO_LOGGING" ]; then
if [ -n "$ENHANCED_LOGGING" ]; then
logger star "$1"
else
echo -e "\e[1;104m LINK \e[0m $(format "$1")\e[0;39m"
fi
fi
}
# @description Logs a message at the beginning of a task
# @example .config/log start "Starting the process.."
start() {
if [ -z "$NO_LOGGING" ]; then
if [ -n "$ENHANCED_LOGGING" ]; then
logger start "$1"
else
echo -e "\e[1;46m START \e[0m $(format "$1")\e[0;39m"
fi
fi
}
# @description Logs a success message
# @example .config/log success "Yay!"
success() {
if [ -z "$NO_LOGGING" ]; then
if [ -n "$ENHANCED_LOGGING" ]; then
logger success "$1"
else
echo -e "\e[1;42m SUCCESS \e[0m $(format "$1")\e[0;39m"
fi
fi
}
# @description Logs a warning message
# @example .config/log warn "Just so you know.."
warn() {
if [ -z "$NO_LOGGING" ]; then
if [ -n "$ENHANCED_LOGGING" ]; then
logger warn "$1"
else
echo -e "\e[1;43m WARNING \e[0m $(format "$1")\e[0;39m"
fi
fi
}
# @description Displays a multi-line prompt for text input
# @example .config/log write "Write something.."
write() {
if type gum &> /dev/null; then
if [ -n "$1" ]; then
gum write --placeholder="$1"
else
gum write
fi
else
echo "ERROR: gum is not installed!"
fi
}
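# @description Simple CLI dispatcher: routes the first argument to the matching function above
# so the library can be invoked directly (e.g. `.config/log warn 'Low disk space'`)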
if [ -n "$1" ] && [ -n "$2" ]; then
# Public functions that require at least two parameters to be used
if [ "$1" == 'warn' ] || [ "$1" == 'success' ] || [ "$1" == 'star' ] || [ "$1" == 'info' ] \
|| [ "$1" == 'error' ] || [ "$1" == 'md' ] || [ "$1" == 'write' ] || [ "$1" == 'start' ] \
|| [ "$1" == 'spin' ] || [ "$1" == 'prompt' ] || [ "$1" == 'filter' ] || [ "$1" == 'input' ] \
|| [ "$1" == 'confirm' ] || [ "$1" == 'password' ]; then
"$1" "$2"
elif [[ "$1" == 'choose' ]]; then
"$@"
fi
elif [ -n "$1" ]; then
# Public functions that can run with only one argument passed to .config/log (i.e. `.config/log password`)
if [ "$1" == 'write' ] || [ "$1" == 'password' ] || [ "$1" == 'confirm' ] || [ "$1" == 'input' ]; then
"$1"
fi
fi

View file

@ -0,0 +1,27 @@
---
dependency:
name: shell
command: |
if type task > /dev/null; then
task ansible:test:molecule:dependencies
else
ansible-galaxy install --ignore-errors -r requirements.yml
fi
provisioner:
name: ansible
options:
vvv: true
playbooks:
converge: ../converge.yml
prepare: ../../.config/molecule/prepare.yml
docker:
create: ../../.config/molecule/docker.create.yml
destroy: ../../.config/molecule/docker.destroy.yml
gce:
create: ../../.config/molecule/gce.create.yml
destroy: ../../.config/molecule/gce.destroy.yml
vagrant:
create: ../../.config/molecule/vagrant.create.yml
destroy: ../../.config/molecule/vagrant.destroy.yml
verifier:
name: ansible

179
.config/molecule/docker.create.yml Normal file
View file

@ -0,0 +1,179 @@
---
# yamllint disable rule:line-length
- name: Update platforms
hosts: localhost
tasks:
- name: Filtering platforms list using the group defined in the MOLECULE_GROUP environment variable
set_fact:
molecule_yml: "{{ molecule_yml | combine({'platforms': (molecule_yml.platforms | selectattr('groups', 'contains', lookup('env', 'MOLECULE_GROUP')))}) }}"
when: ansible_env.MOLECULE_GROUP is defined
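# Illustrative usage (assumed scenario name): `MOLECULE_GROUP=ubuntu molecule test -s docker`
# limits the run to platforms whose `groups` list contains "ubuntu"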
- name: Create
hosts: localhost
connection: local
gather_facts: false
no_log: '{{ molecule_no_log }}'
vars:
molecule_labels:
owner: molecule
tasks:
- name: Log into a Docker registry
community.docker.docker_login:
username: '{{ item.registry.credentials.username }}'
password: '{{ item.registry.credentials.password }}'
email: '{{ item.registry.credentials.email | default(omit) }}'
registry: '{{ item.registry.url }}'
docker_host: "{{ item.docker_host | default(lookup('env', 'DOCKER_HOST') or 'unix://var/run/docker.sock') }}"
cacert_path: "{{ item.cacert_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/ca.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
cert_path: "{{ item.cert_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/cert.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
key_path: "{{ item.key_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/key.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
tls_verify: "{{ item.tls_verify | default(lookup('env', 'DOCKER_TLS_VERIFY')) or false }}"
with_items: '{{ molecule_yml.platforms }}'
when:
- item.registry is defined
- item.registry.credentials is defined
- item.registry.credentials.username is defined
no_log: true
- name: Check presence of custom Dockerfiles
ansible.builtin.stat:
path: "{{ molecule_scenario_directory + '/' + (item.dockerfile | default( 'Dockerfile.j2')) }}"
loop: '{{ molecule_yml.platforms }}'
register: dockerfile_stats
- name: Create Dockerfiles from image names
ansible.builtin.template:
# when using embedded playbooks, the dockerfile is alongside them
src: >-
{%- if dockerfile_stats.results[i].stat.exists -%}
{{ molecule_scenario_directory + '/' + (item.dockerfile | default( 'Dockerfile.j2')) }}
{%- else -%}
{{ playbook_dir + '/Dockerfile.j2' }}
{%- endif -%}
dest: "{{ molecule_ephemeral_directory }}/Dockerfile_{{ item.image | regex_replace('[^a-zA-Z0-9_]', '_') }}"
mode: '0600'
loop: '{{ molecule_yml.platforms }}'
loop_control:
index_var: i
when: not item.pre_build_image | default(false)
register: platforms
- name: Discover local Docker images
community.docker.docker_image_info:
name: 'molecule_local/{{ item.item.name }}'
docker_host: "{{ item.item.docker_host | default(lookup('env', 'DOCKER_HOST') or 'unix://var/run/docker.sock') }}"
cacert_path: "{{ item.cacert_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/ca.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
cert_path: "{{ item.cert_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/cert.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
key_path: "{{ item.key_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/key.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
tls_verify: "{{ item.tls_verify | default(lookup('env', 'DOCKER_TLS_VERIFY')) or false }}"
with_items: '{{ platforms.results }}'
when:
- not item.pre_build_image | default(false)
register: docker_images
- name: Build an Ansible compatible image (new) # noqa: no-handler
when:
- platforms.changed or docker_images.results | map(attribute='images') | select('equalto', []) | list | count >= 0
- not item.item.pre_build_image | default(false)
community.docker.docker_image:
build:
path: '{{ molecule_ephemeral_directory }}'
dockerfile: '{{ item.invocation.module_args.dest }}'
pull: '{{ item.item.pull | default(true) }}'
network: '{{ item.item.network_mode | default(omit) }}'
args: '{{ item.item.buildargs | default(omit) }}'
name: 'molecule_local/{{ item.item.image }}'
docker_host: "{{ item.item.docker_host | default(lookup('env', 'DOCKER_HOST') or 'unix://var/run/docker.sock') }}"
cacert_path: "{{ item.cacert_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/ca.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
cert_path: "{{ item.cert_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/cert.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
key_path: "{{ item.key_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/key.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
tls_verify: "{{ item.tls_verify | default(lookup('env', 'DOCKER_TLS_VERIFY')) or false }}"
force_source: '{{ item.item.force | default(true) }}'
source: build
with_items: '{{ platforms.results }}'
loop_control:
label: 'molecule_local/{{ item.item.image }}'
no_log: false
register: result
until: result is not failed
retries: 3
delay: 30
- name: Create docker network(s)
ansible.builtin.include_tasks: tasks/create_network.yml
with_items: '{{ molecule_yml.platforms | molecule_get_docker_networks(molecule_labels) }}'
loop_control:
label: '{{ item.name }}'
no_log: false
- name: Determine the CMD directives
ansible.builtin.set_fact:
command_directives_dict: >-
{{ command_directives_dict | default({}) |
combine({ item.name: item.command | default('bash -c "while true; do sleep 10000; done"') })
}}
with_items: '{{ molecule_yml.platforms }}'
when: item.override_command | default(true)
- name: Create molecule instance(s)
community.docker.docker_container:
name: '{{ item.name }}'
docker_host: "{{ item.docker_host | default(lookup('env', 'DOCKER_HOST') or 'unix://var/run/docker.sock') }}"
cacert_path: "{{ item.cacert_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/ca.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
cert_path: "{{ item.cert_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/cert.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
key_path: "{{ item.key_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/key.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
tls_verify: "{{ item.tls_verify | default(lookup('env', 'DOCKER_TLS_VERIFY')) or false }}"
hostname: '{{ item.hostname | default(item.name) }}'
image: "{{ item.pre_build_image | default(false) | ternary('', 'molecule_local/') }}{{ item.image }}"
pull: '{{ item.pull | default(omit) }}'
memory: '{{ item.memory | default(omit) }}'
memory_swap: '{{ item.memory_swap | default(omit) }}'
state: started
recreate: false
log_driver: json-file
command: '{{ (command_directives_dict | default({}))[item.name] | default(omit) }}'
command_handling: "{{ item.command_handling | default('compatibility') }}"
user: '{{ item.user | default(omit) }}'
pid_mode: '{{ item.pid_mode | default(omit) }}'
privileged: '{{ item.privileged | default(omit) }}'
security_opts: '{{ item.security_opts | default(omit) }}'
devices: '{{ item.devices | default(omit) }}'
links: '{{ item.links | default(omit) }}'
volumes: '{{ item.volumes | default(omit) }}'
mounts: '{{ item.mounts | default(omit) }}'
tmpfs: '{{ item.tmpfs | default(omit) }}'
capabilities: '{{ item.capabilities | default(omit) }}'
sysctls: '{{ item.sysctls | default(omit) }}'
exposed_ports: '{{ item.exposed_ports | default(omit) }}'
published_ports: '{{ item.published_ports | default(omit) }}'
ulimits: '{{ item.ulimits | default(omit) }}'
networks: '{{ item.networks | default(omit) }}'
network_mode: '{{ item.network_mode | default(omit) }}'
networks_cli_compatible: '{{ item.networks_cli_compatible | default(true) }}'
purge_networks: '{{ item.purge_networks | default(omit) }}'
dns_servers: '{{ item.dns_servers | default(omit) }}'
etc_hosts: '{{ item.etc_hosts | default(omit) }}'
env: '{{ item.env | default(omit) }}'
restart_policy: '{{ item.restart_policy | default(omit) }}'
restart_retries: '{{ item.restart_retries | default(omit) }}'
tty: '{{ item.tty | default(omit) }}'
labels: '{{ molecule_labels | combine(item.labels | default({})) }}'
container_default_behavior: "{{ item.container_default_behavior | default('compatibility'
if ansible_version.full is version_compare('2.10', '>=') else omit) }}"
stop_signal: '{{ item.stop_signal | default(omit) }}'
kill_signal: '{{ item.kill_signal | default(omit) }}'
register: server
with_items: '{{ molecule_yml.platforms }}'
loop_control:
label: '{{ item.name }}'
no_log: false
async: 7200
poll: 0
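# The containers above are created fire-and-forget (async with poll: 0); the next task
# polls the async job ids until every creation job reports finished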
- name: Wait for instance(s) creation to complete
ansible.builtin.async_status:
jid: '{{ item.ansible_job_id }}'
register: docker_jobs
until: docker_jobs.finished
retries: 300
with_items: '{{ server.results }}'

53
.config/molecule/docker.destroy.yml Normal file
View file

@ -0,0 +1,53 @@
---
# yamllint disable rule:line-length
- name: Update platforms
hosts: localhost
tasks:
- name: Filtering platforms list using the group defined in the MOLECULE_GROUP environment variable
set_fact:
molecule_yml: "{{ molecule_yml | combine({'platforms': (molecule_yml.platforms | selectattr('groups', 'contains', lookup('env', 'MOLECULE_GROUP')))}) }}"
when: ansible_env.MOLECULE_GROUP is defined
- name: Destroy
hosts: localhost
connection: local
gather_facts: false
no_log: '{{ molecule_no_log }}'
tasks:
- name: Destroy molecule instance(s)
community.docker.docker_container:
name: '{{ item.name }}'
docker_host: "{{ item.docker_host | default(lookup('env', 'DOCKER_HOST') or 'unix://var/run/docker.sock') }}"
cacert_path: "{{ item.cacert_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/ca.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
cert_path: "{{ item.cert_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/cert.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
key_path: "{{ item.key_path | default((lookup('env', 'DOCKER_CERT_PATH') + '/key.pem') if lookup('env', 'DOCKER_CERT_PATH') else omit) }}"
tls_verify: "{{ item.tls_verify | default(lookup('env', 'DOCKER_TLS_VERIFY')) or false }}"
state: absent
force_kill: '{{ item.force_kill | default(true) }}'
keep_volumes: '{{ item.keep_volumes | default(true) }}'
container_default_behavior: "{{ item.container_default_behavior | default('compatibility' if
ansible_version.full is version_compare('2.10', '>=') else omit) }}"
register: server
loop: '{{ molecule_yml.platforms }}'
loop_control:
label: '{{ item.name }}'
no_log: false
async: 7200
poll: 0
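# Deletion uses the same async/poll-0 pattern; the next task waits on the job ids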
- name: Wait for instance(s) deletion to complete
ansible.builtin.async_status:
jid: '{{ item.ansible_job_id }}'
register: docker_jobs
until: docker_jobs.finished
retries: 300
loop: '{{ server.results }}'
loop_control:
label: '{{ item.item.name }}'
- name: Delete docker networks(s)
include_tasks: tasks/delete_network.yml
loop: '{{ molecule_yml.platforms | molecule_get_docker_networks() }}'
loop_control:
label: '{{ item.name }}'
no_log: false

View file

@ -0,0 +1,224 @@
#!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
import datetime
import json
import time
import argparse
# PyCrypto library: https://pypi.python.org/pypi/pycrypto
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from Crypto.Util.number import long_to_bytes
# Google API Client Library for Python:
# https://developers.google.com/api-client-library/python/start/get_started
import google.auth
from googleapiclient.discovery import build
def GetCompute():
"""Get a compute object for communicating with the Compute Engine API."""
credentials, project = google.auth.default()
compute = build("compute", "v1", credentials=credentials)
return compute
def GetInstance(compute, instance, zone, project):
"""Get the data for a Google Compute Engine instance."""
cmd = compute.instances().get(instance=instance, project=project, zone=zone)
return cmd.execute()
def GetKey():
"""Get an RSA key for encryption."""
# This uses the PyCrypto library
key = RSA.generate(2048)
return key
def GetModulusExponentInBase64(key):
"""Return the public modulus and exponent for the key in bas64 encoding."""
mod = long_to_bytes(key.n)
exp = long_to_bytes(key.e)
modulus = base64.b64encode(mod)
exponent = base64.b64encode(exp)
return modulus, exponent
def GetExpirationTimeString():
"""Return an RFC3339 UTC timestamp for 5 minutes from now."""
utc_now = datetime.datetime.utcnow()
# These metadata entries are one-time-use, so the expiration time does
# not need to be very far in the future. In fact, one minute would
# generally be sufficient. Five minutes allows for minor variations
# between the time on the client and the time on the server.
expire_time = utc_now + datetime.timedelta(minutes=5)
return expire_time.strftime("%Y-%m-%dT%H:%M:%SZ")
def GetJsonString(user, modulus, exponent, email):
"""Return the JSON string object that represents the windows-keys entry."""
converted_modulus = modulus.decode("utf-8")
converted_exponent = exponent.decode("utf-8")
expire = GetExpirationTimeString()
data = {
"userName": user,
"modulus": converted_modulus,
"exponent": converted_exponent,
"email": email,
"expireOn": expire,
}
return json.dumps(data)
def UpdateWindowsKeys(old_metadata, metadata_entry):
"""Return updated metadata contents with the new windows-keys entry."""
# Simply overwrites the "windows-keys" metadata entry. Production code may
# want to append new lines to the metadata value and remove any expired
# entries.
new_metadata = copy.deepcopy(old_metadata)
new_metadata["items"] = [{"key": "windows-keys", "value": metadata_entry}]
return new_metadata
def UpdateInstanceMetadata(compute, instance, zone, project, new_metadata):
"""Update the instance metadata."""
cmd = compute.instances().setMetadata(
instance=instance, project=project, zone=zone, body=new_metadata
)
return cmd.execute()
def GetSerialPortFourOutput(compute, instance, zone, project):
"""Get the output from serial port 4 from the instance."""
# Encrypted passwords are printed to COM4 on the windows server:
port = 4
cmd = compute.instances().getSerialPortOutput(
instance=instance, project=project, zone=zone, port=port
)
output = cmd.execute()
return output["contents"]
def GetEncryptedPasswordFromSerialPort(serial_port_output, modulus):
"""Find and return the correct encrypted password, based on the modulus."""
# In production code, this may need to be run multiple times if the output
# does not yet contain the correct entry.
converted_modulus = modulus.decode("utf-8")
output = serial_port_output.split("\n")
for line in reversed(output):
try:
entry = json.loads(line)
if converted_modulus == entry["modulus"]:
return entry["encryptedPassword"]
except ValueError:
pass
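# A candidate serial-port line is a JSON object along the lines of
# (hypothetical values): {"modulus": "p8o4...", "encryptedPassword": "Qm9n..."}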
def DecryptPassword(encrypted_password, key):
"""Decrypt a base64 encoded encrypted password using the provided key."""
decoded_password = base64.b64decode(encrypted_password)
cipher = PKCS1_OAEP.new(key)
password = cipher.decrypt(decoded_password)
return password
def Arguments():
# Create the parser
    args = argparse.ArgumentParser(description="Reset a Windows password on a GCE instance")
# Add the arguments
args.add_argument(
"--instance", metavar="instance", type=str, help="compute instance name"
)
args.add_argument("--zone", metavar="zone", type=str, help="compute zone")
args.add_argument("--project", metavar="project", type=str, help="gcp project")
args.add_argument("--username", metavar="username", type=str, help="username")
args.add_argument("--email", metavar="email", type=str, help="email")
# return arguments
return args.parse_args()
def main():
config_args = Arguments()
# Setup
compute = GetCompute()
key = GetKey()
modulus, exponent = GetModulusExponentInBase64(key)
# Get existing metadata
instance_ref = GetInstance(
compute, config_args.instance, config_args.zone, config_args.project
)
old_metadata = instance_ref["metadata"]
# Create and set new metadata
metadata_entry = GetJsonString(
config_args.username, modulus, exponent, config_args.email
)
new_metadata = UpdateWindowsKeys(old_metadata, metadata_entry)
# Get Serial output BEFORE the modification
serial_port_output = GetSerialPortFourOutput(
compute, config_args.instance, config_args.zone, config_args.project
)
UpdateInstanceMetadata(
compute,
config_args.instance,
config_args.zone,
config_args.project,
new_metadata,
)
# Get and decrypt password from serial port output
    # Poll the serial-port output until the encrypted password shows up
    # (up to 21 attempts at 3-second intervals, i.e. roughly one minute)
i = 0
new_serial_port_output = serial_port_output
while i <= 20 and serial_port_output == new_serial_port_output:
new_serial_port_output = GetSerialPortFourOutput(
compute, config_args.instance, config_args.zone, config_args.project
)
i += 1
time.sleep(3)
    enc_password = GetEncryptedPasswordFromSerialPort(new_serial_port_output, modulus)
    # Guard against the password entry never appearing in the captured output
    if enc_password is None:
        raise RuntimeError("Encrypted password not found in serial port output.")
    password = DecryptPassword(enc_password, key)
    converted_password = password.decode("utf-8")
# Display only the password
    print(converted_password)
if __name__ == "__main__":
main()
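# Sketch of an invocation (hypothetical file name and values; assumes
# Application Default Credentials with Compute Engine scope, e.g. set up via
# `gcloud auth application-default login`):
#   python windows_password_reset.py --instance my-win-vm --zone us-central1-b \
#     --project my-project --username molecule_usr --email admin@example.com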

View file

@ -0,0 +1,37 @@
"""Embedded ansible filter used by Molecule Docker driver create playbook."""
def get_docker_networks(data, labels=None):
    """Get list of docker networks."""
    labels = labels or {}
network_list = []
network_names = []
for platform in data:
if "docker_networks" in platform:
for docker_network in platform["docker_networks"]:
if "labels" not in docker_network:
docker_network["labels"] = {}
for key in labels:
docker_network["labels"][key] = labels[key]
if "name" in docker_network:
network_list.append(docker_network)
network_names.append(docker_network["name"])
# If a network name is defined for a platform but is not defined in
# docker_networks, add it to the network list.
if "networks" in platform:
for network in platform["networks"]:
if "name" in network:
name = network["name"]
if name not in network_names:
network_list.append({"name": name, "labels": labels})
return network_list
class FilterModule(object):
"""Core Molecule filter plugins."""
def filters(self):
return {
"molecule_get_docker_networks": get_docker_networks,
}
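# Example with hypothetical platform data:
#   get_docker_networks([{"name": "instance", "networks": [{"name": "bridge"}]}])
#   returns [{"name": "bridge", "labels": {}}]
# Inside the create playbook the filter is applied as
# "{{ molecule_yml.platforms | molecule_get_docker_networks() }}".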

View file

@ -0,0 +1,39 @@
---
# yamllint disable rule:line-length
- name: Update platforms
hosts: localhost
tasks:
- name: Filtering platforms list using the group defined in the MOLECULE_GROUP environment variable
set_fact:
molecule_yml: "{{ molecule_yml | combine({'platforms': (molecule_yml.platforms | selectattr('groups', 'contains', lookup('env', 'MOLECULE_GROUP')))}) }}"
when: ansible_env.MOLECULE_GROUP is defined
- name: Create
hosts: localhost
connection: local
gather_facts: false
no_log: '{{ molecule_no_log }}'
vars:
ssh_identity_file: "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}/ssh_key"
gcp_project_id: "{{ molecule_yml.driver.project_id | default(lookup('env', 'GCE_PROJECT_ID')) }}"
tasks:
- name: Make sure the group contains only Linux hosts or Windows hosts
ansible.builtin.assert:
that:
- molecule_yml.driver.instance_os_type | lower == "linux" or
molecule_yml.driver.instance_os_type | lower == "windows"
        fail_msg: instance_os_type must be either 'linux' or 'windows'
- name: Include create_linux_instance tasks
ansible.builtin.include_tasks: tasks/create_linux_instance.yml
when:
- molecule_yml.driver.instance_os_type | lower == "linux"
- name: Include create_windows_instance tasks
ansible.builtin.include_tasks: tasks/create_windows_instance.yml
when:
- molecule_yml.driver.instance_os_type | lower == "windows"
handlers:
- name: Import main handler tasks
ansible.builtin.import_tasks: handlers/main.yml
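# A minimal molecule.yml driver block this playbook expects (hypothetical
# values; the keys mirror the lookups above):
#   driver:
#     name: gce
#     project_id: my-project
#     region: us-central1
#     instance_os_type: linux
#     external_access: true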

View file

@ -0,0 +1,46 @@
---
# yamllint disable rule:line-length
- name: Update platforms
hosts: localhost
tasks:
- name: Filtering platforms list using the group defined in the MOLECULE_GROUP environment variable
set_fact:
molecule_yml: "{{ molecule_yml | combine({'platforms': (molecule_yml.platforms | selectattr('groups', 'contains', lookup('env', 'MOLECULE_GROUP')))}) }}"
when: ansible_env.MOLECULE_GROUP is defined
- name: Destroy
hosts: localhost
connection: local
gather_facts: false
no_log: '{{ molecule_no_log }}'
tasks:
- name: Destroy molecule instance(s)
google.cloud.gcp_compute_instance:
name: '{{ item.name }}'
state: absent
zone: "{{ item.zone | default(molecule_yml.driver.region + '-b') }}"
project: "{{ molecule_yml.driver.project_id | default(lookup('env', 'GCE_PROJECT_ID')) }}"
scopes: "{{ molecule_yml.driver.scopes | default(['https://www.googleapis.com/auth/compute'], True) }}"
service_account_email: '{{ molecule_yml.driver.service_account_email | default (omit, true) }}'
service_account_file: '{{ molecule_yml.driver.service_account_file | default (omit, true) }}'
auth_kind: '{{ molecule_yml.driver.auth_kind | default(omit, true) }}'
register: async_results
loop: '{{ molecule_yml.platforms }}'
async: 7200
poll: 0
notify:
- Wipe out instance config
- Dump instance config
- name: Wait for instance(s) deletion to complete
ansible.builtin.async_status:
jid: '{{ item.ansible_job_id }}'
register: server
until: server.finished
retries: 300
delay: 10
loop: '{{ async_results.results }}'
handlers:
- name: Import main handler tasks
ansible.builtin.import_tasks: handlers/main.yml

View file

@ -0,0 +1,51 @@
---
# yamllint disable rule:line-length
- name: Populate instance config dict Linux
ansible.builtin.set_fact:
instance_conf_dict:
instance: '{{ instance_info.name }}'
address: '{{ instance_info.networkInterfaces.0.accessConfigs.0.natIP if molecule_yml.driver.external_access else instance_info.networkInterfaces.0.networkIP }}'
user: "{{ lookup('env','USER') }}"
port: '22'
identity_file: '{{ ssh_identity_file }}'
instance_os_type: '{{ molecule_yml.driver.instance_os_type }}'
loop: '{{ server.results }}'
loop_control:
loop_var: instance_info
no_log: true
register: instance_conf_dict
- name: Populate instance config dict Windows
ansible.builtin.set_fact:
instance_conf_dict:
instance: '{{ instance_info.name }}'
address: '{{ instance_info.networkInterfaces.0.accessConfigs.0.natIP if molecule_yml.driver.external_access else instance_info.networkInterfaces.0.networkIP }}'
user: molecule_usr
password: '{{ instance_info.password }}'
port: '{{ instance_info.winrm_port | default(5986) }}'
winrm_transport: "{{ molecule_yml.driver.winrm_transport | default('ntlm') }}"
winrm_server_cert_validation: "{{ molecule_yml.driver.winrm_server_cert_validation | default('ignore') }}"
instance_os_type: '{{ molecule_yml.driver.instance_os_type }}'
loop: '{{ win_instances }}'
loop_control:
loop_var: instance_info
no_log: true
register: instance_conf_dict
- name: Wipe out instance config
ansible.builtin.set_fact:
instance_conf: {}
- name: Convert instance config dict to a list
ansible.builtin.set_fact:
instance_conf: "{{ instance_conf_dict.results | map(attribute='ansible_facts.instance_conf_dict') | list }}"
- name: Dump instance config
ansible.builtin.copy:
content: '{{ instance_conf }}'
dest: '{{ molecule_instance_config }}'
mode: '0600'

View file

@ -0,0 +1,20 @@
---
- name: Prepare
hosts: "{{ lookup('env', 'MOLECULE_GROUP') | default('all', true) }}"
gather_facts: false
tasks:
- become: true
changed_when: false
name: Bootstrap Python for Ansible
raw: |
command -v python3 python || (
command -v apk >/dev/null && sudo apk add --no-progress --update python3 ||
(test -e /usr/bin/dnf && sudo dnf install -y python3) ||
(test -e /usr/bin/apt && (apt -y update && apt install -y python-minimal)) ||
(test -e /usr/bin/yum && sudo yum -y -qq install python3) ||
(test -e /usr/sbin/pkg && sudo env ASSUME_ALWAYS_YES=yes pkg update && sudo env ASSUME_ALWAYS_YES=yes pkg install python3) ||
(test -e /usr/sbin/pkg_add && sudo /usr/sbin/pkg_add -U -I -x python%3.7) ||
echo "Warning: Python not boostrapped due to unknown platform."
)
when:
- ansible_connection != 'winrm'

View file

@ -0,0 +1,64 @@
---
# yamllint disable rule:line-length
- name: Create SSH keypair
community.crypto.openssh_keypair:
comment: "{{ lookup('env','USER') }} user for Molecule"
path: '{{ ssh_identity_file }}'
register: keypair
- name: Create molecule Linux instance(s)
google.cloud.gcp_compute_instance:
state: present
name: '{{ item.name }}'
machine_type: "{{ item.machine_type | default('n1-standard-1') }}"
metadata:
ssh-keys: "{{ lookup('env','USER') }}:{{ keypair.public_key }}"
scheduling:
preemptible: '{{ item.preemptible | default(false) }}'
disks:
- auto_delete: true
boot: true
initialize_params:
disk_size_gb: '{{ item.disk_size_gb | default(omit) }}'
source_image: "{{ item.image | default('projects/debian-cloud/global/images/family/debian-10') }}"
source_image_encryption_key:
raw_key: '{{ item.image_encryption_key | default(omit) }}'
network_interfaces:
- network:
selfLink: "https://www.googleapis.com/compute/v1/projects/{{ molecule_yml.driver.vpc_host_project | default(gcp_project_id) }}/global/networks/{{ molecule_yml.driver.network_name | default('default') }}"
subnetwork:
selfLink: "https://compute.googleapis.com/compute/v1/projects/{{ molecule_yml.driver.vpc_host_project | default(gcp_project_id) }}/regions/{{ molecule_yml.driver.region }}/subnetworks/{{ molecule_yml.driver.subnetwork_name | default('default') }}"
access_configs: "{{ [{'name': 'instance_ip', 'type': 'ONE_TO_ONE_NAT'}] if molecule_yml.driver.external_access else [] }}"
zone: "{{ item.zone | default(molecule_yml.driver.region + '-b') }}"
project: '{{ gcp_project_id }}'
scopes: "{{ molecule_yml.driver.scopes | default(['https://www.googleapis.com/auth/compute'], True) }}"
service_account_email: '{{ molecule_yml.driver.service_account_email | default (omit, true) }}'
service_account_file: '{{ molecule_yml.driver.service_account_file | default (omit, true) }}'
auth_kind: '{{ molecule_yml.driver.auth_kind | default(omit, true) }}'
register: async_results
loop: '{{ molecule_yml.platforms }}'
loop_control:
pause: 3
async: 7200
poll: 0
- name: Wait for instance(s) creation to complete
ansible.builtin.async_status:
jid: '{{ item.ansible_job_id }}'
loop: '{{ async_results.results }}'
register: server
until: server.finished
retries: 300
delay: 10
notify:
- Populate instance config dict Linux
- Convert instance config dict to a list
- Dump instance config
- name: Wait for SSH
ansible.builtin.wait_for:
port: 22
host: '{{ item.networkInterfaces.0.accessConfigs.0.natIP if molecule_yml.driver.external_access else item.networkInterfaces.0.networkIP }}'
search_regex: SSH
delay: 10
loop: '{{ server.results }}'

View file

@ -0,0 +1,65 @@
---
# yamllint disable rule:line-length
- name: Create SSH keypair
community.crypto.openssh_keypair:
comment: "{{ lookup('env','USER') }} user for Molecule"
path: '{{ ssh_identity_file }}'
register: keypair
- name: Create molecule Linux instance(s)
google.cloud.gcp_compute_instance:
state: present
name: '{{ item.name }}'
machine_type: "{{ item.machine_type | default('n1-standard-1') }}"
metadata:
ssh-keys: "{{ lookup('env','USER') }}:{{ keypair.public_key }}"
scheduling:
preemptible: '{{ item.preemptible | default(false) }}'
disks:
- auto_delete: true
boot: true
initialize_params:
disk_size_gb: '{{ item.disk_size_gb | default(omit) }}'
source_image: "{{ item.image | default('projects/debian-cloud/global/images/family/debian-10') }}"
source_image_encryption_key:
raw_key: '{{ item.image_encryption_key | default(omit) }}'
network_interfaces:
- network:
selfLink: "https://www.googleapis.com/compute/v1/projects/{{ molecule_yml.driver.vpc_host_project | default(gcp_project_id) }}/global/networks/{{ molecule_yml.driver.network_name | default('default') }}"
subnetwork:
selfLink: "https://compute.googleapis.com/compute/v1/projects/{{ molecule_yml.driver.vpc_host_project | default(gcp_project_id) }}/regions/{{ molecule_yml.driver.region }}/subnetworks/{{ molecule_yml.driver.subnetwork_name | default('default') }}"
access_configs: "{{ [{'name': 'instance_ip', 'type': 'ONE_TO_ONE_NAT'}] if molecule_yml.driver.external_access else [] }}"
zone: "{{ item.zone | default(molecule_yml.driver.region + '-b') }}"
project: '{{ gcp_project_id }}'
scopes: "{{ molecule_yml.driver.scopes | default(['https://www.googleapis.com/auth/compute'], True) }}"
service_account_email: '{{ molecule_yml.driver.service_account_email | default (omit, true) }}'
service_account_file: '{{ molecule_yml.driver.service_account_file | default (omit, true) }}'
auth_kind: '{{ molecule_yml.driver.auth_kind | default(omit, true) }}'
register: async_results
loop: '{{ molecule_yml.platforms }}'
loop_control:
pause: 3
async: 7200
poll: 0
- name: Wait for instance(s) creation to complete
ansible.builtin.async_status:
jid: '{{ item.ansible_job_id }}'
loop: '{{ async_results.results }}'
register: server
until: server.finished
retries: 300
delay: 10
notify:
- Populate instance config dict Linux
- Convert instance config dict to a list
- Dump instance config
- name: Wait for SSH
ansible.builtin.wait_for:
port: 22
host: "{{ item.networkInterfaces.0.accessConfigs.0.natIP if molecule_yml.driver.external_access else (item.name + '.' + item.zone + '.' + molecule_yml.driver.project_id) }}"
search_regex: SSH
delay: 10
loop: '{{ server.results }}'

View file

@ -0,0 +1,63 @@
---
# yamllint disable rule:line-length
- name: Update platforms
hosts: localhost
tasks:
- name: Filtering platforms list using the group defined in the MOLECULE_GROUP environment variable
set_fact:
molecule_yml: "{{ molecule_yml | combine({'platforms': (molecule_yml.platforms | selectattr('groups', 'contains', lookup('env', 'MOLECULE_GROUP')))}) }}"
when: ansible_env.MOLECULE_GROUP is defined
- name: Create
hosts: localhost
connection: local
gather_facts: false
no_log: '{{ molecule_no_log }}'
tasks:
- name: Create molecule instance(s)
vagrant:
instance_name: '{{ item.name }}'
instance_interfaces: '{{ item.interfaces | default(omit) }}'
instance_raw_config_args: '{{ item.instance_raw_config_args | default(omit) }}'
config_options: '{{ item.config_options | default(omit) }}'
platform_box: '{{ item.box | default("generic/alpine310") }}'
platform_box_version: '{{ item.box_version | default(omit) }}'
platform_box_url: '{{ item.box_url | default(omit) }}'
provider_name: '{{ molecule_yml.driver.provider.name | default(omit, true) }}'
provider_memory: '{{ item.memory | default(omit) }}'
provider_cpus: '{{ item.cpus | default(omit) }}'
provider_options: '{{ item.provider_options | default(omit) }}'
provider_raw_config_args: '{{ item.provider_raw_config_args | default(omit) }}'
provider_override_args: '{{ item.provider_override_args | default(omit) }}'
provision: '{{ item.provision | default(omit) }}'
state: up
register: server
with_items: '{{ molecule_yml.platforms }}'
loop_control:
label: '{{ item.name }}'
no_log: false
- name: Run tasks if there were changes while creating the molecule instance(s)
when: server.changed | bool
block:
- name: Populate instance config dict
set_fact:
instance_conf_dict:
instance: '{{ item.Host }}'
address: '{{ item.HostName }}'
user: '{{ item.User }}'
port: '{{ item.Port }}'
identity_file: '{{ item.IdentityFile }}'
with_items: '{{ server.results }}'
register: instance_config_dict
- name: Convert instance config dict to a list
set_fact:
instance_conf: "{{ instance_config_dict.results | map(attribute='ansible_facts.instance_conf_dict') | list }}"
- name: Dump instance config
copy:
content: '{{ instance_conf | to_json | from_json | to_yaml }}'
dest: '{{ molecule_instance_config }}'
        mode: '0600'

View file

@ -0,0 +1,43 @@
---
# yamllint disable rule:line-length
- name: Update platforms
hosts: localhost
tasks:
- name: Filtering platforms list using the group defined in the MOLECULE_GROUP environment variable
set_fact:
molecule_yml: "{{ molecule_yml | combine({'platforms': (molecule_yml.platforms | selectattr('groups', 'contains', lookup('env', 'MOLECULE_GROUP')))}) }}"
when: ansible_env.MOLECULE_GROUP is defined
- name: Destroy
hosts: localhost
connection: local
gather_facts: false
no_log: '{{ molecule_no_log }}'
tasks:
- name: Destroy molecule instance(s)
vagrant:
instance_name: '{{ item.name }}'
platform_box: '{{ item.box | default(omit) }}'
provider_name: '{{ molecule_yml.driver.provider.name | default(omit, true) }}'
provider_options: '{{ item.provider_options | default(omit) }}'
provider_raw_config_args: '{{ item.provider_raw_config_args | default(omit) }}'
force_stop: '{{ item.force_stop | default(true) }}'
state: destroy
register: server
with_items: '{{ molecule_yml.platforms }}'
loop_control:
label: '{{ item.name }}'
no_log: false
- name: Populate instance config
set_fact:
instance_conf: {}
- name: Dump instance config # noqa 503
copy:
content: |
# Molecule managed
{{ instance_conf | to_json | from_json | to_yaml }}
dest: '{{ molecule_instance_config }}'
        mode: '0600'
when: server.changed | bool

4
.config/nodemon.json Normal file
View file

@ -0,0 +1,4 @@
{
"exec": "task project:livereload",
"ext": "py,yml"
}

1
.config/package-lock.json generated Normal file

File diff suppressed because one or more lines are too long

26
.config/prettierignore Normal file
View file

@ -0,0 +1,26 @@
.autodoc/
.cache/
.common/
.config/
.git/
.github/
.gitlab/
.gitmodules
.husky/
.modules/
.npm/
.pnpm-store/
.shared/
.task/
.venv/
.vscode/
.variables.json
**/.cache/
**/Dockerfile
**/*.handlebars
build/
coverage/
dist/
node_modules/
pnpm-lock.yaml
venv/

5
.config/proselint.json Normal file
View file

@ -0,0 +1,5 @@
{
"checks": {
"typography.symbols": false
}
}

25
.config/requirements.txt Normal file
View file

@ -0,0 +1,25 @@
ansible-core==2.11.8; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0"
ansible==4.10.0; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0")
certifi==2021.10.8; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
cffi==1.15.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6"
charset-normalizer==2.0.12; python_full_version >= "3.6.0" and python_version >= "3.6"
cryptography==36.0.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6"
docker==5.0.3; python_version >= "3.6"
idna==3.3; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
jinja2==3.0.3; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6"
markupsafe==2.1.0; python_version >= "3.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.7"
ntlm-auth==1.5.0; python_version >= "2.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0"
packaging==21.3; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6"
pycparser==2.21; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6"
pyparsing==3.0.7; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6"
python-vagrant==0.5.15
pywin32==227; sys_platform == "win32" and python_version >= "3.6"
pywinrm==0.4.2
pyyaml==5.4.1; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.6.0"
requests-ntlm==1.1.0
requests==2.27.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
resolvelib==0.5.5; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.5.0"
six==1.16.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.3.0"
urllib3==1.26.8; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version < "4" and python_version >= "3.6"
websocket-client==1.2.3; python_version >= "3.6"
xmltodict==0.12.0; python_version >= "2.7" and python_full_version < "3.0.0" or python_full_version >= "3.4.0"

4
.config/run Normal file
View file

@ -0,0 +1,4 @@
#!/usr/bin/env bash
# @file .config/run
# @brief Wrapper for Task (the Taskfile.yml runner)

View file

@ -0,0 +1,3 @@
# Shared Taskfiles
All of the taskfiles in this folder are kept in sync with the [Shared common file repository](https://gitlab.com/megabyte-labs/common/shared). If you need to change any of the taskfiles, open a PR against the Shared common file repository. Once the PR is merged, the changes propagate down to the project-type-specific common file repositories and then down to the individual project repositories.

View file

@ -0,0 +1,119 @@
---
version: '3'
vars:
MAIN_TASKS_PATH: tasks/main.yml
META_PATH: meta/main.yml
MOLECULE_RESULTS_PATH: molecule/.results/logs
REQUIREMENTS_PATH: requirements.yml
VARIABLES_PATH: .variables.json
tasks:
ansibler:
deps:
- :install:pipx:ansibler
cmds:
- task: compatibility-chart
- task: populate-platforms
- task: tasks:{{.REPOSITORY_SUBTYPE}}
compatibility-chart:
deps:
- :install:software:jq
log:
error: Failed to generate operating system compatibility chart
start: Generating operating system compatibility chart
success: Successfully generated operating system compatibility chart
cmds:
- mkdir -p .cache
- if [ ! -f .cache/compatibility-chart.json ]; then echo "{}" > .cache/compatibility-chart.json; fi
- task: compatibility-chart:generate
- |
TMP="$(mktemp)"
jq -s -S '.[0] + .[1]' '{{.VARIABLES_PATH}}' .cache/compatibility-chart.json > "$TMP"
mv "$TMP" '{{.VARIABLES_PATH}}'
compatibility-chart:ansifilter:
deps:
- :install:software:ansifilter
cmds:
- |
for LOG in {{.MOLECULE_RESULTS_PATH}}; do
if [ "$LOG" != '{{.MOLECULE_RESULTS_PATH}}' ]; then
TMP="$(mktemp)" && cat "$LOG" | ansifilter > "$TMP" && mv "$TMP" "$LOG"
fi
done
sources:
- '{{.MOLECULE_RESULTS_PATH}}/*'
compatibility-chart:generate:
deps:
- :install:pipx:ansibler
cmds:
- task: compatibility-chart:ansifilter
- >
PATH="$PATH:$HOME/.local/bin"
{{.PYTHON_HANDLE}}ansibler --generate-compatibility-chart --molecule-results-dir '{{.MOLECULE_RESULTS_PATH}}'
--json-file .cache/compatibility-chart.json
sources:
- '{{.MOLECULE_RESULTS_PATH}}/*'
generates:
- .cache/compatibility-chart.json
populate-platforms:
deps:
- :install:pipx:ansibler
log:
      error: Failed to populate platforms in `meta/main.yml`
start: Populating the supported platforms listed in `meta/main.yml` based on the compatibility chart data
success: Successfully populated `meta/main.yml` platforms
cmds:
- cmd: |
PATH="$PATH:$HOME/.local/bin"
{{.PYTHON_HANDLE}}ansibler --populate-platforms --json-file .cache/compatibility-chart.json
ignore_error: true
sources:
- .cache/compatibility-chart.json
- meta/main.yml
role-dependencies:
deps:
- :install:software:jq
log:
error: Failed to acquire role dependency information
start: Gathering information about role dependencies
success: Acquired role dependency information
cmds:
- mkdir -p .cache
- if [ ! -f .cache/role-dependencies.json ]; then echo "{}" > .cache/role-dependencies.json; fi
- task: role-dependencies:generate
- if [ -f role-dependencies.json ]; then mv role-dependencies.json .cache/role-dependencies.json; fi
- |
TMP="$(mktemp)"
jq -s -S '.[0] + .[1]' '{{.VARIABLES_PATH}}' .cache/role-dependencies.json > "$TMP"
mv "$TMP" '{{.VARIABLES_PATH}}'
role-dependencies:generate:
deps:
- :install:pipx:ansibler
cmds:
- cmd: |
PATH="$PATH:$HOME/.local/bin"
{{.PYTHON_HANDLE}}ansibler --role-dependencies --json-file .cache/role-dependencies.json
ignore_error: true
sources:
- '{{.REQUIREMENTS_PATH}}'
generates:
- .cache/role-dependencies.json
tasks:playbook:
deps:
- :ansible:collection-dependencies
- role-dependencies
tasks:role:
deps:
- :ansible:collection-dependencies
- populate-platforms
- role-dependencies

View file

@ -0,0 +1,515 @@
---
version: '3'
tasks:
collections:download:
cmds:
- |
PATH="$PATH:$HOME/.local/bin"
ansible-galaxy collection download -r requirements.yml
docs:
deps:
- docs:roles
- docs:tags
docs:roles:
deps:
- :install:software:jq
- :install:software:yq
log:
error: Failed to acquire README chart data for the roles
start: Scanning roles folder and generating chart data
success: Finished populating roles folder chart data
cmds:
- |
TMP="$(mktemp)"
jq --arg name "Role Name" --arg description "Description" --arg github 'GitHub&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;' \
'.role_var_chart = [[$name, $description, $github]]' .variables.json > "$TMP"
mv "$TMP" .variables.json
for ROLE_PATH in roles/*/*; do
if [[ "$ROLE_PATH" != *"roles/deprecated/"* ]] && [[ "$ROLE_PATH" != *"roles/cloud/"* ]] && [[ "$ROLE_PATH" != *"roles/helpers/"* ]]; then
if [ "$(yq e '.galaxy_info.project.documentation' "${ROLE_PATH}/meta/main.yml")" != 'null' ]; then
DOCUMENTATION_LINK="*[Documentation]($(yq e '.galaxy_info.project.documentation' "${ROLE_PATH}/meta/main.yml" | sed 's/null//'))* | "
else
DOCUMENTATION_LINK=""
fi
if [ "$(yq e '.galaxy_info.project.homepage' "${ROLE_PATH}/meta/main.yml")" != 'null' ]; then
HOMEPAGE_LINK="*[Homepage]($(yq e '.galaxy_info.project.homepage' "${ROLE_PATH}/meta/main.yml"))* | "
else
HOMEPAGE_LINK=""
fi
GITHUB_URL="$(yq e '.galaxy_info.project.github' "${ROLE_PATH}/meta/main.yml")"
if [ "$GITHUB_URL" == 'Not open-source' ]; then
GITHUB_SHIELD="**❌ Closed source**"
elif [ "$GITHUB_URL" != 'null' ]; then
GITHUB_PATH="$(echo "$GITHUB_URL" | sed 's/https:\/\/github.com\///' | sed 's/\/$//')"
GITHUB_OWNER="$(echo "$GITHUB_PATH" | sed 's/\/.*$//')"
GITHUB_PROJECT_SLUG="$(echo "$GITHUB_PATH" | sed 's/^.*\///')"
GITHUB_SHIELD="[![GitHub Repo stars](https://img.shields.io/github/stars/$GITHUB_OWNER/$GITHUB_PROJECT_SLUG?style=social)]($GITHUB_URL)"
else
UPSTREAM_URL="$(yq e '.galaxy_info.project.upstream' "${ROLE_PATH}/meta/main.yml")"
if [ "$UPSTREAM_URL" != 'null' ]; then
GITHUB_SHIELD="**[✅ Open-source]($UPSTREAM_URL)**"
else
GITHUB_SHIELD="*N/A*"
fi
fi
SOFTWARE_NAME="$(jq -r '.blueprint.name' "${ROLE_PATH}/package.json")"
DESCRIPTION="$(jq -r '.blueprint.overview' "${ROLE_PATH}/package.json")"
ROLE_GITHUB_LINK="*[Role on GitHub]($(jq -r '.blueprint.repository.github' "${ROLE_PATH}/package.json"))*"
ANSIBLE_GALAXY_NAMESPACE="$(yq e '.galaxy_info.namespace' "${ROLE_PATH}/meta/main.yml")"
ANSIBLE_GALAXY_ROLE_NAME="$(yq e '.galaxy_info.role_name' "${ROLE_PATH}/meta/main.yml")"
ROLE_ANSIBLE_GALAXY_URL="https://galaxy.ansible.com/${ANSIBLE_GALAXY_NAMESPACE}/${ANSIBLE_GALAXY_ROLE_NAME}"
if [ "$ANSIBLE_GALAXY_NAMESPACE" != 'null' ] && [ "$ANSIBLE_GALAXY_ROLE_NAME" != 'null' ]; then
URL_LINK="**[${SOFTWARE_NAME}](${ROLE_ANSIBLE_GALAXY_URL})**"
else
URL_LINK="**${SOFTWARE_NAME}**"
fi
if [ "$(jq -r '.blueprint.ansible_galaxy_project_id' "${ROLE_PATH}/package.json")" == 'null' ]; then
ROLE_GITHUB_LINK=""
fi
DESCRIPTION_LINKS="(${HOMEPAGE_LINK}${DOCUMENTATION_LINK}${ROLE_GITHUB_LINK})"
if [ "$DESCRIPTION_LINKS" != "()" ]; then
DESCRIPTION_LINKS=" $(echo "$DESCRIPTION_LINKS" | sed 's/ | )$/)/')"
else
DESCRIPTION_LINKS=""
fi
TMP="$(mktemp)"
jq --arg name "${URL_LINK}" --arg description "${DESCRIPTION}${DESCRIPTION_LINKS}" --arg github "${GITHUB_SHIELD}" \
'.role_var_chart = .role_var_chart + [[$name, $description, $github]]' .variables.json > "$TMP"
mv "$TMP" .variables.json
fi
done
docs:tags:
deps:
- :install:npm:leasot
summary: |
```shell
# @description Processes leasot data and returns .variables.json data including charts written in @appnest/readme format
#
# @arg $1 The file that the leasot JSON was written to
# @arg $2 The tag being processed
function populateChartVar() {
...
}
```
vars:
DOC_IDS: '@binaryapp,@binarycli,@brew,@cask,@chrome,@firefox,@gem,@helm,@npm,@pypi,@vscode'
log:
error: Failed to acquire package information from comments via `leasot`
start: Scanning and acquiring package information in comments via `leasot`
success: Acquired package information from comments
cmds:
- |
function populateChartVar() {
CHART='[["Package", "Description", "GitHub&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"]'
jq --arg tag "$(echo $2 | tr '[a-z]' '[A-Z]')" -r '.[] | select(.tag == $tag) | .
| del(. ["file", "ref", "line", "tag"]) | .text' "$1" | while read COMMENT; do
if [ "$CHART" != '[' ]; then
CHART="${CHART},"
fi
LINK="$(echo $COMMENT | sed 's/ - .*//')"
URL_LINK="$(echo "$LINK" | sed 's/@tui //' | sed 's/@service //' | sed 's/@binary //' | sed 's/@cli //' | sed 's/@application //' | sed 's/@menubar //' | sed 's/@webapp //' | sed 's/^\[\([^)]*\)\](\([^)]*\)).*$/\[\1\](\2)/' | sed 's/ $//' | sed 's/\/$//')"
if [[ "$LINK" == *"[GitHub](NO_GITHUB_REPOSITORY_LINK)"* ]] || [[ "$LINK" != *"https://github.com"* ]]; then
GITHUB_SHIELD="*N/A*"
else
GITHUB_PATH="$(echo "$LINK" | sed 's/.*\[GitHub\](https:\/\/github.com\/\([^|]*\)).*/\1/' | \
sed 's/.*\[.*\](https:\/\/github.com\/\([^)]*\)).*/\1/' | sed 's/\/$//')"
GITHUB_OWNER="$(echo "$GITHUB_PATH" | sed 's/\/.*$//')"
GITHUB_PROJECT_SLUG="$(echo "$GITHUB_PATH" | sed 's/^.*\///')"
GITHUB_URL="https://github.com/${GITHUB_OWNER}/${GITHUB_PROJECT_SLUG}"
GITHUB_SHIELD="[![GitHub Repo stars](https://img.shields.io/github/stars/$GITHUB_OWNER/$GITHUB_PROJECT_SLUG?style=social)]($GITHUB_URL)"
fi
if [[ "$LINK" == *"[Documentation]"* ]]; then
DOCUMENTATION_LINK="*[Documentation]("$(echo "$LINK" | sed 's/.*\[Documentation\](\([^)]*\)).*/\1/')")* | "
else
DOCUMENTATION_LINK=""
fi
if [[ "$LINK" == *"[Homepage]"* ]]; then
HOMEPAGE_LINK="*[Homepage]("$(echo "$LINK" | sed 's/.*\[Homepage\](\([^)]*\)).*/\1/' | sed 's/ $//')")* | "
else
HOMEPAGE_LINK=""
fi
if [[ "$LINK" == *"[Helm]"* ]] || [[ "$LINK" == *"[Operator]"* ]]; then
REFERENCE_LINK="*[Helm Reference]("$(echo "$LINK" | sed 's/.*\[Helm\](\([^)]*\)).*/\1/' | sed 's/.*\[Operator\](\([^)]*\)).*/\1/')")* | "
else
REFERENCE_LINK=""
fi
DESCRIPTION_LINKS="(${HOMEPAGE_LINK}${DOCUMENTATION_LINK}${REFERENCE_LINK})"
if [ "$DESCRIPTION_LINKS" != "()" ]; then
DESCRIPTION_LINKS=" $(echo "$DESCRIPTION_LINKS" | sed 's/ | )$/)/')"
else
DESCRIPTION_LINKS=""
fi
DESCRIPTION="$(echo $COMMENT | sed 's/.* - //' | sed 's/\"/\\\"/g')"
CHART="${CHART}[\"**$URL_LINK**\",\"${DESCRIPTION}${DESCRIPTION_LINKS}\",\"$GITHUB_SHIELD\"]"
done
CHART="${CHART}]"
TMP_CHART="$(mktemp)"
KEY="$(echo $2 | sed 's/^@//')"
jq --arg chart "$CHART" --arg key "${KEY}_var_chart" '.[$key] = ($chart | fromjson)' .variables.json > "$TMP_CHART"
mv "$TMP_CHART" .variables.json
}
TMP="$(mktemp)"
leasot --tags '{{.DOC_IDS}}' --reporter json './environments/prod/group_vars/**/*.yml' > "$TMP" || true
VARIABLES_JSON="$(jq '.' .variables.json)"
for ID in {{replace "," " " .DOC_IDS}}; do
populateChartVar "$TMP" "$ID"
done
environment:
desc: Prompts for which environment to use and then symlinks to it
hide: '{{ne (print .REPOSITORY_TYPE "-" .REPOSITORY_SUBTYPE) "ansible-playbook"}}'
summary: |
# Switch environments using an interactive dialogue
Ansible does not really provide any great ways to switch between environments (or sets of
`host_vars/`, `group_vars/` etc.). If you place all the files and folders you wish to constitute
as an environment inside a folder named as the name of the environment then you can use
this task to handle the symlinking and switching between environments.
**Example of opening the interactive prompt:**
`task ansible:environment`
      **You can directly switch environments to `environments/prod/` by running:**
`task ansible:environment -- prod`
cmds:
- task: environment:{{if .CLI_ARGS}}cli{{else}}prompt{{end}}
environment:cli:
log:
error: Encountered an error while switching environments to `{{.CLI_ARGS}}`
start: Switching environment to `{{.CLI_ARGS}}`
success: Successfully switched environment to `{{.CLI_ARGS}}`
cmds:
- |
{{if .CLI_ARGS}}
for ITEM in environments/{{.CLI_ARGS}}/*; do
ITEM="$(echo $ITEM | sed 's/.*\///')"
if test -L "$ITEM" || ! test -e "$ITEM"; then
rm -f "$ITEM"
ln -s "./environments/{{.CLI_ARGS}}/$ITEM" "$ITEM"
.config/log success "Successfully symlinked "'`'"$ITEM"'`'" from ./environments/{{.CLI_ARGS}}/$ITEM"
else
.config/log warn "$ITEM exists in the root and was not a symlink so it was skipped to prevent possible data loss"
fi
done
{{else}}
exit 69
{{end}}
- .config/log finish 'Success `environment:cli`'
environment:prompt:
env:
MARKDOWN: |
# Symlink Environment
Answer the prompt below to switch between environments. Each environment
should be a folder with folders and files you wish to link to from the root
of the project. They should normally consist of a host_vars, group_vars,
inventory, and files folder (but can contain any files/folders you wish to link).
Each environment should have its own folder in the `environments/` folder titled
as the name of the environment. After you select an answer, the script will
        symlink all of the items in the environments folder to the root, as long as
        nothing other than a symlink already exists at the target location (i.e. it
        will overwrite symlinks but not files).
cmds:
- TMP="$(mktemp)" && echo "$MARKDOWN" > "$TMP" && .config/log md "$TMP"
- task: environment:prompt:continue
environment:prompt:continue:
interactive: true
deps:
- :install:software:gum
- :install:software:jq
cmds:
- |
ENVIRONMENT_OPTIONS="$(find ./environments -maxdepth 1 -mindepth 1 | sed 's/\.\/environments\///' | jq -R '[.]' | jq -s -c -r 'add | join(" ")')"
.config/log prompt 'Select an environment from the `environments/` folder to symlink to'
ENV_OPTION="$(gum choose $(echo $ENVIRONMENT_OPTIONS))"
task ansible:playbook:environment:cli -- "$ENV_OPTION"
find-missing:files:
desc: Find roles that are missing files
hide: '{{ne (print .REPOSITORY_TYPE "-" .REPOSITORY_SUBTYPE) "ansible-playbook"}}'
summary: |
# Find roles that are missing any given file
This task scans through all the folders in the roles/ directory and checks
for the presence of a file that you pass in through the CLI.
**Example usage:**
`task find-missing -- logo.png`
The example above will look through all the folders two levels deep (e.g. `./roles/tools/nmap`,
`./roles/system/snapd`) in the roles folder and display any roles that are missing the file.
log:
error: Failed to scan the `/roles/*` folders for roles missing `{{.CLI_ARGS}}`
start: Determining which roles in the `/roles/*` folders are missing `{{.CLI_ARGS}}`
success: Finished scanning for roles missing `{{.CLI_ARGS}}` (if there are any then they should be listed above)
cmds:
- |
FILES=$(find ./roles -mindepth 2 -maxdepth 2 -type d '!' -exec test -e "{}/{{.CLI_ARGS}}" ';' -print)
.config/log info 'Found '"$(echo "$FILES" | wc -l | xargs)"' roles missing {{.CLI_ARGS}}'
echo "$FILES"
preconditions:
- sh: test -d roles
msg: The `roles/` folder is missing. Is the project set up right?
find-missing:roles:
summary: |
# Find roles that are not in main.yml
This task will collect all the role paths in the `roles/` folder and return a list
of roles that are not present in the `main.yml` file.
If you want to scan something other than `main.yml`, you can pass the file in as a
CLI argument like so:
**Example scanning file other than `main.yml`:**
`task ansible:playbook:find-missing:roles -- playbooks/qubes.yml`
cmds:
- |
TMP="$(mktemp)"
RESULTS="$(mktemp)"
while read ROLE; do
ROLE_TITLE="$(echo "$ROLE" | sed 's/^.\///')"
if ! grep "$ROLE_TITLE" '{{if .CLI_ARGS}}{{.CLI_ARGS}}{{else}}main.yml{{end}}' > /dev/null; then
echo "$ROLE_TITLE" >> "$RESULTS"
fi
        done < <(find ./roles -maxdepth 2 -mindepth 2)
if grep ".*" "$RESULTS"; then
.config/log info 'The roles missing from `{{if .CLI_ARGS}}{{.CLI_ARGS}}{{else}}main.yml{{end}}` are:'
cat "$RESULTS"
else
.config/log info 'All of the roles are included in `{{if .CLI_ARGS}}{{.CLI_ARGS}}{{else}}main.yml{{end}}`'
fi
remotes:
deps:
- :install:software:git
- :install:software:jq
summary: |
# Ensures each role is added as a remote and a sub-repo (if applicable)
This task cycles through all the roles in the `/roles` folder and ensures
they are added as remotes. This helps with managing the git trees used
for combining the many role repositories into the playbook mono-repository.
It also adds a remote for a private submodule that is intended to store
files that are not meant to be shared. The remote is named `private`.
run: once
log:
error: Failed to set remotes for one or more of the roles in the `/roles/*` folders
start: Adding git remotes for all of the roles in the `/roles/*` folders
success: Successfully added git remotes for all of the roles in the `/roles/*` folders
cmds:
- git init -q
- |
for ROLE_RELATIVE_PATH in roles/*/*; do
if [ -f "${ROLE_RELATIVE_PATH}/package.json" ]; then
ROLE_FOLDER="$(basename $ROLE_RELATIVE_PATH)"
ROLE_HTTPS_REPO="$(jq -r '.blueprint.repository.gitlab' $ROLE_RELATIVE_PATH/package.json)"
ROLE_SSH_REPO="$(echo $ROLE_HTTPS_REPO | sed 's/https:\/\/gitlab.com\//git@gitlab.com:/' | sed 's/$/.git/')"
if git config "remote.${ROLE_FOLDER}.url" > /dev/null; then
git remote set-url "$ROLE_FOLDER" "$ROLE_SSH_REPO"
else
git remote add "$ROLE_FOLDER" "$ROLE_SSH_REPO"
fi
if [ -d $ROLE_RELATIVE_PATH/.git ] && [ ! -f $ROLE_RELATIVE_PATH/.gitrepo ]; then
task ansible:playbook:subrepo:init -- $ROLE_RELATIVE_PATH
fi
else
.config/log warn "${ROLE_RELATIVE_PATH}/package.json is missing!"
fi
done
run:
cmds:
- task: run:{{if .CLI_ARGS}}cli{{else}}prompt{{end}}
run:cli:
deps:
- task: :install:python:requirements
env:
INSTALL_OPTIONS: --no-dev
- :symlink:playbook
vars:
PLAYBOOK_MAIN:
sh: |
if [ -n "$PLAYBOOK_MAIN" ]; then
echo "$PLAYBOOK_MAIN"
else
echo "main.yml"
fi
log:
      error: Error encountered while running `ansible-playbook -i {{.CLI_ARGS}} --ask-vault-pass {{.PLAYBOOK_MAIN}}`
      start: Running `ansible-playbook -i {{.CLI_ARGS}} --ask-vault-pass {{.PLAYBOOK_MAIN}}`
      success: Successfully ran `ansible-playbook -i {{.CLI_ARGS}} --ask-vault-pass {{.PLAYBOOK_MAIN}}`
cmds:
- |
PATH="$PATH:$HOME/.local/bin"
if [ -z "$ANSIBLE_VAULT_PASSWORD" ]; then
.config/log info 'The `ANSIBLE_VAULT_PASSWORD` environment variable is not set so you will be prompted for the password'
ansible-playbook --skip-tags "mas" -i {{.CLI_ARGS}} --ask-vault-pass "{{.PLAYBOOK_MAIN}}"
else
echo "$ANSIBLE_VAULT_PASSWORD" > "$HOME/.VAULT_PASSWORD"
export ANSIBLE_VAULT_PASSWORD_FILE="$HOME/.VAULT_PASSWORD"
.config/log info 'Bypassing Ansible Vault password prompt since the `ANSIBLE_VAULT_PASSWORD` environment variable is set'
ansible-playbook --skip-tags "mas" -i {{.CLI_ARGS}} "{{.PLAYBOOK_MAIN}}"
fi
run:prompt:
summary: '{{.MARKDOWN}}'
vars:
MARKDOWN: |
# Run the Playbook
      This set of prompts will run the `main.yml` playbook after you specify:
(1) The "environment"
The environment is a collection of folders that should, at the very minimum,
include "files", "group_vars", "host_vars", and "inventories". Each folder in
the "environments" folder constitutes a different environment. By using
        environments, you can separate different sets of variables/files or even separate
your private variables out into a sub-module.
(2) An inventory file
The Ansible inventory stored in the "inventories" folder. This will generally be
a YML file with host connection information that also correlates the inventory with
the proper host_vars and group_vars. It is assumed that your sudo username and
password are encrypted inside the inventory (via "ansible-vault").
PLAYBOOK_DESCRIPTIONS: |
# Playbook Descriptions
Playbooks are where you store your main logic in Ansible. The `main.yml` file in the
root of the repository is a generic one-size-fits-all approach. You could theoretically
store all your logic in one playbook for multiple scenarios with conditional logic but
        it might be easier to create separate playbooks in some cases. The `main.yml` should work
in most scenarios and is a great starting point.
## Alternate Playbook Descriptions
Alternate playbooks are stored in the `playbooks/` directory. They are described below.
*If you want their description to show up in this message, you will have to edit the appropriate
field in `package.json`.*
env:
MARKDOWN: '{{.MARKDOWN}}'
cmds:
- |
MD_TMP="$(mktemp)"
echo "$MARKDOWN" > "$MD_TMP"
.config/log md "$MD_TMP"
- task: run:prompt:continue
run:prompt:continue:
interactive: true
deps:
- :install:software:jq
cmds:
- task: environment
- echo "\"inventories"$(find ./inventories/ -mindepth 1 -maxdepth 1 | sed 's/\.\/inventories\///' | jq -R '[.]' | jq -s -c -r 'add | join("\" \"inventories")')"\""
- |
INVENTORY_OPTIONS_LENGTH="$(find ./inventories/ -mindepth 1 -maxdepth 1 | sed 's/\.\/inventories\///' | jq -R '[.]' | jq -s -c -r 'add | length')"
if [[ "$INVENTORY_OPTIONS_LENGTH" == '0' ]]; then
.config/log error 'There are no inventory files present in the `inventories/` folder' && exit 1
else
INVENTORY_OPTIONS="$(echo "\""$(find ./inventories/ -mindepth 1 -maxdepth 1 | sed 's/\.\///' | jq -R '[.]' | jq -s -c -r 'add | join("\" \"")')"\"")"
.config/log prompt 'Which inventory would you like to use?'
CHOSEN_INVENTORY="$(.config/log choose $INVENTORY_OPTIONS)"
export PLAYBOOK_MAIN="main.yml"
if ! gum confirm "Would you like to use the main.yml playbook?"; then
PLAYBOOKS_OPTIONS="$(echo "\"playbooks"$(find ./playbooks/ -mindepth 1 -maxdepth 1 -name "*.yml" | sed 's/\.\/playbooks\///' | jq -R '[.]' | jq -s -c -r 'add | join("\" \"playbooks")')"\"")"
PLAYBOOK_DESCS_TMP="$(mktemp)" && echo "$PLAYBOOK_DESCRIPTIONS" > "$PLAYBOOK_DESCS_TMP"
TMP_LIST="$(mktemp)"
find ./playbooks/ -mindepth 1 -maxdepth 1 -name "*.yml" | sed 's/\.\/playbooks\///' | jq -R '[.]' | jq -s -c -r 'add' >> "$TMP_LIST"
echo "* **main.yml:** A generic playbook that attempts to install everything" >> "$PLAYBOOK_DESCS_TMP"
jq -c -r '.[]' "$TMP_LIST" | while read SLUG; do
PLAYBOOK_DESC="$(jq -r '.blueprint.fileDescriptions[$file]' package.json)"
if [ "$PLAYBOOK_DESC" != 'null' ]; then
echo "* **playbooks/${SLUG}.yml:** $PLAYBOOK_DESC" >> "$PLAYBOOK_DESCS_TMP"
fi
done
.config/log md "$PLAYBOOK_DESCS_TMP"
.config/log prompt 'Select the playbook you would like to provision with.'
export PLAYBOOK_MAIN="$(.config/log choose $PLAYBOOKS_OPTIONS)"
fi
task ansible:playbook:run:cli -- "$CHOSEN_INVENTORY"
fi
subrepo:init:
deps:
- :install:software:subrepo
summary: |
# Add Role as a Sub-Repository
Since roles should each have their own repository for Ansible Galaxy and to make it easier
for the community to download specific roles, they must be declared as sub-repositories.
Submodules could also be used but we use sub-repos instead because they are more flexible.
In the main playbook, if someone clones the playbook, the playbook and all the roles will download
without any requirement to initialize submodules. At the same time, each role can be in its own
repository. The playbook recognizes roles like this because they have a `.gitrepo` file that is only
saved in the playbook version of the role. Users can interact with the playbook and its role
repositories transparently without any need to understand what git subrepos are.
Managers of roles can update the role repositories without any need to understand what git subrepos
are. Managers of the playbook can use the tool [git-subrepo](https://github.com/ingydotnet/git-subrepo)
to perform various actions including pulling changes from individual role repositories and
other actions.
Usage:
`task ansible:playbook:subrepo:init -- path/to/folder/with.git/folder`
cmds:
- task: :git:commit:automated
- |
BASENAME="$(basename {{.CLI_ARGS}})"
REMOTE="$(git remote get-url $BASENAME)"
HUSKY=0 git subrepo init {{.CLI_ARGS}} -r "$REMOTE" -b master
types:
deps:
- :install:npm:quicktype
summary: |
# Generate Types from Vaulted Files
Automatically generate types from vaulted files.
**Example Usage:**
      `task ansible:playbook:types -- ./environments/prod`
vars:
TARGET_DIR: '{{if .CLI_ARGS}}{{.CLI_ARGS}}{{else}}./environments/prod{{end}}'
cmds:
- task: vault:decrypt
vars:
TARGET_DIR: '{{.TARGET_DIR}}'
- |
qtype() {
local FILE_NO_EXT="$(echo "$1" | sed 's/.yml$//')"
yq e -o=json '.' $1 | quicktype -l schema -o ${FILE_NO_EXT}.schema.json
}
while read FILE; do
qtype "$FILE"
done < <(find {{.TARGET_DIR}} -type f -name "*vault.yml")
- task: vault:encrypt
vars:
TARGET_DIR: '{{.TARGET_DIR}}'
vault:decrypt:
vars:
TARGET_DIR: '{{if .CLI_ARGS}}{{.CLI_ARGS}}{{else}}./environments/prod{{end}}'
cmds:
- find {{.TARGET_DIR}} -type f -name "*vault.yml" -printf "%h/\"%f\" " | xargs ansible-vault decrypt
vault:encrypt:
vars:
TARGET_DIR: '{{if .CLI_ARGS}}{{.CLI_ARGS}}{{else}}./environments/prod{{end}}'
cmds:
- find {{.TARGET_DIR}} -type f -name "*vault.yml" -printf "%h/\"%f\" " | xargs ansible-vault encrypt
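  # Example usage of the two vault helpers above (the `ansible:playbook:`
  # namespace is inferred from the other task references in this file):
  #   task ansible:playbook:vault:decrypt -- ./environments/dev
  #   task ansible:playbook:vault:encrypt -- ./environments/dev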

View file

@ -0,0 +1,180 @@
---
version: '3'
vars:
META_PATH: meta/main.yml
REQUIREMENTS_PATH: requirements.yml
tasks:
collection:
deps:
- :install:software:yq
log:
error: Failed to auto-populate the `{{.KEY}}` collection
start: Determining if the `{{.KEY}}` collection should be added to the `{{.REQUIREMENTS_PATH}}`
cmds:
- |
COLLECTIONS="$(yq eval '.collections' '{{.REQUIREMENTS_PATH}}')"
REFERENCES="$(grep -Ril '{{.KEY}}' ./tasks || true)"
if [[ ! "$COLLECTIONS" =~ '{{.KEY}}' ]] && [ "$REFERENCES" ]; then
yq eval -i -P '.collections = .collections + {{.VAL}}' '{{.REQUIREMENTS_PATH}}'
.config/log success 'Automatically added `{{.VAL}}` to {{.REQUIREMENTS_PATH}}'
fi
- task: :fix:yaml:dashes
vars:
CLI_ARGS: '{{.REQUIREMENTS_PATH}}'
collection:force:
deps:
- :install:software:yq
log:
error: Failed to forcefully auto-populate the `{{.KEY}}` collection
start: Determining if the `{{.KEY}}` collection should be added to the `{{.REQUIREMENTS_PATH}}` (forcefully)
cmds:
- |
COLLECTIONS="$(yq eval '.collections' '{{.REQUIREMENTS_PATH}}')"
if [[ ! "$COLLECTIONS" =~ '{{.KEY}}' ]]; then
yq eval -i -P '.collections = .collections + {{.VAL}}' '{{.REQUIREMENTS_PATH}}'
.config/log success 'Automatically added `{{.VAL}}` to {{.REQUIREMENTS_PATH}}'
fi
- task: :fix:yaml:dashes
vars:
CLI_ARGS: '{{.REQUIREMENTS_PATH}}'
dependencies:
desc: 'Attempt to automatically populate `{{.META_PATH}}` and `{{.REQUIREMENTS_PATH}}`'
hide: '{{ne (print .REPOSITORY_TYPE "-" .REPOSITORY_SUBTYPE) "ansible-role"}}'
summary: |
# Automatically populate `{{.META_PATH}}` and `{{.REQUIREMENTS_PATH}}`
A role can sometimes have dependencies that need to be installed prior to being run (e.g. most
roles in Ansible >2.9 need the `community.general` collection installed). Roles also sometimes
need other roles to run before they are run (e.g. a task that installs a Node.js package needs
the Node.js installer to run first). This task will scan for common dependencies by doing a text
search for a handful of common strings. It will then attempt to automatically populate
`{{.META_PATH}}` and the `{{.REQUIREMENTS_PATH}}`.
      Dependencies and collections it attempts to auto-populate:
* chocolatey.chocolatey
* community.general
* community.general.homebrew
* community.general.gem
* community.general.npm
* community.general.snap
log:
start: Auto-populating the `{{.META_PATH}}` and `{{.REQUIREMENTS_PATH}}` with common dependencies (when appropriate)
success: Auto-populated the `{{.META_PATH}}` and `{{.REQUIREMENTS_PATH}}` with common dependencies
cmds:
- task: collection
vars:
KEY: chocolatey.chocolatey
VAL: >-
{"name": "chocolatey.chocolatey", "source": "https://galaxy.ansible.com"}
- task: collection
vars:
KEY: community.crypto
VAL: >-
{"name": "community.crypto", "source": "https://galaxy.ansible.com"}
- task: collection
vars:
KEY: community.general
VAL: >-
{"name": "community.general", "source": "https://galaxy.ansible.com"}
- task: collection:force
vars:
KEY: google.cloud
VAL: >-
{"name": "google.cloud", "source": "https://galaxy.ansible.com"}
- task: dependency
vars:
KEY: community.general.homebrew
ROLE: professormanhattan.homebrew
VAL: >-
{"role": "professormanhattan.homebrew", "when": "ansible_os_family == 'Darwin'"}
- task: dependency
vars:
KEY: community.general.npm
ROLE: professormanhattan.nodejs
VAL: >-
{"role": "professormanhattan.nodejs"}
- task: dependency
vars:
KEY: community.general.gem
ROLE: professormanhattan.ruby
VAL: >-
{"role": "professormanhattan.ruby"}
- task: dependency
vars:
KEY: community.general.snap
ROLE: professormanhattan.snapd
VAL: >-
{"role": "professormanhattan.snapd", "when": "ansible_system == 'Linux'"}
- task: dependency
vars:
KEY: professormanhattan.startmenu
ROLE: professormanhattan.startmenu
VAL: >-
{"role": "professormanhattan.startmenu", "when": "ansible_system == 'Windows'"}
- task: :ansible:sync:requirements
sources:
- '{{.META_PATH}}'
- '{{.REQUIREMENTS_PATH}}'
- tasks/**/*.yml
dependency:
deps:
- :install:software:yq
log:
error: Failed to auto-populate the `{{.KEY}}` role
start: Determining if the `{{.KEY}}` role should be added to the `{{.META_PATH}}`
cmds:
- |
DEPENDENCIES="$(yq eval '.dependencies' '{{.META_PATH}}')"
REFERENCES="$(grep -Ril '{{.KEY}}' ./tasks || true)"
if [[ ! "$DEPENDENCIES" =~ '{{.ROLE}}' ]] && [ "$REFERENCES" ]; then
if [[ '{{.GALAXY_NAMESPACE}}.{{.GALAXY_ROLE_NAME}}' != '{{.ROLE}}' ]]; then
yq eval -i -P '.dependencies = .dependencies + {{.VAL}}' '{{.META_PATH}}'
.config/log success 'Automatically added `{{.VAL}}` to {{.META_PATH}}'
fi
fi
- task: :fix:yaml:dashes
vars:
CLI_ARGS: '{{ .META_PATH }}'
status:
- '[[ "$DEPENDENCIES" =~ "{{.ROLE}}" ]] || [ ! "$REFERENCES" ]'
meta:
deps:
- :install:npm:prettier
- :install:software:jq
- :install:software:yq
vars:
DESCRIPTION:
sh: yq eval '.galaxy_info.description' '{{.META_PATH}}'
REFERENCE_LINK: Take a look at an [example meta/main.yml
file](https://gitlab.com/megabyte-labs/ansible-roles/androidstudio/-/blob/master/{{.META_PATH}}).
log:
error: Failed to synchronize `package.json` with `{{.META_PATH}}`
start: Updating `package.json` blueprint description and slug using values present in `{{.META_PATH}}`
success: Ensured `package.json` is synchronized with `{{.META_PATH}}`
cmds:
- |
TMP="$(mktemp)"
cat package.json
jq --arg a '{{.DESCRIPTION}}' --arg b '{{.GALAXY_ROLE_NAME}}' '.blueprint.description = $a | .blueprint.slug = $b' package.json > "$TMP"
mv "$TMP" package.json
- prettier --write package.json
sources:
- meta/main.yml
- package.json
preconditions:
- sh: 'test -f {{.META_PATH}}'
msg: 'The `{{.META_PATH}}` file is missing. A properly filled out `{{.META_PATH}}` file is required for the
update process. {{.REFERENCE_LINK}}'
- sh: '[[ "{{.DESCRIPTION}}" != "null" ]]'
msg: 'The `{{.META_PATH}}` file has a null value for the `galaxy_info.description` key. Ensure the description
is populated in `{{.META_PATH}}`. {{.REFERENCE_LINK}}'
- sh: '[[ "{{.GALAXY_ROLE_NAME}}" != "null" ]]'
msg: 'The `{{.META_PATH}}` file has a null value for the `galaxy_info.role_name` key. Ensure the role name is
populated in `{{.META_PATH}}`. {{.REFERENCE_LINK}}'

View file

@ -0,0 +1,985 @@
---
version: '3'
vars:
MOLECULE_LOGS_PATH: molecule/.results/logs
MOLECULE_TEST_OPTIONS: ANSIBLE_STDOUT_CALLBACK=community.general.yaml
ANSIBLE_CALLBACKS_ENABLED="junit, ansible.posix.profile_tasks, ansible.posix.timer"
JUNIT_OUTPUT_DIR="molecule/.results/junit" JUNIT_FAIL_ON_CHANGE=true JUNIT_HIDE_TASK_ARGUMENTS=true
PYTHON_MOLECULE_HANDLE:
sh: |
if [ -n "$CI" ]; then
echo ''
else
if command -v unbuffer > /dev/null; then
echo 'unbuffer {{.PYTHON_HANDLE}}'
else
echo '{{.PYTHON_HANDLE}}'
fi
fi
tasks:
allure:report:
deps:
- :install:software:allure
log:
error: Failed to generate and/or open the unit test report
start: Generating and opening unit test report
success: Successfully generated and opened unit test report
cmds:
- allure generate molecule/.results/junit --output allure-reports --clean
- mkdir -p molecule/.results/junit
- cp -rf allure-reports/history/ molecule/.results/junit/
- .config/log info 'Opening JUnit results with Allure in the default browser'
- allure open allure-reports
ansifilter:
deps:
- :install:software:ansifilter
cmds:
- TMP="$(mktemp)" && cat {{.RESULTS_FILE}} | ansifilter > "$TMP" && mv "$TMP" {{.RESULTS_FILE}}
status:
- '[ -n "$CI" ]'
default:
log:
start: Running default Ansible test
cmds:
- task: molecule:docker:matrix
local:
desc: Run the Ansible play on the local machine (or via WSL - see task summary)
hide: '{{ne (print .REPOSITORY_TYPE "-" .REPOSITORY_SUBTYPE) "ansible-playbook"}}'
summary: |
# Run the Ansible play on the local machine
This task will use the inventory stored in `test/<OS>/inventory`, the playbook
file stored in `test/<OS>/test.yml`, and the Ansible configuration file stored in
`test/<OS>/ansible.cfg` to run the play. At the beginning of the play, you will
be prompted for the sudo password.
cmds:
- task: local:test
local:test:
cmds:
- |
if [ -n "$CI" ]; then
.config/log info '`$CI` environment variable is present'
task ansible:test:local:test:ci
else
task ansible:test:local:test:local
fi
local:test:ci:
deps:
- :symlink:{{.REPOSITORY_SUBTYPE}}
cmds:
- |
if [ -n "$WINDOWS_ANSIBLE_TEST" ]; then
pip3 install ansible 'pywinrm[credssp]'
else
pip3 install ansible
fi
- |
PATH="$PATH:$HOME/.local/bin"
ansible-galaxy install -r requirements.yml
- task: local:test:logic
local:test:local:
deps:
- :symlink:{{.REPOSITORY_SUBTYPE}}
- task: :install:python:requirements
env:
INSTALL_OPTIONS: --no-dev
cmds:
- task: local:test:logic
local:test:logic:
vars:
ANSIBLE_CFG: |-
[winrm_connection]
scheme = https
server_cert_validation = ignore
transport = credssp,ssl
ROLE_NAME: '{{.GALAXY_NAMESPACE}}.{{.GALAXY_ROLE_NAME}}'
SUDO_PASS_PARAM:
sh: if [ -z "$CI" ]; then echo ' --ask-sudo-pass'; else echo ''; fi
WINDOWS_INVENTORY: windows-ci ansible_host=localhost ansible_user=runneradmin
ansible_password=AnsibleTest999 ansible_connection=winrm ansible_winrm_server_cert_validation=ignore
ansible_winrm_transport=credssp
log:
error: Encountered error while testing the Ansible playbook locally
start: Testing the Ansible playbook locally
success: Successfully tested the Ansible playbook locally
cmds:
- |
if [ -n "$WINDOWS_ANSIBLE_TEST" ]; then
echo '{{.WINDOWS_INVENTORY}}' > inventory
echo '{{.ANSIBLE_CFG}}' > ansible.cfg
else
echo 'localhost ansible_connection=local' > inventory
fi
- task: local:test:playbook
vars:
PLAY_HOSTS:
sh: if [ -z "$WINDOWS_ANSIBLE_TEST" ]; then echo 'localhost'; else echo 'windows-ci'; fi
PLAY_ROLE_NAME: '{{.ROLE_NAME}}'
- |
if [ -z "$CI" ]; then
.config/log info 'Prompting for sudo password (required for non-CI environments)'
fi
if [ -n "$WINDOWS_ANSIBLE_TEST" ]; then
export ANSIBLE_CONFIG="$PWD/ansible.cfg"
fi
PATH="$PATH:$HOME/.local/bin"
{{.PYTHON_MOLECULE_HANDLE}} ansible-playbook --skip-tags "mas" -i inventory{{.SUDO_PASS_PARAM}} test/{{OS}}/test.yml 2>&1 | tee debug.log || EXIT_CODE=$?
- task: post:molecule:log
vars:
RESULTS_FILE: debug.log
local:test:playbook:
vars:
TEST_PLAY: |
---
- hosts: {{.PLAY_HOSTS}}
roles:
- role: '{{.PLAY_ROLE_NAME}}'
cmds:
- echo '{{.TEST_PLAY}}' > test.yml
molecule:ci:requirements:
cmds:
- |
if [ -n "$WINDOWS_ANSIBLE_TEST" ]; then
.config/log info 'Running `pip3 install ansible ansibler molecule pywinrm[credssp]`'
pip3 install ansible ansibler molecule 'pywinrm[credssp]'
else
.config/log info 'Running `pip3 install ansible ansibler molecule`'
pip3 install ansible ansibler molecule
fi
- |
.config/log info 'Running `ansible-galaxy install --ignore-errors -r requirements.yml`'
PATH="$PATH:$HOME/.local/bin"
ansible-galaxy install --ignore-errors -r requirements.yml
molecule:dependencies:
run: once
cmds:
- |
if [ -n "$CI" ]; then
.config/log info '`$CI` environment variable is present'
task ansible:test:molecule:dependencies:ci
else
task ansible:test:molecule:dependencies:local
fi
molecule:dependencies:ci:
cmds:
- task: molecule:ci:requirements
status:
- '[ -z "$CI" ]'
molecule:dependencies:local:
deps:
- :install:python:requirements
- :install:software:expect
- :install:software:sshpass
log:
error: Encountered error while installing Ansible Galaxy requirements defined in `requirements.yml`
start: Installing Ansible Galaxy requirements defined in `requirements.yml`
success: Installed Ansible Galaxy requirements defined in `requirements.yml`
cmds:
- |
PATH="$PATH:$HOME/.local/bin"
if poetry &> /dev/null; then
{{.PYTHON_HANDLE}} ansible-galaxy install --ignore-errors -r requirements.yml
else
.config/log info 'Current shell is already a Poetry virtual environment'
ansible-galaxy install --ignore-errors -r requirements.yml
fi
- task: :symlink:{{.REPOSITORY_SUBTYPE}}
status:
- '[ -n "$CI" ]'
molecule:docker:
desc: Runs a Docker Molecule test
hide: '{{ne .REPOSITORY_TYPE "ansible"}}'
summary: |
# Runs a Docker Molecule test
This task runs the project's Molecule tests using Docker. It only tests against
Linux systems.
**Opens a prompt:**
`task ansible:test:molecule:docker`
**Runs the test against the "CentOS-8" group directly:**
`task ansible:test:molecule:docker -- CentOS-8`
**Save test results for use with auto-generating compatibility chart:**
`task ansible:test:molecule:docker:matrix`
cmds:
- |
if ! docker run --rm hello-world; then
.config/log warn 'The command `docker run --rm hello-world` failed'
if [ -d '/Applications/Docker.app' ]; then
.config/log info 'Attempting to open `Applications/Docker.app` (Docker Desktop for macOS)'
open /Applications/Docker.app
sleep 30
fi
fi
- task: molecule:docker:{{if .CLI_ARGS}}cli{{else}}prompt{{end}}
molecule:docker:cli:
deps:
- molecule:dependencies
- :install:software:docker
log:
error: The `{{.CLI_ARGS}}` Docker Molecule test finished with errors
start: Running Docker Molecule test on containers in the `{{.CLI_ARGS}}` group
success: Successfully ran the `{{.CLI_ARGS}}` Docker Molecule test
cmds:
- |
set -o pipefail
{{.MOLECULE_TEST_OPTIONS}} MOLECULE_GROUP="{{.CLI_ARGS}}" {{.PYTHON_MOLECULE_HANDLE}}molecule test -s docker \
-- --skip-tags skipdockertest 2>&1 | tee debug.log || EXIT_CODE=$?
if [ -n "$EXIT_CODE" ]; then
fi
- task: post:molecule:log
vars:
RESULTS_FILE: debug.log
molecule:docker:matrix:
deps:
- molecule:dependencies
- :install:software:docker
vars:
MOLECULE_DATE:
sh: date '+%Y-%m-%d'
log:
error: There were errors while running the test (results were logged to `{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-$SCENARIO.txt`)
start: Running Docker Molecule test with results teed to `{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-$SCENARIO.txt`
success: Finished running the test (results were logged to `{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-$SCENARIO.txt`)
cmds:
- mkdir -p {{.MOLECULE_LOGS_PATH}}
- |
SCENARIO="Linux"
if grep -Ril 'community.general.snap:' ./tasks; then
SCENARIO="Snap"
.config/log warn 'Running Docker Molecule tests on the Docker containers that are compatible with `snap` since the role has references to `snap`'
fi
set -o pipefail
{{.MOLECULE_TEST_OPTIONS}} MOLECULE_GROUP="$SCENARIO" {{.PYTHON_MOLECULE_HANDLE}}molecule test -s docker -- --skip-tags skipdockertest 2>&1 | \
tee "{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-$SCENARIO.txt" || EXIT_CODE=$?
if [ -n "$EXIT_CODE" ]; then
exit "$EXIT_CODE"
fi
- task: post:molecule:log
vars:
RESULTS_FILE: '{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-$SCENARIO.txt'
molecule:docker:prompt:
env:
MARKDOWN: |
# Ansible Molecule Test via Docker
Choose a container group from the options below to begin the Molecule test.
The choices should be mostly self-explanatory. The `Snap` group is a special group
that should be used to test roles that contain `snap` logic. Only recent versions of
Debian and Ubuntu support snap installations inside a Docker container. Docker tests
are a quick way to test Ansible plays without consuming a large amount of system
resources. Granted, to fully test an Ansible play, a VirtualBox method should be used
instead.
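The choices listed by the prompt are the keys of the `groups` map in
`molecule/docker/molecule.yml`. As a rough sketch (group and container names
here are hypothetical, and the shape of each group's value is assumed), the
relevant part of that file looks like:
```
groups:
  CentOS-8:
    - centos-8
  Snap:
    - ubuntu-22-04
    - debian-11
```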
cmds:
- TMP="$(mktemp)" && echo "$MARKDOWN" > "$TMP" && .config/log md "$TMP"
- task: molecule:docker:prompt:continue
molecule:docker:prompt:continue:
interactive: true
deps:
- :install:software:gum
- :install:software:jq
- :install:software:yq
cmds:
- |
DOCKER_OPTIONS="$(yq eval -o=j '.groups' molecule/docker/molecule.yml | jq -r 'keys | join(" ")')"
DOCKER_OPTIONS_LENGTH="$(yq eval -o=j '.groups' molecule/docker/molecule.yml | jq -r 'keys | length')"
if [[ "$DOCKER_OPTIONS_LENGTH" == '0' ]]; then
.config/log error 'There are no Molecule groups defined in `molecule/docker/molecule.yml`' && exit 1
else
.config/log info 'Press SPACE to select an item and ENTER when you are done selecting test environments'
.config/log prompt 'Which environment(s) would you like to run the test on?'
CHOSEN_OPTIONS="$(gum choose --no-limit $(echo "$DOCKER_OPTIONS"))"
COMBINED_OPTIONS=""
CHOSEN_COUNT="$(echo "$CHOSEN_OPTIONS" | wc -l)"
if [[ "$CHOSEN_COUNT" == '0' ]]; then
.config/log error 'No items were selected!' && exit 1
else
while read CURRENT_OPTION; do
COMBINED_OPTIONS="${COMBINED_OPTIONS}:${CURRENT_OPTION}"
done< <(echo "$CHOSEN_OPTIONS")
CHOSEN_OPTION="${COMBINED_OPTIONS:1}"
export ANSIBLE_ENABLE_TASK_DEBUGGER=true
.config/log info 'Running `task ansible:test:molecule:docker:cli -- '"$CHOSEN_OPTION"'`'
task ansible:test:molecule:docker:cli -- "$CHOSEN_OPTION"
fi
fi
- task: allure:report
preconditions:
- sh: test -f molecule/docker/molecule.yml
msg: The `molecule/docker/molecule.yml` file must be present and in the proper format
molecule:gcp:
deps:
- molecule:dependencies
- :install:software:gcloud
log:
error: Encountered error(s) while running the Google Cloud Platform Molecule test
start: Running Google Cloud Platform Molecule test
success: Finished running Google Cloud Platform Molecule test
cmds:
- task: molecule:gcp:preconditions
- |
set -o pipefail
.config/log info 'Results will be available in the `debug.log` file'
{{.PYTHON_MOLECULE_HANDLE}}molecule test -s gcp 2>&1 | tee debug.log || EXIT_CODE=$?
if [ -n "$EXIT_CODE" ]; then
fi
- task: post:molecule:log
vars:
RESULTS_FILE: debug.log
molecule:gcp:matrix:
deps:
- molecule:dependencies
- :install:software:gcloud
- :install:software:yq
vars:
MOLECULE_DATE:
sh: date '+%Y-%m-%d'
log:
error: An error occurred while running the Google Cloud Platform Molecule test sequence
start: Running Docker Molecule test with results teed to `{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-$SCENARIO.txt`
success: Finished running and formatting the results of the Google Cloud Platform molecule test
cmds:
- task: molecule:gcp:preconditions
- mkdir -p {{.MOLECULE_LOGS_PATH}}
- |
set -o pipefail
{{.PYTHON_MOLECULE_HANDLE}}molecule test -s gcp 2>&1 | tee "{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-gcp.txt" || EXIT_CODE=$?
if [ -n "$EXIT_CODE" ]; then
else
.config/log success 'Finished running the test (results were logged to `{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-gcp.txt`)'
fi
- task: post:molecule:log
vars:
RESULTS_FILE: '{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-gcp.txt'
- |
RESULTS="{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-gcp.txt"
PLATFORM_LENGTH="$(yq e '.platforms | length' molecule/gcp/molecule.yml)"
INDEX=0
while [ $INDEX -lt $PLATFORM_LENGTH ]; do
NAME="$(yq e '.platforms['$INDEX'].name' molecule/gcp/molecule.yml)"
ALIAS="$(yq e '.platforms['$INDEX'].alias' molecule/gcp/molecule.yml)"
sed -i -- 's/'"$NAME"'/'"$ALIAS"'/g' "$RESULTS"
INDEX=$((INDEX+1))
done
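# The loop above rewrites each GCE instance name in the results log to its
# human-friendly alias, assuming `molecule/gcp/molecule.yml` platform entries
# shaped like (hypothetical values):
#   platforms:
#     - name: gcp-instance-a1b2c3
#       alias: Ubuntu-22.04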
molecule:gcp:preconditions:
preconditions:
- sh: '[ -n "$GCE_SERVICE_ACCOUNT_EMAIL" ]'
msg: The GCE_SERVICE_ACCOUNT_EMAIL environment variable must be set (e.g. export
GCE_SERVICE_ACCOUNT_EMAIL=molecule@megabyte-labs.iam.gserviceaccount.com).
- sh: '[ -n "$GCE_CREDENTIALS_FILE" ]'
msg: The GCE_CREDENTIALS_FILE environment variable must be set and pointing to the GCP
service account JSON key (e.g. export GCE_CREDENTIALS_FILE=~/.config/gcp.json).
- sh: test -f "$GCE_CREDENTIALS_FILE"
msg: The GCE_CREDENTIALS_FILE environment variable is defined but is not pointing to a file that exists.
- sh: '[ -n "$GCE_PROJECT_ID" ]'
msg: The GCE_PROJECT_ID environment variable must be set (e.g. export GCE_PROJECT_ID=megabyte-labs)
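# A minimal environment sketch satisfying the preconditions above (values taken
# from the example messages; adjust for your own GCP project):
#   export GCE_SERVICE_ACCOUNT_EMAIL=molecule@megabyte-labs.iam.gserviceaccount.com
#   export GCE_CREDENTIALS_FILE=~/.config/gcp.json
#   export GCE_PROJECT_ID=megabyte-labs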
molecule:local:
desc: Runs a Molecule test on the localhost
hide: '{{ne .REPOSITORY_TYPE "ansible"}}'
summary: |
# Run a local Molecule test
This option is the same as running the play on the localhost with the added
benefit of incorporating Molecule's test for idempotency and other tests.
**Opens a prompt:**
`task ansible:test:local`
cmds:
- task: molecule:local:{{if .CLI_ARGS}}test{{else}}prompt{{end}}
molecule:local:prompt:
env:
MARKDOWN: |
# Run Molecule Locally
This testing option is provided for cases where you would like to locally test the
Ansible play with Molecule. This option assumes that the current user has sudo
privileges.
## Sudo Password
A sudo password is required for all roles because Molecule has a step where it
ensures Python is installed with `become: true`. The sudo password could potentially
be logged in clear text if logging is in verbose mode so be careful when using this
method.
## Running Locally Without Molecule
If you only want to install the play (without leveraging Molecule's features
like testing for idempotency and running test cases), then a more secure method would
be to run "ansible localhost --ask-sudo-pass -m include_role -a name=<role_name>" after
installing the role and its dependencies with ansible-galaxy.
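As a concrete sketch of that flow (the role name is hypothetical; substitute
your own):
```
ansible-galaxy install professormanhattan.snapd
ansible localhost --ask-become-pass -m include_role -a name=professormanhattan.snapd
```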
cmds:
- TMP="$(mktemp)" && echo "$MARKDOWN" > "$TMP" && .config/log md "$TMP"
- task: molecule:local:prompt:continue
molecule:local:prompt:continue:
deps:
- :install:software:gum
interactive: true
cmds:
- .config/log prompt 'What is the sudo password for the current user?'
- |
SUDO_PASS="$(.config/log password 'Enter sudo password for local machine..')"
export ANSIBLE_ENABLE_TASK_DEBUGGER=true
export TEST_PASSWORD="$SUDO_PASS"
task ansible:test:molecule:local:test
- task: allure:report
molecule:local:test:
deps:
- molecule:dependencies
vars:
MOLECULE_DATE:
sh: date '+%Y-%m-%d'
log:
error: There was an error while running the Molecule test locally
start: Running the Molecule test locally
success: The local Molecule test was successfully run
cmds:
- |
set -o pipefail
if [ -z "$CI" ]; then
export PATH="$(poetry env info | grep 'Python: /' | sed 's/Python: //' | sed 's/$/\/bin/'):$PATH"
fi
{{.MOLECULE_TEST_OPTIONS}} {{.PYTHON_MOLECULE_HANDLE}}molecule test -s local 2>&1 | \
tee '{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-default.txt' || EXIT_CODE=$?
if [ -n "$EXIT_CODE" ]; then
fi
- task: post:molecule:log
vars:
RESULTS_FILE: '{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-default.txt'
molecule:ssh:
desc: Runs a Molecule test over SSH
hide: '{{ne .REPOSITORY_TYPE "ansible"}}'
summary: |
# Run an SSH Molecule test
This option allows you to run the Molecule test against a single
SSH host.
**Opens a prompt:**
`task ansible:test:molecule:ssh`
cmds:
- task: molecule:ssh:{{if .CLI_ARGS}}cli{{else}}prompt{{end}}
molecule:ssh:cli:
deps:
- molecule:dependencies
log:
error: Errors encountered while running the SSH Molecule test
start: Running the Molecule test over SSH
success: Successfully ran the Molecule test over SSH
cmds:
- |
set -o pipefail
{{.MOLECULE_TEST_OPTIONS}} {{.PYTHON_MOLECULE_HANDLE}}molecule test -s remote 2>&1 | tee debug.log || EXIT_CODE=$?
if [ -n "$EXIT_CODE" ]; then
fi
- task: post:molecule:log
vars:
RESULTS_FILE: debug.log
molecule:ssh:prompt:
env:
MARKDOWN: |
# Remote Ansible Molecule Test via SSH
This testing option is provided for cases where you would like to remotely test
the Ansible play on remote machines via SSH. The prompts will ask you for the
host IP address or FQDN, user, and password. Before running this test, you should
ensure that you can already connect to the machine via SSH (i.e. the ~/.ssh keys
should already be set up). This test assumes that SSH does not require any passwords
to establish the connection.
cmds:
- TMP="$(mktemp)" && echo "$MARKDOWN" > "$TMP" && .config/log md "$TMP"
- task: molecule:ssh:prompt:continue
molecule:ssh:prompt:continue:
deps:
- :install:software:gum
interactive: true
cmds:
- |
.config/log prompt 'What is the IP address or the FQDN of the target host?'
IP_ANSWER="$(.config/log input 'Enter IP address or FQDN..')"
if [[ "$IP_ANSWER" =~ ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$ ]] || \
[[ "$IP_ANSWER" =~ ^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$ ]]; then
.config/log prompt 'What port should the SSH connection use?'
PORT_ANSWER="$(gum input --placeholder='Enter SSH port..' --value='22')"
if [[ "$PORT_ANSWER" =~ ^[0-9]*$ ]]; then
.config/log prompt 'What is the username of a user that has both sudo and SSH privileges?'
USER_ANSWER="$(.config/log input 'SSH username..')"
if [[ "$USER_ANSWER" ^[a-z_]([a-z0-9_-]{0,31}|[a-z0-9_-]{0,30}\$)$ ]]; then
export ANSIBLE_ENABLE_TASK_DEBUGGER=true
export TEST_HOST="$IP_ANSWER"
export TEST_PORT="$PORT_ANSWER"
export TEST_SSH_USER="$USER_ANSWER"
export TEST_USER="$USER_ANSWER"
if [[ "$USER_ANSWER" != 'root' ]]; then
.config/log prompt "What is the user's sudo password?"
SUDO_PASS_ANSWER="$(.config/log password 'Enter sudo password..')"
export TEST_PASSWORD="$SUDO_PASS_ANSWER"
export TEST_BECOME_PASSWORD="$SUDO_PASS_ANSWER"
fi
task ansible:test:molecule:ssh:cli
else
.config/log error 'Username is invalid!' && exit 1
fi
else
.config/log error 'That SSH port is not valid!' && exit 1
fi
else
.config/log error 'An invalid IP address / FQDN was entered.' && exit 1
fi
molecule:virtualbox:
desc: Runs a full E2E Molecule test for all supported operating systems
hide: '{{ne .REPOSITORY_TYPE "ansible"}}'
summary: |
# Run a full E2E Molecule test for all supported operating systems
This task uses VirtualBox to run tests for all of our supported operating
systems in parallel. It is very RAM intensive so, if you want to run this,
your computer should have _at least 32GB of RAM_.
**Opens a prompt:**
`task ansible:test:molecule:virtualbox`
**Generate the compatibility matrix used in the README.md:**
`task ansible:test:molecule:virtualbox:matrix`
cmds:
- task: molecule:virtualbox:{{if .CLI_ARGS}}cli{{else}}prompt{{end}}
# yamllint disable rule:truthy
molecule:virtualbox:cli:
deps:
- molecule:dependencies
- :install:software:vagrant
- :install:software:virtualbox
env:
OBJC_DISABLE_INITIALIZE_FORK_SAFETY: YES
log:
error: Errors encountered while running the `{{.CLI_ARGS}}` VirtualBox Molecule test
start: Running a VirtualBox Molecule test on platforms in the `{{.CLI_ARGS}}` group
success: Finished running the `{{.CLI_ARGS}}` VirtualBox Molecule test
cmds:
- |
set -o pipefail
{{.MOLECULE_TEST_OPTIONS}} MOLECULE_GROUP="{{.CLI_ARGS}}" {{.PYTHON_MOLECULE_HANDLE}}molecule test 2>&1 | tee debug.log || EXIT_CODE=$?
if [ -n "$EXIT_CODE" ]; then
exit "$EXIT_CODE"
fi
- task: post:molecule:log
vars:
RESULTS_FILE: debug.log
molecule:virtualbox:converge:
desc: Provisions a desktop VirtualBox VM and then runs a Molecule test
hide: '{{ne .REPOSITORY_TYPE "ansible"}}'
summary: |
# Provision a desktop VirtualBox VM and then run a Molecule test
This task opens a VM with an operating system of your choosing and then tests
the project's play against it. It then leaves the VM open for inspection.
**Example with interactive prompt for VM type:**
`task test:molecule`
**Example usage bypassing prompt:**
`task test:molecule -- ArchLinux`
## Available scenarios:
* ArchLinux
* CentOS
* Debian
* Fedora
* macOS
* Ubuntu
* Windows
cmds:
- task: molecule:virtualbox:converge:{{if .CLI_ARGS}}cli{{else}}prompt{{end}}
molecule:virtualbox:converge:cli:
deps:
- molecule:dependencies
- :install:software:vagrant
- :install:software:virtualbox
env:
OBJC_DISABLE_INITIALIZE_FORK_SAFETY: YES
log:
error: Errors were encountered while running the `{{.CLI_ARGS}}` VirtualBox Molecule converge play
start: Running the `{{.CLI_ARGS}}` VirtualBox Molecule converge play (this will leave the VirtualBox instance open for inspection)
success: Finished running the `{{.CLI_ARGS}}` VirtualBox Molecule converge play (you are encouraged to inspect the VM)
cmds:
- |
set -o pipefail
{{.MOLECULE_TEST_OPTIONS}} MOLECULE_GROUP={{.CLI_ARGS}} {{.PYTHON_MOLECULE_HANDLE}}molecule converge -s desktop 2>&1 | tee debug.log || EXIT_CODE=$?
if [ -n "$EXIT_CODE" ]; then
exit "$EXIT_CODE"
fi
- task: post:molecule:log
vars:
RESULTS_FILE: debug.log
molecule:virtualbox:converge:prompt:
env:
MARKDOWN: |
# Desktop Ansible Molecule Test via VirtualBox
Choose a desktop environment below to run the Ansible play on.
After choosing, a VirtualBox VM will be created. Then, the Ansible play will run on the VM.
After it is done, the VM will be left open for inspection. Please do get carried away
ensuring everything is working as expected and looking for configuration optimizations that
can be made. The operating systems should all track the latest stable release but might
not always be on the very latest version.
cmds:
- TMP="$(mktemp)" && echo "$MARKDOWN" > "$TMP" && .config/log md "$TMP"
- task: molecule:virtualbox:converge:prompt:continue
molecule:virtualbox:converge:prompt:continue:
interactive: true
deps:
- :install:software:gum
- :install:software:jq
- :install:software:yq
vars:
VIRTUALBOX_OPTIONS:
sh: echo "\"$(yq eval -o=j '.groups' molecule/desktop/molecule.yml | jq -r 'keys | join("\" \"")')\""
VIRTUALBOX_OPTIONS_LENGTH:
sh: yq eval -o=j '.groups' molecule/desktop/molecule.yml | jq -r 'keys | length'
cmds:
- |
if [[ '{{.VIRTUALBOX_OPTIONS_LENGTH}}' == '0' ]]; then
.config/log error 'There are no Molecule groups defined in `molecule/desktop/molecule.yml`' && exit 1
else
.config/log prompt 'Which desktop operating system would you like to test the Ansible play against?'
CHOSEN_OPTION="$(.config/log choose {{.VIRTUALBOX_OPTIONS}})"
export ANSIBLE_ENABLE_TASK_DEBUGGER=true
task ansible:test:molecule:virtualbox:converge:cli -- "$CHOSEN_OPTION"
fi
- task: allure:report
preconditions:
- sh: test -f molecule/desktop/molecule.yml
msg: The `molecule/desktop/molecule.yml` file must be present and in the proper format
molecule:virtualbox:matrix:
deps:
- molecule:dependencies
- :install:software:vagrant
- :install:software:virtualbox
vars:
MOLECULE_DATE:
sh: date '+%Y-%m-%d'
log:
error: Errors were encountered while running the full E2E test (see `{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-default.txt` for details)
start: Running a full E2E test with VirtualBox (results will be saved to `{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-default.txt`)
success: Finished running the full E2E test (results are in `{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-default.txt`)
cmds:
- mkdir -p {{.MOLECULE_LOGS_PATH}}
- |
set -o pipefail
{{.MOLECULE_TEST_OPTIONS}} {{.PYTHON_MOLECULE_HANDLE}}molecule test 2>&1 | \
tee '{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-default.txt' || EXIT_CODE=$?
if [ "$EXIT_CODE" ]; then
fi
- task: post:molecule:log
vars:
RESULTS_FILE: '{{.MOLECULE_LOGS_PATH}}/{{.MOLECULE_DATE}}-default.txt'
# yamllint enable rule:truthy
molecule:virtualbox:prompt:
env:
MARKDOWN: |
# Ansible Molecule Test via Headless VirtualBox Instances
This particular type of test is the best method for testing Ansible plays. It
uses VirtualBox and utilizes headless images. Despite that, running the test across
all the supported operating systems is RAM intensive. Ideally, you should have at
least 16GB of RAM to run all the tests at once. This type of test is used to generate
the compatibility chart so the results of this type of test have the final say.
You do not need to run the tests on all instances at once. Use the prompt below to
narrow your test scope.
cmds:
- TMP="$(mktemp)" && echo "$MARKDOWN" > "$TMP" && .config/log md "$TMP"
- task: molecule:virtualbox:prompt:continue
molecule:virtualbox:prompt:continue:
interactive: true
deps:
- :install:software:gum
- :install:software:jq
- :install:software:yq
vars:
VIRTUALBOX_OPTIONS:
sh: echo "\"$(yq eval -o=j '.groups' molecule/desktop/molecule.yml | jq -r 'keys | join("\" \"")')\""
VIRTUALBOX_OPTIONS_LENGTH:
sh: yq eval -o=j '.groups' molecule/desktop/molecule.yml | jq -r 'keys | length'
cmds:
- |
if [[ '{{.VIRTUALBOX_OPTIONS_LENGTH}}' == '0' ]]; then
.config/log error 'There are no Molecule groups defined in `molecule/desktop/molecule.yml`' && exit 1
else
.config/log prompt 'What environment(s) would you like to target with this test?'
CHOSEN_OPTIONS="$(gum choose --no-limit {{.VIRTUALBOX_OPTIONS}})"
COMBINED_OPTIONS=""
if [ -z "$CHOSEN_OPTIONS" ]; then
.config/log error 'No items were selected!' && exit 1
else
while read CURRENT_OPTION; do
COMBINED_OPTIONS="${COMBINED_OPTIONS}:${CURRENT_OPTION}"
done< <(echo "$CHOSEN_OPTIONS")
CHOSEN_OPTION="${COMBINED_OPTIONS:1}"
export ANSIBLE_ENABLE_TASK_DEBUGGER=true
task ansible:test:molecule:virtualbox:cli -- "$CHOSEN_OPTION"
fi
fi
- task: allure:report
preconditions:
- sh: test -f molecule/default/molecule.yml
msg: The `molecule/default/molecule.yml` file must be present and in the proper format
post:molecule:log:
deps:
- :ci:commit:config
cmds:
- task: ansifilter
vars:
RESULTS_FILE: '{{.RESULTS_FILE}}'
- task: post:molecule:log:commit:ci
- cmd: |
git pull --ff-only origin master
task --list > /dev/null || { echo "ERROR: Invalid Taskfiles!"; exit 1; }
git add '{{.RESULTS_FILE}}'
HUSKY=0 git commit -m "📝 docs(molecule): New Molecule test results added." -n
git push -o ci.skip origin master
ignore_error: true
status:
- '[ "{{.RESULTS_FILE}}" == "debug.log" ] || [ ! -d .git ]'
post:molecule:log:commit:ci:
cmds:
- task: :ci:commit:config
status:
- '[ -z "$GITLAB_CI" ]'
prompt:
env:
MARKDOWN: |
# Molecule Test
There are currently six different options for running Molecule tests.
## 1. VirtualBox Headless
Runs tests using VirtualBox headless VMs. It is the test type used to generate
the compatibility chart.
## 2. VirtualBox Desktop
Runs tests using a VirtualBox desktop version VM. Use this type of test to run
the Ansible play and then open the VirtualBox VM to smoke test the software.
## 3. Docker
Utilizes Docker to test the Ansible play. It has some limitations such as not being
able to test snap installations on all operating systems. It also can only run tests
on Linux environments. This is, however, the fastest way to test roles and requires
the least amount of RAM.
## 4. Local
Runs the Ansible play on the local machine. Use this to run the Ansible play on your
local machine. You might use this if you want to inspect the software after running
the play.
## 5. SSH
Runs the Ansible play on a remote machine after connecting via SSH. This requires
that you already have the SSH credentials configured (i.e. ~/.ssh is setup).
## 6. Google Cloud Platform
Provisions Google Cloud Platform instances and tests the Ansible play on them. This
test requires that you have access to a GCP account and that the proper credentials
are in place. For help, see
[this guide](https://github.com/ProfessorManhattan/molecule-ansible-google-cloud/blob/master/README.md).
Without the environment variables mentioned in the guide set, this task will fail.
## Note on Debugging
All of the tests below (except GCP) enable the built-in Ansible debugger. If a task
fails, the STDOUT will freeze and you will be able to enter a few different commands.
For example, if you enter "r", then Ansible will run the task again. For more
information on the Ansible debugger (including available commands), see
https://docs.ansible.com/ansible/latest/user_guide/playbooks_debugger.html#available-debug-commands.
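An illustrative (hypothetical) debugger exchange after a failed task:
```
TASK [Install common packages] ************************************************
fatal: [localhost]: FAILED! => {"msg": "No package matching 'foo' is available"}
[localhost] TASK: Install common packages (debug)> r
```
Entering "r" reruns the failed task with the same arguments.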
cmds:
- TMP="$(mktemp)" && echo "$MARKDOWN" > "$TMP" && .config/log md "$TMP"
- task: prompt:continue
prompt:continue:
interactive: true
cmds:
- .config/log prompt 'What type of test would you like to perform?'
- |
CHOSEN_TEST="$(.config/log choose 'VirtualBox Headless' 'VirtualBox Desktop' 'Docker' 'Local' 'SSH' 'Google Cloud Platform')"
if [[ "$CHOSEN_TEST" == 'VirtualBox Headless' ]]; then
TEST_SLUG='virtualbox:prompt'
elif [[ "$CHOSEN_TEST" == 'VirtualBox Desktop' ]]; then
TEST_SLUG='virtualbox:converge:prompt'
elif [[ "$CHOSEN_TEST" == 'Docker' ]]; then
TEST_SLUG='docker:prompt'
elif [[ "$CHOSEN_TEST" == 'Local' ]]; then
TEST_SLUG='local'
elif [[ "$CHOSEN_TEST" == 'SSH' ]]; then
TEST_SLUG='ssh:prompt'
elif [[ "$CHOSEN_TEST" == 'Google Cloud Platform' ]]; then
TEST_SLUG='gcp'
TEST_SLUG='gcp'
fi
export ANSIBLE_ENABLE_TASK_DEBUGGER=true
task ansible:test:molecule:${TEST_SLUG}
- task: allure:report
vagrant:
desc: Runs the playbook using Vagrant
hide: '{{ne (print .REPOSITORY_TYPE "-" .REPOSITORY_SUBTYPE) "ansible-role"}}'
summary: |
# Run the playbook using Vagrant
Using Vagrant, you can pick and choose which operating system and
virtualization provider you want to use to test the playbook.
## Possible virtualization providers:
* hyperv
* libvirt
* parallels
* virtualbox
* vmware_fusion
* vmware_workstation
## Possible operating systems:
* archlinux
* centos
* debian
* fedora
* macos
* ubuntu
* windows
**Example opening interactive prompt:**
`task test:vagrant`
**Example bypassing interactive prompt:**
`task test:vagrant -- --provider=vmware_workstation windows`
cmds:
- task: vagrant:{{if .CLI_ARGS}}cli{{else}}prompt{{end}}
vagrant:cli:
deps:
- task: :install:python:requirements
vars:
INSTALL_OPTIONS: --no-dev
- :install:software:vagrant
- :install:software:virtualbox
log:
error: Encountered error when running `vagrant up {{.CLI_ARGS}}`
start: Running `vagrant up {{.CLI_ARGS}}`
success: Successfully ran `vagrant up {{.CLI_ARGS}}`
cmds:
- |
set -o pipefail
vagrant up {{.CLI_ARGS}} 2>&1 | tee debug.log || EXIT_CODE=$?
if [ -n "$EXIT_CODE" ]; then
exit "$EXIT_CODE"
fi
- task: post:molecule:log
vars:
RESULTS_FILE: debug.log
vagrant:prompt:
env:
MARKDOWN: |
# Launch VM via Vagrant and Run Playbook
Use the following prompts to select the type of operating system and
the virtualization platform you wish to use with Vagrant. After you make your choice
the corresponding environment will be provisioned with Vagrant.
The options are generated by inspecting your system for which virtualization
platforms are installed. The supported virtualization platforms are:
* **KVM** - Shows if `qemu-system-x86_64` command is available
* **Parallels** (macOS only) - Shows if `Parallels Desktop.app` is installed
* **VirtualBox** - Shows if `vboxmanage` command is available
* **VMWare Fusion** (macOS only) - Shows if `vmrun` command is available
* **VMWare Workstation** (Linux only) - Shows if `vmware` command is available
cmds:
- TMP="$(mktemp)" && echo "$MARKDOWN" > "$TMP" && .config/log md "$TMP"
- task: vagrant:prompt:continue
vagrant:prompt:continue:
deps:
- :install:software:gum
- :install:software:jq
interactive: true
vars:
PROMPT_OPTIONS:
sh: |
TMP="$(mktemp)"
if type qemu-system-x86_64 &> /dev/null; then
echo 'KVM' > "$TMP"
fi
if [[ '{{OS}}' == 'darwin' ]] && mdfind -name 'Parallels Desktop.app' &> /dev/null; then
echo 'Parallels' > "$TMP"
fi
if type vboxmanage &> /dev/null; then
echo 'VirtualBox' > "$TMP"
fi
if [[ '{{OS}}' == 'linux' ]] && type vmware &> /dev/null; then
echo 'VMWare Workstation' > "$TMP"
fi
if [[ '{{OS}}' == 'darwin' ]] && type vmrun &> /dev/null; then
echo 'VMWare Fusion' > "$TMP"
fi
LIST_LENGTH="$(jq -R -s -c -r 'split("\n") | length' < "$TMP")"
if [ "$LIST_LENGTH" != '0' ]; then
echo "\""$(jq -R -s -c -r 'split("\n") | join("\" \"")' < "$TMP")"\""
else
echo "None"
fi
cmds:
- |
if [[ '{{.PROMPT_OPTIONS}}' == 'None' ]]; then
.config/log error 'No virtualization platforms installed. Install a platform (e.g. VirtualBox, VMWare, QEMU) to continue.' && exit 1
else
.config/log prompt 'Which virtualization platform would you like to use?'
PLATFORM_CHOICE="$(.config/log choose {{.PROMPT_OPTIONS}})"
.config/log prompt 'Which desktop OS would you like to launch / provision?'
OS_CHOICE="$(.config/log choose 'ArchLinux' 'CentOS' 'Debian' 'Fedora' 'macOS' 'Ubuntu' 'Windows')"
task ansible:test:vagrant:cli -- --provider=\""$PLATFORM_CHOICE"\" "$OS_CHOICE"
fi


@ -0,0 +1,609 @@
---
version: '3'
vars:
COLLECTION_DEPS: collection_dependencies
MAIN_TASKS_PATH: tasks/main.yml
META_PATH: meta/main.yml
MOLECULE_RESULTS_PATH: molecule/.results
PRE_SHARED_VAULT_KEY: YTtEnhPWtftHFcP3HneVmz6vX2qMqAwobTDAbvDwrdyunAaDCQ
REQUIREMENTS_PATH: requirements.yml
ROLE_NAME: '{{.GALAXY_NAMESPACE}}.{{.GALAXY_ROLE_NAME}}'
SAMPLE_PROJECT: https://github.com/megabyte-labs/ansible-snapd
VARIABLES_PATH: .variables.json
tasks:
build:none:
log:
start: Skipping build stage because Ansible projects do not require building
cmds:
- task: :donothing
collection-dependencies:
deps:
- :install:software:jq
- :install:software:yq
env:
COLLECTIONS:
sh: jq --arg collections "$(yq eval -o=json '.collections' {{.REQUIREMENTS_PATH}})" '.{{.COLLECTION_DEPS}} = ($collections | fromjson) |
.{{.COLLECTION_DEPS}}[] | "<a href=\"" + .source + "/" + (.name | split(".") | join("/")) + "\" title=\"" + .name +
" collection on Ansible Galaxy\" target=\"_blank\"><img alt=\"" + .name + " Ansible Galaxy badge\"
src=\"https://img.shields.io/badge/Ansible%20Galaxy-" + .name + "-000000?logo=ansible&logoColor=white&style=for-the-badge\"></a>"'
-r {{.VARIABLES_PATH}} | jq --raw-input --slurp 'split("\n") | .[0:((. | length) - 1)]'
TMP:
sh: mktemp
log:
error: Failed to generate documentation variable for collection dependencies
start: Generating documentation variable for collection dependencies
success: Generated documentation variable for collection dependencies
cmds:
- |
jq --arg collections "$COLLECTIONS" '.{{.COLLECTION_DEPS}} = ($collections | fromjson)' '{{.VARIABLES_PATH}}' > "$TMP"
mv "$TMP" '{{.VARIABLES_PATH}}'
collection-dependencies:markdown:
deps:
- :install:software:jq
vars:
COLLECTION_LENGTH:
sh: jq -r '.{{.COLLECTION_DEPS}} | length' '{{.VARIABLES_PATH}}'
FILE_PATH: .autodoc/{{.COLLECTION_DEPS}}.md
env:
MULTIPLE_COLLECTION_TEXT: "### Galaxy Collections\n\nThis role is dependent on multiple Ansible Galaxy collections.
The collections along with a links to their source are listed below.\n\n{{\"{{\"}}{{.COLLECTION_DEPS}}{{\"}}\"}}"
SINGLE_COLLECTION_TEXT: "### Galaxy Collection\n\nThis role is dependent on the following Ansible Galaxy
collection:\n\n{{\"{{\"}}{{.COLLECTION_DEPS}}{{\"}}\"}}"
log:
error: Failed to generate documentation partial for collection dependencies
start: Generating documentation partial for collection dependencies
success: Generated documentation partial for collection dependencies
cmds:
- mkdir -p '{{dir .FILE_PATH}}'
- |
{{if (eq .COLLECTION_LENGTH "0")}}
echo '' > '{{.FILE_PATH}}'
{{else if (eq .COLLECTION_LENGTH "1")}}
echo "$SINGLE_COLLECTION_TEXT" > '{{.FILE_PATH}}'
{{else}}
echo "$MULTIPLE_COLLECTION_TEXT" > '{{.FILE_PATH}}'
{{end}}
sources:
- '{{.FILE_PATH}}'
- '{{.VARIABLES_PATH}}'
galaxy:import:
log:
error: Error occurred while importing Ansible Galaxy role
start: Triggering Ansible Galaxy import
success: Successfully imported role on Ansible Galaxy
cmds:
- |
GITHUB_ROLE_SLUG="$(jq -r '.blueprint.repository.github' package.json | sed 's/.*\///')"
{{.PYTHON_HANDLE}} ansible-galaxy role import --token "$ANSIBLE_GALAXY_TOKEN" {{.GITHUB_ORG}} "$GITHUB_ROLE_SLUG"
status:
- '[ -z "$ANSIBLE_GALAXY_TOKEN" ]'
galaxy:requirements:
log:
error: Error encountered while installing the Ansible Galaxy requirements specified in `requirements.yml`
start: Installing the Ansible Galaxy requirements specified in `requirements.yml`
success: Installed the Ansible Galaxy requirements specified in `requirements.yml`
cmds:
- |
if [ -f ~/.netrc ]; then
chmod 600 ~/.netrc
fi
- cmd: '{{.PYTHON_HANDLE}} ansible-galaxy install -r requirements.yml --ignore-errors'
ignore_error: true
sources:
- requirements.yml
init:
cmds:
- ansible-galaxy init --role-skeleton=/path/to/skeleton role_name
keywords:sync:
deps:
- :install:npm:prettier
- :install:software:jq
- :install:software:yq
summary: |
# Sync Galaxy Tags with `package.json` Keywords
This task syncs the Ansible Galaxy tags found in `meta/main.yml` with the keywords in the `package.json`
file. The Ansible Galaxy tags are capped to a maximum of 20 tags.
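As a worked example (tag names hypothetical): `galaxy_tags: [snap, ubuntu]` in
`meta/main.yml` merged with `"keywords": ["ansible", "snap"]` in `package.json`
yields the union `["ansible", "snap", "ubuntu"]`; `package.json` keeps the full
union while `galaxy_tags` drops entries found in `.config/common-keywords.json`
until it fits the 20-tag cap.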
env:
MERGED_TAGS:
sh: jq -s --argjson galaxy "$(yq e -o=j '.galaxy_info.galaxy_tags' meta/main.yml)" '.[0].keywords + $galaxy | sort | unique' package.json
MERGED_TAGS_LENGTH:
sh: jq -s --argjson galaxy "$(yq e -o=j '.galaxy_info.galaxy_tags' meta/main.yml)" '.[0].keywords + $galaxy | sort | unique | length' package.json
log:
error: Error encountered while running the `package.json` / `meta/main.yml` synchronization logic
start: Synchronizing the keywords in `package.json` and `meta/main.yml`
success: Synchronized the keywords in `package.json` and `meta/main.yml`
cmds:
- |
GALAXY_INFO="$(yq e -o=j meta/main.yml)"
OPTIONAL_TAGS="$(jq '.keywords' .config/common-keywords.json)"
TMP="$(mktemp)"
RESULT="$MERGED_TAGS"
if [ "$MERGED_TAGS_LENGTH" -gt 20 ]; then
function updateList() {
REMOVE_KEY="$(jq -n --argjson optional "$OPTIONAL_TAGS" '$optional['"$1"']')"
RESULT="$(jq -n --argjson remove "$REMOVE_KEY" --argjson jq "$RESULT" '$jq | del(.[] | select(. == $remove))')"
}
LOOP_COUNT="$((MERGED_TAGS_LENGTH-20))"
for i in $(seq "$LOOP_COUNT"); do
updateList "$i"
done
fi
jq -r --argjson result "$MERGED_TAGS" '.keywords = $result' package.json > "$TMP"
mv "$TMP" package.json
prettier --write package.json > /dev/null
mkdir -p .cache/megabytelabs
jq -n --argjson result "$RESULT" --argjson gi "$GALAXY_INFO" '$gi | .galaxy_info.galaxy_tags = $result' > .cache/megabytelabs/galaxy-meta.json
yq eval -P .cache/megabytelabs/galaxy-meta.json > meta/main.yml
- task: :fix:prettier
vars:
CLI_ARGS: meta/main.yml
- task: :fix:yaml:dashes
vars:
CLI_ARGS: meta/main.yml
mod-ansible-autodoc:
todo: Add ansible-autodoc-fork to the includes of the package
cmds:
- |
if [ -n "$CI" ]; then
pip3 install ansible-autodoc-fork
fi
- task: mod-ansible-autodoc:generate
- task: mod-ansible-autodoc:variables
mod-ansible-autodoc:generate:
deps:
- :install:pipx:mod-ansible-autodoc
- :install:software:jq
env:
ACTIONS_DESCRIPTION:
sh: jq -r '.autodoc_actions_description' '{{.VARIABLES_PATH}}'
# PATH:
# sh: echo "$PATH:$(poetry env info | grep 'Python /' | sed 's/Python //')"
TAGS_DESCRIPTION:
sh: jq -r '.autodoc_tags_description' '{{.VARIABLES_PATH}}'
TODO_DESCRIPTION:
sh: jq -r '.autodoc_todo_description' '{{.VARIABLES_PATH}}'
VARIABLES_DESCRIPTION:
sh: jq -r '.autodoc_variables_description' '{{.VARIABLES_PATH}}'
log:
error: Error encountered while generating documentation partials with `mod-ansible-autodoc`
start: Compiling `mod-ansible-autodoc` documentation from comments in the play *.yml files
success: Successfully generated documentation partials with `mod-ansible-autodoc`
cmds:
- >
{{.PYTHON_HANDLE}}mod-ansible-autodoc --actions-title '## Features' --actions-description "$ACTIONS_DESCRIPTION"
--tags-title '### Tags' --tags-description "$TAGS_DESCRIPTION" --todo-title '### TODO'
--todo-description "$TODO_DESCRIPTION" --variables-title '## Variables' --variables-description
"$VARIABLES_DESCRIPTION" --variable-example-comment-prefix '#💬'
- mkdir -p .autodoc
- mv ansible_actions.md ansible_tags.md ansible_todo.md ansible_variables.json ansible_variables.md .autodoc
sources:
- '{{.VARIABLES_PATH}}'
- defaults/**/*.yml
- tasks/**/*.yml
- vars/**/*.yml
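# Sketch of the annotation style mod-ansible-autodoc scans for in defaults/main.yml.
# The `#💬` prefix matches the `--variable-example-comment-prefix` passed above; the
# exact annotation grammar is an assumption, and the variable shown is hypothetical:
#   app_version: latest
#   #💬 app_version: '1.2.3'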
mod-ansible-autodoc:variables:
deps:
- :install:software:jq
log:
error: Failed to merge `.autodoc/ansible_variables.json` into `.variables.json`
start: Merging `.autodoc/ansible_variables.json` into `.variables.json`
success: Successfully merged `.autodoc/ansible_variables.json` into `.variables.json`
cmds:
- |
ROLE_VARIABLES="$(jq -r '.role_variables' .autodoc/ansible_variables.json)"
TMP="$(mktemp)"
jq --arg vars "$ROLE_VARIABLES" '.role_variables = ($vars | fromjson)' '{{.VARIABLES_PATH}}' > "$TMP"
mv "$TMP" '{{.VARIABLES_PATH}}'
prepare:
deps:
- :ci:commit:config
log:
error: Failed to ensure GitLab and GitHub are in sync
start: Preparing for Ansible Galaxy upload
success: GitLab and GitHub are in sync
cmds:
- task: :git:remotes
- git pull origin master
- git push github master
publish:
cmds:
- task: galaxy:import
quickstart:
deps:
- :install:software:jq
- :symlink:{{.REPOSITORY_SUBTYPE}}
- task: :install:python:requirements
vars:
INSTALL_OPTIONS: --no-dev
cmds:
- git reset --hard HEAD
- |
if ! git pull origin master; then
git config url."https://gitlab.com/".insteadOf git@gitlab.com:
git config url."https://github.com/".insteadOf git@github.com:
git pull origin master || true
fi
- task: quickstart:environment
- task: quickstart:map
- task: quickstart:demo
quickstart:cli:
deps:
- :install:software:openssl
summary: |
# Ansible Quickstart CLI
Use this task if you already know the inventory file you would like to run the
`main.yml` playbook on. The unique feature is that this task will first run
`playbooks/init.yml` (and skip any task tagged with `skip_on_init`), reboot,
and then run the normal `main.yml`. This allows you to provision a single computer
that serves both as the client and host of Ansible (since some software requires
reboots).
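**Example (using the default inventory):**
`task ansible:quickstart:cli -- inventories/local.yml`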
{{.QUICKSTART_VAULT_SECURITY}}
vars:
INVENTORY: '{{if .CLI_ARGS}}{{.CLI_ARGS}}{{else}}inventories/local.yml{{end}}'
QUICKSTART_VAULT_SECURITY: |
## Ansible Vault Password Security
To achieve automation wherever possible, we save your Ansible Vault password
on disk during the installation process (i.e. to preserve the information
in between reboots).
To make it more secure, we employ the following measures:
1. The password is forcibly removed anytime the ansible-playbook command fails
2. It is passed in via the `ANSIBLE_VAULT_PASSWORD` variable to minimize the possibility
of the password displaying in the terminal or getting added to the command history.
3. It is stored in `~/.VAULT_PASSWORD` so you can easily spot it if something
goes wrong.
env:
ANSIBLE_CALLBACKS_ENABLED: 'junit, ansible.posix.profile_tasks, ansible.posix.timer, sentry'
ANSIBLE_STDOUT_CALLBACK: community.general.yaml
ANSIBLE_VAULT_PASSWORD_FILE: ~/.VAULT_PASSWORD
JUNIT_FAIL_ON_CHANGE: true
JUNIT_HIDE_TASK_ARGUMENTS: true
JUNIT_OUTPUT_DIR: .results/junit
QUICKSTART_PLAYBOOK_MATCH: |
# Playbook Matching Inventory File Name
A playbook with the same file name as the selected inventory was found in the
`playbooks/` directory. When the file names match up, this "quick start" script
will use the matching playbook file instead of the `main.yml` playbook found in
the root of the repository.
QUICKSTART_VAULT_SECURITY: '{{.QUICKSTART_VAULT_SECURITY}}'
log:
error: Error occurred while running the Ansible play
start: Running the Ansible play locally
success: Successfully ran the Ansible play locally
cmds:
- cmd: |
if [ ! -f "$ANSIBLE_VAULT_PASSWORD_FILE" ]; then
.config/log info 'Adding `~/ANSIBLE_PLAYBOOK_CONTINUE.sh` to run on reboot'
echo "#!/usr/env/bin bash" > ~/ANSIBLE_PLAYBOOK_CONTINUE.sh
echo "" > ~/ANSIBLE_PLAYBOOK_CONTINUE.sh
echo "cd $PWD" >> ~/ANSIBLE_PLAYBOOK_CONTINUE.sh
echo 'task ansible:quickstart:cli -- {{.INVENTORY}}' >> ~/ANSIBLE_PLAYBOOK_CONTINUE.sh
if [ -z "$ANSIBLE_VAULT_PASSWORD" ]; then
MD_TMP="$(mktemp)"
echo "$QUICKSTART_VAULT_SECURITY" > "$MD_TMP"
.config/log md "$MD_TMP"
.config/log info 'Read about security measures above.'
.config/log prompt 'What is your Ansible Vault password?'
export ANSIBLE_VAULT_PASSWORD="$(.config/log password 'Enter Vault password (leave empty if there is not a password yet)..')"
fi
fi
.config/log info 'Writing the `ANSIBLE_VAULT_PASSWORD` to temporary file in user home directory'
echo "$ANSIBLE_VAULT_PASSWORD" > ~/.VAULT_PASSWORD
INVENTORY="{{.INVENTORY}}"
INVENTORY_SLUG="$(echo "$INVENTORY" | sed 's/inventories\/\(.*\).yml/\1/')"
if [[ '{{.INVENTORY}}' == 'inventories/quickstart.yml' ]]; then
if [ -e '/etc/qubes-rpc' ] && [[ "$(whoami)" == 'user' ]]; then
.config/log info 'Assuming `ANSIBLE_USER` name to be `user` since this is Qubes'
export ANSIBLE_USER="user"
elif [ -z "$ANSIBLE_USER" ]; then
WHOAMI="$(whoami)"
.config/log prompt 'In the next step, select a user with `sudo` privileges'
if ! .config/log confirm 'Run playbook with `'"${WHOAMI}"'`?'; then
export ANSIBLE_USER="$(.config/log input 'Enter the username of the admin account you wish to use..')"
else
export ANSIBLE_USER="$WHOAMI"
fi
else
.config/log info '`ANSIBLE_USER` is set to `'"$ANSIBLE_USER"'`'
fi
if [ -z "$ANSIBLE_PASSWORD" ]; then
if sudo -n echo &> /dev/null; then
.config/log info 'Assuming passwordless sudo since `sudo -n echo` was truthy'
.config/log info 'Bypassing `ANSIBLE_PASSWORD` prompt'
else
.config/log prompt 'Enter the `sudo` password for your selected user (`'"$ANSIBLE_USER"'`)'
export ANSIBLE_PASSWORD="$(.config/log password 'Enter password (or just press enter if there is none)..')"
fi
else
.config/log info '`ANSIBLE_PASSWORD` is present as an environment variable.. bypassing password prompt'
fi
if [ -z "$MOLECULE_GROUP" ]; then
if [ -e '/etc/qubes-rpc' ]; then
.config/log info 'Machine is a Qubes dom0 environment'
export MOLECULE_GROUP="qubes"
else
.config/log info 'Assuming machine to be macOS/Linux'
export MOLECULE_GROUP="standard"
fi
fi
fi
if [ -f local/requirements.txt ]; then
pip3 install -r local/requirements.txt
else
.config/log warn 'The `local/requirements.txt` file is missing'
fi
if [ -f "playbooks/${INVENTORY_SLUG}.yml" ]; then
PLAY_TMP="$(mktemp)" && echo "$QUICKSTART_PLAYBOOK_MATCH" > "$PLAY_TMP" && .config/log md "$PLAY_TMP"
{{.PYTHON_HANDLE}}ansible-playbook --skip-tags "dotfiles,mas" -vvv -i {{.INVENTORY}} "playbooks/${INVENTORY_SLUG}.yml" || EXIT_CODE="$?"
else
.config/log info 'Using the `main.yml` playbook because there is no playbook located in `'"playbooks/${INVENTORY_SLUG}.yml"'`'
{{.PYTHON_HANDLE}}ansible-playbook --skip-tags "dotfiles,mas" -vvv -i {{.INVENTORY}} main.yml || EXIT_CODE="$?"
fi
#if [ -n "$EXIT_CODE" ]; then
# .config/log error 'There was an error while running the playbook.'
# .config/log warn 'Ensure there are no secrets in plain text (if you choose to upload the logs so our developers can fix this issue).'
# .config/log confirm 'Upload the playbook log so the developers can debug?'
# # sentry-cli
#fi
ignore_error: true
- sleep 5
- .config/log info 'Ensuring the temporary vault password file is forcibly removed'
- rm -f ~/.VAULT_PASSWORD
- rm -f ~/ANSIBLE_PLAYBOOK_CONTINUE.sh
quickstart:demo:
cmds:
- task: quickstart:cli
status:
- '[ -f files/inventory-map.json ]'
quickstart:environment:
cmds:
- task: :ansible:playbook:environment:cli
vars:
CLI_ARGS:
sh: echo "$ENV"
status:
- '[ -z "$ENV" ]'
quickstart:map:
deps:
- :install:pipx:getmac
- :install:software:gum
- :install:software:jq
summary: |
# Quickstart Mapping
This task is a helper task that attempts to automatically select the appropriate
Ansible inventory based on the MAC address. If the mapping entry is not already
saved, this task will guide the user through a series of prompts.
{{.MAC_ADDRESS_EXPLANATION}}
vars:
INVENTORY_OPTIONS:
sh: echo "\""$(find ./inventories/ -mindepth 1 -maxdepth 1 | sed 's/\.\/inventories\//inventories\//' | jq -R '[.]' | jq -s -c -r 'add | join("\" \"")')"\""
INVENTORY_OPTIONS_LENGTH:
sh: find ./inventories/ -mindepth 1 -maxdepth 1 | sed 's/\.\/inventories\///' | jq -R '[.]' | jq -s -c -r 'add | length'
MAC_ADDRESS_EXPLANATION: |
## Mapping MAC Addresses to Inventory Files
In order to achieve a completely automated flow, we have the ability
to define a map of MAC addresses and inventory files in the `files/`
folder.
You can get your MAC address by using the `getmac` program that this
task installs or you can run the following on Linux:
```
cat /sys/class/net/$(ip route show default | awk '/default/ {print $5}')/address
```
On Windows, if you are provisioning with Docker (the default "Quick Start" method)
and if you make sure you are running it on either a clean system or have no other
Docker containers running, then the MAC address will be:
```
02:42:ac:11:00:02
```
If you wanted to run the Quick Start method on Windows, you could create a file in
`files/inventory-map.json` that looks something like this:
```
{
"02:42:ac:11:00:02": "inventories/workstation.yml"
}
```
This configuration would instruct the script to automatically use the `inventories/workstation.yml`
inventory. If your MAC address is missing, the script will open an interactive prompt and include
the ability to save your MAC address to the file for later use.
env:
INVENTORY_EXPLANATION: |
# Which Inventory Should I Use?
A great starting point is the `quickstart.yml` inventory, which allows you to pass your username
and password through a prompt. It is intended to be used to provision one machine at a time from that machine.
It is also intended to be used with the **Quick Start** links at the top of the README.md. It can be used to
provision any of our supported operating systems.
## TLDR: Choose `quickstart.yml`
MAC_ADDRESS_EXPLANATION: '{{.MAC_ADDRESS_EXPLANATION}}'
cmds:
- |
PATH="$PATH:$HOME/.local/bin"
# MAC_ADDRESS="$(cat /sys/class/net/$(ip route show default | awk '/default/ {print $5}')/address)" # Does not work on macOS
MAC_ADDRESS="$(getmac)"
TARGET_INV="$(jq --arg macAddress "$MAC_ADDRESS" -r '.[$macAddress]' files/inventory-map.json)"
if [[ "$TARGET_INV" == '' ]]; then
.config/log warn 'Initializing `files/inventory-map.json` since it appears to be an empty file'
echo '{}' > files/inventory-map.json
fi
if [ "$TARGET_INV" != 'null' ] && [ "$TARGET_INV" != '' ]; then
.config/log info "Provisioning with "'`'"$TARGET_INV"'`'
task ansible:quickstart:cli -- "$TARGET_INV"
else
MD_TMP="$(mktemp)"
echo "$MAC_ADDRESS_EXPLANATION" > "$MD_TMP"
.config/log md "$MD_TMP"
.config/log warn "MAC address missing from inventory-map.json ($MAC_ADDRESS). Details printed above."
if [ '{{.INVENTORY_OPTIONS_LENGTH}}' != '0' ]; then
.config/log prompt 'Given the information above, would you like to save your MAC address to the `files/inventory-map.json` file?'
if .config/log confirm 'Save MAC address?'; then
INV_TMP="$(mktemp)" && echo "$INVENTORY_EXPLANATION" > "$INV_TMP" && .config/log md "$INV_TMP"
.config/log prompt 'Which inventory file would you like to associate the MAC address with?'
INVENTORY_FILE="$(.config/log choose {{.INVENTORY_OPTIONS}})"
TMP="$(mktemp)"
.config/log info 'Generating new `files/inventory-map.json` file'
jq --arg inventory "$INVENTORY_FILE" --arg macaddr "$MAC_ADDRESS" '.[$macaddr] = $inventory' files/inventory-map.json > "$TMP"
mv "$TMP" files/inventory-map.json
.config/log success "Successfully associated the selected inventory with this machine's MAC address (remember to git add / commit)"
task ansible:quickstart:cli -- "$INVENTORY_FILE"
else
INV_TMP="$(mktemp)" && echo "$INVENTORY_EXPLANATION" > "$INV_TMP" && .config/log md "$INV_TMP"
.config/log prompt 'Which inventory file would you like to use?'
INVENTORY_FILE="$(.config/log choose {{.INVENTORY_OPTIONS}})"
task ansible:quickstart:cli -- "$INVENTORY_FILE"
fi
else
.config/log error 'There are no inventories defined in the `inventories/` folder.'
.config/log info 'Try running `task environment` to link to sample environments'
exit 1
fi
fi
status:
- '[ ! -f files/inventory-map.json ]'
sync:requirements:
deps:
- :install:software:jq
- :install:software:yq
log:
error: Failed to synchronize role dependencies in `{{.META_PATH}}` to `{{.REQUIREMENTS_PATH}}`
start: Ensuring role dependencies in `{{.META_PATH}}` are also listed in `{{.REQUIREMENTS_PATH}}`
success: Successfully ensured role dependencies in `{{.META_PATH}}` are also listed in `{{.REQUIREMENTS_PATH}}`
cmds:
- |
ROLES="$(yq eval '.roles' '{{.REQUIREMENTS_PATH}}')"
yq eval -o=json '.dependencies' '{{.META_PATH}}' | jq -rc '.[] .role' | while read ROLE_NAME; do
if [[ ! "$ROLES" =~ "$ROLE_NAME" ]]; then
yq eval -i -P '.roles += [{"name": "'"$ROLE_NAME"'"}]' '{{.REQUIREMENTS_PATH}}'
fi
done
- task: :fix:yaml:dashes
vars:
CLI_ARGS: '{{.REQUIREMENTS_PATH}}'
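# Sketch of the two files this task reconciles (role name hypothetical):
#   meta/main.yml    -> dependencies: [{ role: professormanhattan.snapd }]
#   requirements.yml -> roles: [{ name: professormanhattan.snapd }]
# Any role listed under `dependencies` but missing from `roles` is appended.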
update:galaxy-id:
log:
error: Failed to look up or inject the Ansible Galaxy project ID into `package.json`
start: Adding Ansible Galaxy project ID to `package.json` (if available)
success: Successfully ensured Ansible Galaxy project ID is in `package.json` (if the project is on Ansible Galaxy)
cmds:
- |
TMP="$(mktemp)"
PROJECT_ID="$(ansible-galaxy info '{{.ROLE_NAME}}' 2> /dev/null | grep -E 'id: [0-9]' | cut -d ' ' -f2)"
if [ "$PROJECT_ID" ]; then
jq --arg a "$PROJECT_ID" '.blueprint.ansible_galaxy_project_id = $a' package.json > "$TMP"
mv "$TMP" package.json
fi
status:
- jq -e 'has("blueprint.ansible_galaxy_project_id")' package.json
update:variables:
cmds:
- task: :ansible:ansibler:ansibler
- task: update:variables:descriptions
update:variables:descriptions:
deps:
- :install:software:jq
- :install:software:yq
vars:
ALT_PREFIX: This repository is the home of an [Ansible](https://www.ansible.com/) role that
DESCRIPTION:
sh: yq e '.galaxy_info.description' '{{.META_PATH}}'
DESCRIPTION_LOWER: '{{lower (trunc 1 .DESCRIPTION)}}{{substr 1 (len .DESCRIPTION) .DESCRIPTION}}'
SUBHEADER_PREFIX: An Ansible role that
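# DESCRIPTION_LOWER lower-cases only the first character of the Galaxy description
# so it reads naturally after the prefixes above, e.g. (hypothetical) "Installs and
# configures Snap" becomes "installs and configures Snap".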
env:
ALT: '{{.ALT_PREFIX}} {{.DESCRIPTION_LOWER}}'
GALAXY_INFO:
sh: yq e -o=json '.galaxy_info' '{{.META_PATH}}'
SUBHEADER: '{{.SUBHEADER_PREFIX}} {{.DESCRIPTION_LOWER}}'
TMP:
sh: mktemp
log:
error: Failed to inject `.variables.json` with description variables
start: Injecting description variables into `.variables.json`
success: Successfully updated `.variables.json` with description variables
cmds:
- jq -S --arg alt "$ALT" --arg galaxyinfo "$GALAXY_INFO" --arg subheader "$SUBHEADER" '.alternative_description = $alt |
.galaxy_info = ($galaxyinfo | fromjson) | .subheader_description = $subheader' '{{.VARIABLES_PATH}}' > "$TMP"
- mv "$TMP" '{{.VARIABLES_PATH}}'
sources:
- '.common/variables.{{.REPOSITORY_SUBTYPE}}.json'
- '{{.META_PATH}}'
- package.json
preconditions:
- sh: type jq > /dev/null
msg: jq is not installed.
- sh: type yq > /dev/null
msg: yq is not installed.
- sh: 'test -f "{{.META_PATH}}"'
msg: 'The `{{.META_PATH}}` file is missing. A properly populated `{{.META_PATH}}` is required. You can find an
example of one at {{.SAMPLE_PROJECT}}.'
- sh: 'test -f "{{.VARIABLES_PATH}}"'
msg: 'The `{{.VARIABLES_PATH}}` file is missing!'
update:variables:playbook:
cmds:
- task: :ansible:playbook:docs
vault:lint:file:
summary: |
# Check for unencrypted Ansible Vault files
This task is leveraged by `lint-staged` to ensure that any file that matches `**/*vault.yml` is encrypted
with Ansible Vault.
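For reference, a file encrypted by `ansible-vault` begins with a header like:
```
$ANSIBLE_VAULT;1.1;AES256
```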
log:
error: '`{{.CLI_ARGS}}` is not encrypted! All files matching `**/*vault.yml` must be encrypted by `ansible-vault`.'
start: Checking if `{{.CLI_ARGS}}` is encrypted with `ansible-vault`
success: Ensured `{{.CLI_ARGS}}` is encrypted
cmds:
- |
head -1 '{{.CLI_ARGS}}' | grep --quiet '^\$ANSIBLE_VAULT;' || {
if [ -s '{{.CLI_ARGS}}' ]; then
exit 1
fi
}
verify:
deps:
- :install:software:poetry
log:
error: Failed to connect to Ansible Galaxy with provided token
start: Verifying connection can be made to Ansible Galaxy
success: Successfully connected to Ansible Galaxy
cmds:
- poetry update ansible
- poetry run ansible-galaxy role setup --token "$ANSIBLE_GALAXY_TOKEN" null null null null --list


@ -0,0 +1,34 @@
---
version: '3'
tasks:
clear:
deps:
- :install:software:virtualbox
summary: |
# Remove All VMs / Reset
This task will remove all the VirtualBox VMs. It is useful in scenarios
where VirtualBox is being called through automation and things can
potentially break. When they break, they need a reset.
**Example resetting all VMs:**
`task app:virtualbox:clear`
It can also reset only the VMs that match a certain pattern.
**Example resetting all VMs matching a pattern:**
`task app:virtualbox:clear -- 'macOS'`
vars:
DEFAULT_PATTERN: default_
cmds:
- cmd: killall -9 VBoxHeadless
ignore_error: true
- |
while read VM; do
VM_NAME="$(echo $VM | sed 's/^"\(.*\)" {.*}/\1/')"
VM_UUID="$(echo $VM | sed 's/^".*".{\(.*\)}/\1/')"
vboxmanage startvm "$VM_UUID" --type emergencystop || true
vboxmanage unregistervm "$VM_UUID" || true
rm -rf "$HOME/VirtualBox VMs/$VM_NAME" || true
done < <(vboxmanage list vms | grep '{{if .CLI_ARGS}}{{.CLI_ARGS}}{{else}}{{.DEFAULT_PATTERN}}{{end}}')


@ -0,0 +1,29 @@
---
version: '3'
vars:
ROWY_HOMEPAGE: https://megabyte.space/tables
ROWY_PATH: ./rowy
ROWY_SLUG: tables
tasks:
rowy:build:
deps:
- :install:npm:browserslist
- :install:software:yarn
env:
REACT_APP_FIREBASE_PROJECT_ID:
sh: jq -r '.blueprint.firebase.projectId' package.json
REACT_APP_FIREBASE_PROJECT_WEB_API_KEY:
sh: jq -r '.blueprint.firebase.webApiKey' package.json
cmds:
- mkdir -p "$(dirname '{{.ROWY_PATH}}')"
- git clone https://github.com/rowyio/rowy.git {{.ROWY_PATH}}
- cd {{.ROWY_PATH}} && yarn
- browserslist --update-db
- |
TMP="$(mktemp)"
jq --arg rowy '{{.ROWY_HOMEPAGE}}' '.homepage = $rowy' {{.ROWY_PATH}}/package.json > "$TMP"
mv "$TMP" {{.ROWY_PATH}}/package.json
- cd {{.ROWY_PATH}} && yarn build
- mv {{.ROWY_PATH}}/build dist/{{.ROWY_SLUG}}


@ -0,0 +1,192 @@
---
version: '3'
vars:
DEFAULT_ANSIBLE_LICENSE: license (MIT)
DEFAULT_NAMESPACE: professormanhattan
GITHUB_ROLE_PATH_PREFIX: https://github.com/megabyte-labs/ansible-
GITLAB_ROLE_PATH_PREFIX: https://gitlab.com/megabyte-labs/ansible-roles/
MIN_ANSIBLE_VERSION: 2.10
tasks:
all:after:
deps:
- :install:software:jq
cmds:
- |
if [ "$(jq -r '.blueprint.overview' package.json)" == 'null' ]; then
.config/log warn 'The `blueprint.overview` field is missing from `package.json`'
fi
if [ "$(jq -r '.blueprint.description' package.json)" == 'null' ]; then
.config/log error 'The `blueprint.description` field is missing from `package.json`'
EXIT_PROGRAM=true
fi
if [ "$(jq -r '.blueprint.name' package.json)" == 'null' ]; then
.config/log error 'The `blueprint.name` field is missing from `package.json`'
EXIT_PROGRAM=true
fi
if [ "$EXIT_PROGRAM" == 'true' ]; then
exit 1
fi
all:before:
cmds:
- |
if [ ! -f package.json ]; then
.config/log error 'The `package.json` file must exist. See `https://gitlab.com/megabyte-labs/ansible-roles/androidstudio` for an example of one.'
exit 1
fi
- task: group
- task: subgroup
angular: 'true'
ansible:
cmds:
- task: all:before
- task: ansible:{{.REPOSITORY_SUBTYPE}}
- task: all:after
ansible:playbook: 'true'
ansible:role:
deps:
- :install:software:jq
- :install:software:yq
cmds:
- |
if [ ! -f meta/main.yml ]; then
.config/log error 'The `meta/main.yml` file must exist. See `https://gitlab.com/megabyte-labs/ansible-roles/androidstudio` for an example of one.'
exit 1
fi
- |
if [ "$(yq e '.galaxy_info.author' meta/main.yml)" == 'null' ]; then
.config/log info 'Setting `author` to `{{.GALAXY_AUTHOR}}` in `meta/main.yml`'
yq e -i '.galaxy_info.author = "{{.GALAXY_AUTHOR}}"' meta/main.yml
fi
- |
if [ "$(yq e '.galaxy_info.company' meta/main.yml)" == 'null' ]; then
.config/log info 'Setting `company` to `{{.GALAXY_COMPANY}}` in `meta/main.yml`'
yq e -i '.galaxy_info.company = "{{.GALAXY_COMPANY}}"' meta/main.yml
fi
- |
if [ "$(yq e '.galaxy_info.min_ansible_version' meta/main.yml)" == 'null' ]; then
.config/log info 'Setting `min_ansible_version` to `{{.MIN_ANSIBLE_VERSION}}` in `meta/main.yml`'
yq e -i '.galaxy_info.min_ansible_version = "{{.MIN_ANSIBLE_VERSION}}"' meta/main.yml
fi
- |
if [ "$(yq e '.galaxy_info.license' meta/main.yml)" == 'null' ]; then
.config/log info 'Setting `license` to `{{.DEFAULT_ANSIBLE_LICENSE}}` in `meta/main.yml`'
yq e -i '.galaxy_info.license = "{{.DEFAULT_ANSIBLE_LICENSE}}"' meta/main.yml
fi
- |
ROLE_NAME="$(yq e '.galaxy_info.role_name' meta/main.yml)"
if [ "$ROLE_NAME" == 'null' ]; then
.config/log warn 'The `meta/main.yml` file is missing the `.galaxy_info.role_name` property. Adding it as the folder name - please edit if necessary.'
ROLE_NAME="$(basename $PWD)" yq e -i '.galaxy_info.role_name = env(ROLE_NAME)' meta/main.yml
fi
SLUG="$(jq -r '.blueprint.slug' package.json)"
if [ "$SLUG" == 'null' ]; then
.config/log info 'Adding `slug` to package.json'
TMP="$(mktemp)" && jq --arg slug "$ROLE_NAME" '.blueprint.slug = $slug' package.json > "$TMP" && mv "$TMP" package.json
fi
GITLAB_REPO="$(jq -r '.blueprint.repository.gitlab' package.json)"
if [ "$GITLAB_REPO" == 'null' ]; then
GITLAB_REPO="{{.GITLAB_ROLE_PATH_PREFIX}}$ROLE_NAME"
.config/log info 'Adding GitLab repository to `package.json`'
TMP="$(mktemp)" && jq --arg repo "$GITLAB_REPO" '.blueprint.repository.gitlab = $repo' package.json > "$TMP" && mv "$TMP" package.json
fi
GITHUB_REPO="$(jq -r '.blueprint.repository.github' package.json)"
if [ "$GITHUB_REPO" == 'null' ]; then
GITHUB_REPO="{{.GITHUB_ROLE_PATH_PREFIX}}$ROLE_NAME"
.config/log info 'Adding GitHub repository to `package.json`'
TMP="$(mktemp)" && jq --arg repo "$GITHUB_REPO" '.blueprint.repository.github = $repo' package.json > "$TMP" && mv "$TMP" package.json
fi
if [ "$(yq e '.galaxy_info.issue_tracker_url' meta/main.yml)" == 'null' ]; then
export ISSUE_TRACKER="$GITLAB_REPO/-/issues"
.config/log info 'Adding `issue_tracker_url` to `meta/main.yml`'
yq e -i '.galaxy_info.issue_tracker_url = env(ISSUE_TRACKER)' meta/main.yml
fi
- |
NAME="$(jq -r '.blueprint.name' package.json)"
if [ "$NAME" != 'null' ]; then
if [ "$(jq -r '.blueprint.title' package.json)" == 'null' ]; then
.config/log info 'Populating the `blueprint.title` in package.json using the `blueprint.name` value'
TMP="$(mktemp)" && jq --arg title "$NAME Ansible Role" '.blueprint.title = $title' package.json > "$TMP" && mv "$TMP" package.json
fi
fi
- |
DESC="$(yq e '.galaxy_info.description' meta/main.yml)"
if [ "$DESC" == 'null' ]; then
BP_DESC="$(jq -r '.blueprint.description' package.json)"
if [ "$BP_DESC" == 'null' ]; then
.config/log error 'The `description` in `meta/main.yml` is missing. It must be present.'
.config/log info 'For an example `meta/main.yml` file see `https://gitlab.com/megabyte-labs/ansible-roles/androidstudio`.'
exit 1
else
yq e -i '.galaxy_info.description = env(BP_DESC)' meta/main.yml
.config/log info '`meta/main.yml` description populated using value from `package.json` `blueprint.description`'
fi
else
if [ "$(jq -r '.blueprint.description' package.json)" == 'null' ]; then
TMP="$(mktemp)" && jq --arg desc "$DESC" '.blueprint.description = $desc' package.json > "$TMP" && mv "$TMP" package.json
fi
fi
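# Note: `env(VAR)` in a yq v4 expression reads VAR from the process environment,
# so a value must be exported (or passed inline) before calling yq, e.g.:
#   export BP_DESC='Installs Android Studio' && yq e -i '.galaxy_info.description = env(BP_DESC)' meta/main.yml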
common: 'true'
docker: 'true'
documentation: 'true'
go: 'true'
group:
deps:
- :install:software:jq
- :install:software:yq
cmds:
- |
GROUP="$(jq -r '.blueprint.group' package.json)"
SUBGROUP="$(jq -r '.blueprint.subgroup' package.json)"
TASK_GROUP="{{.REPOSITORY_TYPE}}"
TASK_SUBGROUP="{{.REPOSITORY_SUBTYPE}}"
if ([ "$GROUP" != 'null' ] && [ "$TASK_GROUP" != 'null' ] && [ "$GROUP" != "$TASK_GROUP" ]) || \
([ "$GROUP" == 'null' ] && [ "$TASK_GROUP" != 'null' ]); then
.config/log info 'Setting `blueprint.group` in `package.json` equal to `vars.REPOSITORY_TYPE` from `Taskfile.yml`'
TMP="$(mktemp)" && jq --arg group "$TASK_GROUP" '.blueprint.group = $group' package.json > "$TMP" && mv "$TMP" package.json
elif [ "$GROUP" != 'null' ] && [ "$TASK_GROUP" == 'null' ]; then
.config/log info 'Setting `vars.REPOSITORY_TYPE` equal to value in `blueprint.group` in `package.json`'
yq e -i '.vars.REPOSITORY_TYPE = env(GROUP)' Taskfile.yml
elif [ "$GROUP" == 'null' ] && [ "$TASK_GROUP" == 'null' ]; then
.config/log error 'Either `blueprint.group` in `package.json` or `vars.REPOSITORY_TYPE` in `Taskfile.yml` must be defined'
fi
misc: 'true'
npm: 'true'
packer: 'true'
python: 'true'
subgroup:
deps:
- :install:software:jq
- :install:software:yq
cmds:
- |
if ([ "$SUBGROUP" != 'null' ] && [ "$TASK_SUBGROUP" != 'null' ] && [ "$SUBGROUP" != "$TASK_SUBGROUP" ]) || \
([ "$SUBGROUP" == 'null' ] && [ "$TASK_SUBGROUP" != 'null' ]); then
.config/log info 'Setting `blueprint.subgroup` in `package.json` equal to `vars.REPOSITORY_SUBTYPE` from `Taskfile.yml`'
TMP="$(mktemp)" && jq --arg group "$TASK_SUBGROUP" '.blueprint.subgroup = $group' package.json > "$TMP" && mv "$TMP" package.json
elif [ "$SUBGROUP" != 'null' ] && [ "$TASK_SUBGROUP" == 'null' ]; then
.config/log info 'Setting `vars.REPOSITORY_SUBTYPE` equal to value in `blueprint.subgroup` in `package.json`'
yq e -i '.vars.REPOSITORY_SUBTYPE = env(SUBGROUP)' Taskfile.yml
elif [ "$SUBGROUP" == 'null' ] && [ "$TASK_SUBGROUP" == 'null' ]; then
.config/log error 'Either `blueprint.subgroup` in `package.json` or `vars.REPOSITORY_SUBTYPE` in `Taskfile.yml` must be defined'
fi
type:
cmds:
- 'true'

View file

@ -0,0 +1,187 @@
---
version: '3'
vars:
COMMON_DOCS_URL: https://gitlab.com/megabyte-labs/documentation/shared/-/raw/master/common.json
COMMON_URL: 'https://gitlab.com/megabyte-labs/common/'
PROJECT_SUBTYPE_VARS_URL: '{{.COMMON_URL}}{{.REPOSITORY_TYPE}}/-/raw/master/project-{{.REPOSITORY_SUBTYPE}}/.config/variables.json'
PROJECT_VARS_URL: '{{.COMMON_URL}}{{.REPOSITORY_TYPE}}/-/raw/master/project/.config/variables.json'
tasks:
all:
cmds:
- task: name
- task: :boilerplate:populate:group
- task: group
- task: :boilerplate:populate:subgroup
- task: subgroup
- task: title
- task: description
- task: overview
- task: slug
- task: project-specific
- task: build
- task: test
build:
summary: |
This task prompts the user for the `build` command to place in `scripts.build` inside
of `package.json`.
For the default value, it looks at the corresponding common repository by first checking the
`project-subtype` folder and then the `project` folder's value for `scriptsBuild` inside of the
`.config/variables.json` file.
cmds:
- .config/log prompt 'Enter the build command placed in `scripts.build` inside of `package.json`'
- |
BUILD_ANSWER="$(.config/log input 'Enter build command..')"
task boilerplate:prompt:build:continue -- "$BUILD_ANSWER"
status:
- |
[[ "$(jq -r '.blueprint.description' package.json)" != "null" ]]
build:continue:
cmds:
- TMP="$(mktemp)" && jq --arg cmd '{{.CLI_ARGS | replace "'" "\'"}}' '.scripts.build = $cmd' package.json && mv "$TMP" package.json
description:
cmds:
- .config/log prompt 'Enter a description for the project'
- |
DESC_ANSWER="$(.config/log input 'Enter description..')"
task boilerplate:prompt:description:continue -- "$DESC_ANSWER"
status:
- |
[[ "$(jq -r '.blueprint.description' package.json)" != "null" ]]
description:continue:
cmds:
- TMP="$(mktemp)" && jq --arg desc '{{.CLI_ARGS | replace "'" "\'"}}' '.blueprint.description = $desc' package.json && mv "$TMP" package.json
group:
prompt:
type: select
message: Select a group
options:
- angular
- ansible
- docker
- go
- npm
- packer
- python
answer:
cmds:
- |
TMP="$(mktemp)" && jq --arg group '{{.ANSWER}}' '.blueprint.group = $group' package.json > "$TMP" && mv "$TMP" package.json
- |
TYPE='{{.ANSWER}}' yq e -i '.vars.REPOSITORY_TYPE = env(TYPE)' Taskfile.yml
status:
- |
[[ "$(jq -r '.blueprint.group' package.json)" != "null" ]] && [[ "$(yq e '.vars.REPOSITORY_TYPE' Taskfile.yml)" != "null" ]]
name:
cmds:
- .config/log prompt 'Enter a name for the project'
- |
NAME_ANSWER="$(.config/log input 'Enter the project name..')"
task boilerplate:prompt:name:continue -- "$NAME_ANSWER"
status:
- |
[[ "$(jq -r '.blueprint.name' package.json)" != "null" ]]
name:continue:
cmds:
- TMP="$(mktemp)" && jq --arg name '{{.CLI_ARGS}}' '.blueprint.name = $name' package.json > "$TMP" && mv "$TMP" package.json
overview:
prompt:
type: input
message: Enter an overview for the project
answer:
cmds:
- TMP="$(mktemp)" && jq --arg overview '{{.ANSWER}}' '.blueprint.overview = $overview' package.json > "$TMP" && mv "$TMP" package.json
status:
- |
[[ "$(jq -r '.blueprint.overview' package.json)" != "null" ]]
project-specific: 'true'
slug:
cmds:
- .config/log prompt 'Enter a slug for the project'
- |
SLUG_ANSWER="$(.config/log input 'Enter a slug..')"
task boilerplate:prompt:slug:continue -- "$SLUG_ANSWER"
status:
- |
[[ "$(jq -r '.blueprint.slug' package.json)" != "null" ]]
slug:continue:
cmds:
- TMP="$(mktemp)" && jq --arg slug '{{.CLI_ARGS}}' '.blueprint.slug = $slug' package.json > "$TMP" && mv "$TMP" package.json
subgroup:
env:
SUBGROUP_GROUP:
sh: |
TASK_GROUP="$(yq e '.vars.REPOSITORY_TYPE' Taskfile.yml)"
if [ "$TASK_GROUP" == 'null' ]; then
PKG_GROUP="$(jq -r '.blueprint.group' package.json)"
if [ "$PKG_GROUP" == 'null' ]; then
echo 'generic'
else
echo "$PKG_GROUP"
fi
else
echo "$TASK_GROUP"
fi
prompt:
type: select
message: Select a subgroup
options:
sh: curl -sSL '{{.COMMON_DOCS_URL}}' | jq --arg type "$SUBGROUP_GROUP" '.groups[$type]'
answer:
cmds:
- |
TMP="$(mktemp)" && jq --arg subtype '{{.ANSWER}}' '.blueprint.subgroup = $subtype' package.json > "$TMP" && mv "$TMP" package.json
- |
SUBTYPE='{{.ANSWER}}' yq e -i '.vars.REPOSITORY_SUBTYPE = env(SUBTYPE)' Taskfile.yml
status:
- |
[[ "$(jq -r '.blueprint.subgroup' package.json)" != "null" ]] && [[ "$(yq e '.vars.REPOSITORY_SUBTYPE' Taskfile.yml)" != "null" ]]
test:
summary: |
This task prompts the user for the `test` command to place in `scripts.test` inside
of `package.json`.
For the default value, it looks at the corresponding common repository by first checking the
`project-subtype` folder and then the `project` folder's value for `scriptsTest` inside of the
`.config/variables.json` file.
cmds:
- .config/log prompt 'Enter the test command placed in `scripts.test` inside of `package.json`'
- |
TEST_ANSWER="$(.config/log input 'Enter test command..')"
task boilerplate:prompt:test:continue -- "$TEST_ANSWER"
status:
- |
[[ "$(jq -r '.scripts.test' package.json)" != "null" ]]
test:continue:
cmds:
- TMP="$(mktemp)" && jq --arg cmd '{{.CLI_ARGS | replace "'" "\'"}}' '.scripts.test = $cmd' package.json && mv "$TMP" package.json
title:
cmds:
- .config/log prompt 'Enter the title of the README.md'
- |
TITLE_ANSWER="$(.config/log input 'Enter README.md title..')"
task boilerplate:prompt:title:continue -- "$TITLE_ANSWER"
status:
- |
[[ "$(jq -r '.blueprint.title' package.json)" != "null" ]]
title:continue:
cmds:
- TMP="$(mktemp)" && jq --arg title '{{.CLI_ARGS | replace "'" "\'"}}' '.blueprint.title = $title' package.json && mv "$TMP" package.json

View file

@ -0,0 +1,136 @@
---
version: '3'
tasks:
check:package:
interactive: true
deps:
- :install:modules:local
- :install:software:jq
vars:
BLUEPRINT_REQUIRED_FIELDS: title description group name overview slug subgroup
run: once
log:
error: Error occurred while validating/prompting for blueprint settings
start: Ensuring required fields in the blueprint section of `package.json` are present
success: Successfully ensured `package.json` minimum blueprint requirements are present
cmds:
- task: prime:package
- task: ensure:gitlab-ci
- task: :boilerplate:prompt:all
- task: :boilerplate:populate:type
- task: update:taskfile
clean:
deps:
- :install:software:jq
log:
error: Failed to clean `package.json`
start: Cleaning `package.json`
success: Cleaned `package.json`
cmds:
- |
TMP="$(mktemp)"
jq 'del(."standard-version")' package.json > "$TMP"
mv "$TMP" package.json
- |
TMP="$(mktemp)"
jq 'del(."lint-staged")' package.json > "$TMP"
mv "$TMP" package.json
ensure:gitlab-ci:
run: once
cmds:
- |
if [ ! -f .gitlab-ci.yml ]; then
echo '---' > .gitlab-ci.yml
echo 'stages:' >> .gitlab-ci.yml
echo ' - lint' >> .gitlab-ci.yml
echo '' >> .gitlab-ci.yml
fi
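# The same stub could be generated with a single heredoc instead of repeated
# echo commands, e.g.:
#   cat > .gitlab-ci.yml <<'EOF'
#   ---
#   stages:
#     - lint
#
#   EOF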
prime:package:
deps:
- :install:software:jq
run: once
log:
error: Failed to merge shared `package.json` settings
start: Ensuring `package.json` has shared settings
success: Successfully merged shared `package.json` settings
cmds:
- curl -s https://gitlab.com/megabyte-labs/common/shared/-/raw/master/package.json > package-reference.json
- task: prime:package:ensure-deps
vars:
PKG_FILE: package.json
- task: prime:package:ensure-deps
vars:
PKG_FILE: package-reference.json
- |
DEPS="$(jq -s '.[0].dependencies * .[1].dependencies' package-reference.json package.json)"
DEV_DEPS="$(jq -s '.[0].devDependencies * .[1].devDependencies' package-reference.json package.json)"
OPT_DEPS="$(jq -s '.[0].optionalDependencies * .[1].optionalDependencies' package-reference.json package.json)"
ESLINT_CONFIG="$(jq -r '.eslintConfig.extends' package-reference.json)"
PRETTIER_CONFIG="$(jq -r '.prettier' package-reference.json)"
TMP="$(mktemp)"
jq --arg deps "$DEPS" --arg devDeps "$DEV_DEPS" --arg optDeps "$OPT_DEPS" --arg eslint "$ESLINT_CONFIG" \
--arg prettier "$PRETTIER_CONFIG" '.dependencies = ($deps | fromjson) | .devDependencies = ($devDeps
| fromjson) | .optionalDependencies = ($optDeps | fromjson) | .eslintConfig.extends = $eslint
| .prettier = $prettier' package.json > "$TMP"
mv "$TMP" package.json
- rm package-reference.json
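# `jq -s '.[0].dependencies * .[1].dependencies' a.json b.json` slurps both files
# into one array and recursively merges the two objects, with keys from the second
# file winning. For example, merging {"a": "1", "b": "2"} with {"b": "3"}
# yields {"a": "1", "b": "3"}.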
prime:package:ensure-deps:
deps:
- :install:software:jq
run: once
cmds:
- |
if [ "$(jq -r '.dependencies' {{.PKG_FILE}})" == 'null' ]; then
TMP="$(mktemp)"
jq '.dependencies = {}' {{.PKG_FILE}} > "$TMP"
mv "$TMP" {{.PKG_FILE}}
fi
- |
if [ "$(jq -r '.devDependencies' {{.PKG_FILE}})" == 'null' ]; then
TMP="$(mktemp)"
jq '.devDependencies = {}' {{.PKG_FILE}} > "$TMP"
mv "$TMP" {{.PKG_FILE}}
fi
- |
if [ "$(jq -r '.optionalDependencies' {{.PKG_FILE}})" == 'null' ]; then
TMP="$(mktemp)"
jq '.optionalDependencies = {}' {{.PKG_FILE}} > "$TMP"
mv "$TMP" {{.PKG_FILE}}
fi
update:taskfile:
deps:
- :install:software:yq
run: once
log:
error: Error encountered while ensuring `Taskfile.yml` has correct settings
start: Ensuring `Taskfile.yml` has correct settings
success: Successfully applied `Taskfile.yml` assurances
cmds:
- |
GROUP="$(jq -r '.blueprint.group' package.json)"
SUBGROUP="$(jq -r '.blueprint.subgroup' package.json)"
TASK_GROUP="$(yq eval '.vars.REPOSITORY_TYPE' Taskfile.yml)"
TASK_SUBGROUP="$(yq eval '.vars.REPOSITORY_SUBTYPE' Taskfile.yml)"
if [ "$GROUP" != "$TASK_GROUP" ]; then
yq e -i ".vars.REPOSITORY_TYPE = \"$GROUP\"" Taskfile.yml
fi
if [ "$SUBGROUP" != "$TASK_SUBGROUP" ]; then
yq e -i ".vars.REPOSITORY_SUBTYPE = \"$SUBGROUP\"" Taskfile.yml
fi
UPSTREAM='upstream:project'
if [ '{{.REPOSITORY_TYPE}}.{{.REPOSITORY_SUBTYPE}}' == 'common.shared' ]; then
UPSTREAM='upstream:shared'
elif [ '{{.REPOSITORY_TYPE}}.{{.REPOSITORY_SUBTYPE}}' == 'documentation.shared' ]; then
UPSTREAM='upstream:commondocs'
elif [ '{{.REPOSITORY_TYPE}}' == 'common' ]; then
UPSTREAM='upstream:common'
elif [ '{{.REPOSITORY_TYPE}}' == 'documentation' ]; then
UPSTREAM='upstream:docs'
fi
yq e -i ".tasks.start.cmds[0].task = \"$UPSTREAM\"" Taskfile.yml

View file

@ -0,0 +1,45 @@
---
version: '3'
tasks:
actions:test:
deps:
- :install:software:act
- :install:software:docker
desc: Locally test the on-push GitHub Action event (only works for Linux containers)
hide:
sh: '! test -d .github/workflows'
summary: |
# Test GitHub Actions
This task ensures Docker and Act are installed. It then uses Act to locally
test Linux-based on-push GitHub Action events.
See [Act's README.md](https://github.com/nektos/act) for more information.
log:
error: Error encountered while testing GitHub Actions locally with `act`
start: Testing GitHub Actions locally with `act`
success: Completed local GitHub Actions test
cmds:
- act
synchronize:
deps:
- :install:software:git
summary: |
Forces a push to the GitHub master branch so that GitHub stays mirrored with
the GitLab master branch.
env:
GITHUB_HTTP_REPO:
sh: jq -r '.blueprint.repository.github' package.json
cmds:
- cmd: |
if [ "$GITHUB_HTTP_REPO" != 'null' ]; then
GITHUB_REPO_WITH_TOKEN="$(echo "$GITHUB_HTTP_REPO" | sed "s/github.com/${GITHUB_TOKEN}@github.com/")"
git remote add github "${GITHUB_REPO_WITH_TOKEN}.git"
git fetch --unshallow origin
git push github master --force
else
.config/log warn 'The .blueprint.repository.github field is missing! Cannot synchronize to GitHub.'
fi
ignore_error: true

View file

@ -0,0 +1,190 @@
---
version: '3'
tasks:
before:
deps:
- :install:software:git
cmds:
- task: commit:config
- task: checkout
- task: lockfiles
- task: before:npm
status:
- '[ -z "$CI" ]'
before:npm:
deps:
- :install:npm:pnpm
log:
error: Error encountered while configuring pnpm to store its cache in `.pnpm-store`
start: Configuring pnpm to store its cache in `.pnpm-store`
success: Successfully updated pnpm to store its cache in `.pnpm-store`
cmds:
- pnpm config set store-dir .pnpm-store
checkout:
deps:
- :install:software:git
log:
error: Failed to pull latest changes
start: Pulling latest changes
success: Successfully pulled latest changes
cmds:
- cmd: git pull
ignore_error: true
- |
if [ "$CI_COMMIT_REF_NAME" == 'synchronize' ]; then
git checkout master
git pull origin master
else
git checkout "$CI_COMMIT_REF_NAME"
git pull origin "$CI_COMMIT_REF_NAME"
fi
status:
- '[ -n "$NO_GIT_CREDS" ]'
codeclimate:
deps:
- :install:software:docker
log:
error: Encountered error while running CodeClimate for CI
start: Running CodeClimate for CI
success: Successfully finished running CodeClimate for CI
cmds:
- |
if [ -f .config/codeclimate.yml ]; then
cp .config/codeclimate.yml .codeclimate.yml
else
curl -sSL https://gitlab.com/megabyte-labs/common/shared/-/raw/master/.config/codeclimate.yml > .codeclimate.yml
fi
- docker pull megabytelabs/codeclimate-internet
- docker tag megabytelabs/codeclimate-internet codeclimate/codeclimate
- task: :lint:codeclimate:load:custom-engines
- task: codeclimate:gitlab
codeclimate:gitlab:
cmds:
- docker run --rm --env CODECLIMATE_CODE="$PWD" --volume "$PWD":/code --volume /var/run/docker.sock:/var/run/docker.sock
--volume /tmp/cc:/tmp/cc codeclimate/codeclimate analyze --dev -f json > gl-code-quality-report.json
status:
- '[ -z "$GITLAB_CI" ]'
commit:
deps:
- :install:software:git
vars:
CURR_BRANCH:
sh: git rev-parse --abbrev-ref HEAD
log:
error: Encountered error while pushing changes to {{.CURR_BRANCH}}
start: Bypassing git hooks and pushing changes to {{.CURR_BRANCH}} (if there are any changes)
cmds:
- task: commit:config
- task: lockfiles:clean
- task --list > /dev/null || (echo "ERROR Invalid Taskfiles!" && exit 1)
- git add --all
- git diff --cached "*"
- |
if [[ $(git status --porcelain) ]]; then
git commit -m "☁️ chore(automation): Applying changes from upstream repository."
git push -q -o ci.skip origin {{.CURR_BRANCH}} || FAILED_PUSH=$?
if [ -n "$FAILED_PUSH" ]; then
git pull -X theirs origin {{.CURR_BRANCH}}
task --list &> /dev/null || (echo "ERROR: Invalid Taskfiles!" && exit 1)
git add --all
git commit -m "☁️ chore(automation): Merging changes from upstream repository with -X theirs."
git push --force -q -o ci.skip origin {{.CURR_BRANCH}}
fi
fi
status:
- '[ -z "$CI" ] || [ -n "$NO_GIT_CREDS" ]'
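# `git pull -X theirs` resolves any merge conflicts in favor of the incoming
# remote changes, which keeps this unattended commit-and-push retry loop from
# stalling on manual conflict resolution.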
commit:config:
deps:
- :install:software:git
run: once
log:
error: Error configuring git
start: Configuring git to use CI-friendly parameters
success: Successfully configured git to use CI parameters
cmds:
- git remote set-url origin "https://root:$GROUP_ACCESS_TOKEN@$CI_SERVER_HOST/$CI_PROJECT_PATH.git"
- git config user.email "$GITLAB_CI_EMAIL"
- git config user.name "$GITLAB_CI_NAME"
status:
- '[ -z "$GROUP_ACCESS_TOKEN" ] || [ -z "$GITLAB_CI_EMAIL" ] || [ -z "$GITLAB_CI_NAME" ] || [ -z "$GITLAB_CI" ]'
lockfiles:
cmds:
- |
if [ -f local/package-lock.json ]; then
cp local/package-lock.json package-lock.json
fi
- |
if [ -f local/yarn.lock ]; then
cp local/yarn.lock yarn.lock
fi
lockfiles:clean:
cmds:
- |
if [ -f local/package-lock.json ]; then
rm -f package-lock.json
fi
- |
if [ -f local/yarn.lock ];then
rm -f yarn.lock
fi
mirror:github:
deps:
- :donothing
submodules:
deps:
- :install:software:git
log:
error: Encountered error while ensuring submodules are up-to-date
start: Ensuring submodules are configured and up-to-date with their master remote
success: Ensured submodules are up-to-date
cmds:
- >
git submodule foreach 'git config user.email "$GITLAB_CI_EMAIL"; git config user.name "$GITLAB_CI_NAME";
DEFAULT_BRANCH="$(git symbolic-ref refs/remotes/origin/HEAD | sed 's@^refs/remotes/origin/@@')";
git checkout -q "$DEFAULT_BRANCH"; git pull -q origin "$DEFAULT_BRANCH" --ff-only'
status:
- '[ -z "$CI" ] || [ -n "$NO_GIT_CREDS" ]'
synchronize:
deps:
- commit:config
run: once
log:
error: Failed to update the `synchronize` branch
start: Synchronizing the `synchronize` branch with the `master` branch
success: Successfully updated the `synchronize` branch
cmds:
- git fetch origin
- git checkout -b synchronize || git checkout synchronize
- git reset --hard HEAD
- git pull -q origin master
- |
git push --force -q -o ci.skip origin synchronize || FAILED_SYNC=$?
if [ -n "$FAILED_SYNC" ]; then
task git:gitlab:protected:off -- "synchronize"
git push --force -q -o ci.skip origin synchronize
fi
- task: synchronize:gitlab
- git checkout master
- task: :ci:github:synchronize
status:
- '[ -n "$NO_GIT_CREDS" ] || ([ "$FULLY_AUTOMATED_TASKS" != "true" ] && [ -z "$CI" ])'
synchronize:gitlab:
run: once
cmds:
- curl -s --request POST --form "token=${CI_JOB_TOKEN}" --form ref=master --form "variables[PIPELINE_SOURCE]=$PIPELINE_SOURCE"
"https://gitlab.com/api/v4/projects/${CI_PROJECT_ID}/trigger/pipeline"
status:
- '[ -z "$GITLAB_CI" ] || [ -n "$NO_GITLAB_SYNCHRONIZE" ]'
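# GitLab's trigger endpoint responds with the created pipeline as JSON; a hedged
# success check could pipe the curl output through `jq -e '.id' > /dev/null` and
# log a warning when it exits nonzero.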

View file

@ -0,0 +1,19 @@
---
version: '3'
tasks:
cloudflare:dns:
summary: |
This task sets up a CNAME record pointing to the given value in Cloudflare DNS. This is primarily used in
conjunction with the Heroku tasks to set up custom domains. These variables are needed:
CONFIGURE_CLOUDFLARE_DNS: Set to `true` to configure Cloudflare DNS
CLOUDFLARE_DNS_ZONE: Name of the DNS zone the record should be added to
CLOUDFLARE_API_TOKEN: API token used to authenticate to Cloudflare
CLOUDFLARE_RECORD_NAME: The name of the record
CLOUDFLARE_RECORD_VALUE: Target of the CNAME record
cmds:
- |
{{if eq .CONFIGURE_CLOUDFLARE_DNS "true")}}curl -X POST "https://api.cloudflare.com/client/v4/zones/{{.CLOUDFLARE_DNS_ZONE}}/dns_records/" \
-H "Authorization: Bearer {{.CLOUDFLARE_API_TOKEN}}" \
-H "Content-Type: application/json" \
--data '{"type":"CNAME","name":"{{.CLOUDFLARE_RECORD_NAME}}","content":"{{.CLOUDFLARE_RECORD_VALUE}}","proxied":true,"ttl":3600}'{{end}}
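# Cloudflare's v4 API wraps every response in an envelope such as
# {"success": true, "errors": [], ...}; a hedged way to verify the record was
# created is to pipe the response through `jq -e '.success'` and log an error
# when it exits nonzero.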

View file

@ -0,0 +1,591 @@
---
version: '3'
vars:
CLOUDFLARE_API_TOKEN: '' # Needed when `CONFIGURE_CLOUDFLARE_DNS` is set to `true`
CLOUDFLARE_DNS_ZONE: '' # Needed when `CONFIGURE_CLOUDFLARE_DNS` is set to `true`
CONFIGURE_CLOUDFLARE_DNS: false # Set to `true` to configure Cloudflare DNS to point to the custom domain. Provide values for the below variables when set to `true`
CONFIGURE_CUSTOM_DOMAIN: false # Used in conjunction with `DOMAIN` value set for each application. Set to `true` to configure custom domains
env:
REGION: us # The region to deploy the application to
tasks:
appsmith:
summary: |
Task to deploy `appsmith` to Heroku.
Below are the environment variables that can be configured. Set the values in the `env` section.
APPSMITH_DISABLE_TELEMETRY: Share anonymous usage data
APPSMITH_ENCRYPTION_PASSWORD: Encryption password to encrypt all sensitive credentials in the database. You can use any random string. The more random, the better.
APPSMITH_ENCRYPTION_SALT: Encryption salt used to encrypt all sensitive credentials in the database. You can use any random string. The more random, the better.
APPSMITH_MONGODB_URI: Your Mongo Database URI. Since Heroku doesn't support a managed MongoDB instance, you'll have to create a Mongo DB instance on another service such as https://cloud.mongodb.com
APPSMITH_SUPERVISOR_PASSWORD: Basic authentication password to access the Supervisor UI - a web interface that allows you to manage various processes
The below variable can be used to pass the value of Custom Domain for the application
DOMAIN: The custom domain to be added to the application. Set this to a valid value to add the domain (`CONFIGURE_CUSTOM_DOMAIN` should be set to `true`).
vars:
DOMAIN: ''
env:
APPSMITH_DISABLE_TELEMETRY: true
APPSMITH_ENCRYPTION_PASSWORD: 'kna%si*sj19lk>0s'
APPSMITH_ENCRYPTION_SALT: 'm,a-01s'
APPSMITH_MONGODB_URI: mongo.example.com
APPSMITH_SUPERVISOR_PASSWORD: "sdf'6as9I1a"
cmds:
- |
RNDM=$(shuf -i 10000-1000000 -n 1)
heroku api POST /app-setups --body "{\"app\":{\"region\":\"$REGION\",\"name\":\"appsmith-$RNDM\"},\
\"source_blob\":{\"url\":\"https://api.github.com/repos/appsmithorg/appsmith/tarball/master\"},\
\"overrides\":{\"env\":{\"APPSMITH_DISABLE_TELEMETRY\": $APPSMITH_DISABLE_TELEMETRY, \"APPSMITH_ENCRYPTION_PASSWORD\":\
\"$APPSMITH_ENCRYPTION_PASSWORD\", \"APPSMITH_ENCRYPTION_SALT\": \"$APPSMITH_ENCRYPTION_SALT\", \"APPSMITH_MONGODB_URI\":\
\"$APPSMITH_MONGODB_URI\", \"APPSMITH_SUPERVISOR_PASSWORD\": \"$APPSMITH_SUPERVISOR_PASSWORD\"}}}"
{{if and (eq .CONFIGURE_CUSTOM_DOMAIN "true") (ne .DOMAIN "")}}heroku domains:add {{.DOMAIN}} -a appsmith-$RNDM {{end}}
- task: cloudflare:dns
vars:
CLOUDFLARE_API_TOKEN: '{{.CLOUDFLARE_API_TOKEN}}'
CLOUDFLARE_DNS_ZONE: '{{.CLOUDFLARE_DNS_ZONE}}'
CLOUDFLARE_RECORD_NAME: appsmith
CLOUDFLARE_RECORD_VALUE: '{{.DOMAIN}}'
CONFIGURE_CLOUDFLARE_DNS: '{{.CONFIGURE_CLOUDFLARE_DNS}}'
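# Every deploy task below follows the same shape: generate a random suffix, POST
# a one-off build to Heroku's `/app-setups` endpoint with env overrides, then
# optionally attach a custom domain and a Cloudflare CNAME. A hedged way to poll
# the build (field names assumed from the app-setups API response):
#   SETUP_ID="$(heroku api POST /app-setups --body "$BODY" | jq -r '.id')"
#   heroku api GET "/app-setups/$SETUP_ID" | jq -r '.status'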
baserow:
summary: |
Task to deploy `baserow` to Heroku.
Below are the environment variables that can be configured. Set the values in the `env` section.
BASEROW_PUBLIC_URL: The public URL of your Heroku Baserow app. If empty, the default Heroku app URL is used, but if it differs it must be changed. (eg. https://baserow-test.com).
BASEROW_AMOUNT_OF_WORKERS: The amount of workers per dyno.
AWS_ACCESS_KEY_ID: The spaces API key.
AWS_SECRET_ACCESS_KEY: The spaces API secret key.
AWS_STORAGE_BUCKET_NAME: The name of your space.
AWS_S3_REGION_NAME: Name of the Digital Ocean spaces region (eg. ams3) or Name of the AWS S3 region to use (eg. eu-west-1)
AWS_S3_ENDPOINT_URL: Custom S3 URL to use when connecting to S3, including scheme.
AWS_S3_CUSTOM_DOMAIN: Your custom domain where the files can be downloaded from.
The below variable can be used to pass the value of Custom Domain for the application
DOMAIN: The custom domain to be added to the application. Set this to a valid value to add the domain (`CONFIGURE_CUSTOM_DOMAIN` should be set to `true`).
vars:
DOMAIN: ''
env:
AWS_ACCESS_KEY_ID: ''
AWS_S3_CUSTOM_DOMAIN: ''
AWS_S3_ENDPOINT_URL: ''
AWS_S3_REGION_NAME: ''
AWS_SECRET_ACCESS_KEY: ''
AWS_STORAGE_BUCKET_NAME: ''
BASEROW_AMOUNT_OF_WORKERS: '1'
BASEROW_PUBLIC_URL: https://baserow.megabyte.space
cmds:
- task: bucket:create
vars:
AWS_ACCESS_KEY_ID: '{{.AWS_ACCESS_KEY_ID}}'
AWS_REGION: '{{.AWS_S3_REGION_NAME}}'
AWS_SECRET_ACCESS_KEY: '{{.AWS_SECRET_ACCESS_KEY}}'
BUCKET_NAME: '{{.AWS_STORAGE_BUCKET_NAME}}'
- |
RNDM=$(shuf -i 10000-1000000 -n 1)
heroku api POST /app-setups --body "{\"app\":{\"region\":\"$REGION\",\"name\":\"baserow-$RNDM\"},\
\"source_blob\":{\"url\":\"https://api.github.com/repos/bram2w/baserow/tarball/master\"},\
\"overrides\":{\"env\":{\"BASEROW_PUBLIC_URL\":\"$BASEROW_PUBLIC_URL\",\"BASEROW_AMOUNT_OF_WORKERS\":\
\"$BASEROW_AMOUNT_OF_WORKERS\",\"AWS_ACCESS_KEY_ID\":\"$AWS_ACCESS_KEY_ID\",\"AWS_SECRET_ACCESS_KEY\":\
\"$AWS_SECRET_ACCESS_KEY\",\"AWS_STORAGE_BUCKET_NAME\":\"$AWS_STORAGE_BUCKET_NAME\",\"AWS_S3_REGION_NAME\":\
\"$AWS_S3_REGION_NAME\",\"AWS_S3_ENDPOINT_URL\":\"$AWS_S3_ENDPOINT_URL\",\"AWS_S3_CUSTOM_DOMAIN\":\"$AWS_S3_CUSTOM_DOMAIN\"}}}"
{{if and (eq .CONFIGURE_CUSTOM_DOMAIN "true") (ne .DOMAIN "")}}heroku domains:add {{.DOMAIN}} -a baserow-$RNDM {{end}}
- task: cloudflare:dns
vars:
CLOUDFLARE_API_TOKEN: '{{.CLOUDFLARE_API_TOKEN}}'
CLOUDFLARE_DNS_ZONE: '{{.CLOUDFLARE_DNS_ZONE}}'
CLOUDFLARE_RECORD_NAME: baserow
CLOUDFLARE_RECORD_VALUE: '{{.DOMAIN}}'
CONFIGURE_CLOUDFLARE_DNS: '{{.CONFIGURE_CLOUDFLARE_DNS}}'
chatwoot:
summary: |
Task to deploy `chatwoot` to Heroku.
Below are the environment variables that can be configured. Set the values in the `env` section.
FRONTEND_URL: Public root URL of the Chatwoot installation. This will be used in the emails.
INSTALLATION_ENV: Installation method used for Chatwoot.
RACK_ENV: Environment for rack middleware.
RAILS_ENV: Environment for rails middleware.
REDIS_OPENSSL_VERIFY_MODE: OpenSSL verification mode for Redis connections. Ref https://help.heroku.com/HC0F8CUS/redis-connection-issues
The below variable can be used to pass the value of Custom Domain for the application
DOMAIN: The custom domain to be added to the application. Set this to a valid value to add the domain (`CONFIGURE_CUSTOM_DOMAIN` should be set to `true`).
vars:
DOMAIN: ''
env:
FRONTEND_URL: https://chatwoot.megabyte.space
INSTALLATION_ENV: heroku
RACK_ENV: production
RAILS_ENV: production
REDIS_OPENSSL_VERIFY_MODE: none
cmds:
- |
RNDM=$(shuf -i 10000-1000000 -n 1)
heroku api POST /app-setups --body "{\"app\":{\"region\":\"$REGION\",\"name\":\"chatwoot-$RNDM\"},\
\"source_blob\":{\"url\":\"https://api.github.com/repos/chatwoot/chatwoot/tarball/master\"},\
\"overrides\":{\"env\":{\"FRONTEND_URL\": \"$FRONTEND_URL\", \"INSTALLATION_ENV\": \"$INSTALLATION_ENV\", \"RACK_ENV\":\
\"$RACK_ENV\", \"RAILS_ENV\": \"$RAILS_ENV\", \"REDIS_OPENSSL_VERIFY_MODE\": \"$REDIS_OPENSSL_VERIFY_MODE\"}}}"
{{if and (eq .CONFIGURE_CUSTOM_DOMAIN "true") (ne .DOMAIN "")}}heroku domains:add {{.DOMAIN}} -a chatwoot-$RNDM {{end}}
- task: cloudflare:dns
vars:
CLOUDFLARE_API_TOKEN: '{{.CLOUDFLARE_API_TOKEN}}'
CLOUDFLARE_DNS_ZONE: '{{.CLOUDFLARE_DNS_ZONE}}'
CLOUDFLARE_RECORD_NAME: chatwoot
CLOUDFLARE_RECORD_VALUE: '{{.DOMAIN}}'
CONFIGURE_CLOUDFLARE_DNS: '{{.CONFIGURE_CLOUDFLARE_DNS}}'
directus:
summary: |
Task to deploy `directus` to Heroku.
Below are the environment variables that can be configured. Set the values in the `env` section.
ACCESS_TOKEN_TTL: The access token TTL.
ADMIN_EMAIL: The initial admin email.
ADMIN_PASSWORD: The initial admin password.
CACHE_ENABLED: Whether the cache should be enabled or not.
CACHE_NAMESPACE: The cache namespace.
CACHE_STORE: The cache store to use.
CONFIG_PATH: Application config loader path.
DB_CLIENT: Database server type.
EMAIL_SMTP_HOST: The email server host.
EMAIL_SMTP_POOL: Whether to setup smtp pooling or not.
EMAIL_SMTP_PORT: The email server port.
EMAIL_SMTP_SECURE: Whether email connection is secure or not.
EMAIL_TRANSPORT: The email transport.
EXTENSIONS_PATH: The application's extension folder.
OAUTH_PROVIDERS: OAuth providers.
PUBLIC_URL: Application public URL.
RATE_LIMITER_DURATION: Rate limiter duration in minutes.
RATE_LIMITER_ENABLED: Whether the rate limiter should be enabled or not.
RATE_LIMITER_KEY_PREFIX: The rate limiter key prefixes.
RATE_LIMITER_POINTS: Rate limiter points.
RATE_LIMITER_STORE: The rate limiter storage type.
REFRESH_TOKEN_COOKIE_SAME_SITE: Same site cookie policy.
REFRESH_TOKEN_COOKIE_SECURE: Whether cookies should be secure (http-only) or not.
REFRESH_TOKEN_TTL: The refresh token TTL.
STORAGE_CLOUD_BUCKET: The storage bucket name.
STORAGE_CLOUD_DRIVER: The storage driver.
STORAGE_CLOUD_ENDPOINT: The storage endpoint URL.
STORAGE_CLOUD_KEY: The storage key id.
STORAGE_CLOUD_PUBLIC_URL: The storage public URL.
STORAGE_CLOUD_REGION: The storage bucket region.
STORAGE_CLOUD_ROOT: Storage root location.
STORAGE_CLOUD_SECRET: The storage secret key.
STORAGE_LOCATIONS: The storage key. Please refer to the docs for more information.
The below variable can be used to pass the value of Custom Domain for the application
DOMAIN: The custom domain to be added to the application. Set this to a valid value to add the domain (`CONFIGURE_CUSTOM_DOMAIN` should be set to `true`).
vars:
DOMAIN: ''
env:
ACCESS_TOKEN_TTL: 15m
ADMIN_EMAIL: admin@email.com
ADMIN_PASSWORD: RandomPasword$
CACHE_ENABLED: true
CACHE_NAMESPACE: cache
CACHE_STORE: redis
CONFIG_PATH: /app/directus.config.js
DB_CLIENT: pg
EMAIL_SMTP_HOST: smtp.example.com
EMAIL_SMTP_POOL: true
EMAIL_SMTP_PORT: '587'
EMAIL_SMTP_SECURE: false
EMAIL_TRANSPORT: smtp
EXTENSIONS_PATH: /app/extensions
OAUTH_PROVIDERS: ''
PUBLIC_URL: /
RATE_LIMITER_DURATION: '1'
RATE_LIMITER_ENABLED: true
RATE_LIMITER_KEY_PREFIX: rate-limitter
RATE_LIMITER_POINTS: '30'
RATE_LIMITER_STORE: redis
REFRESH_TOKEN_COOKIE_SAME_SITE: true
REFRESH_TOKEN_COOKIE_SECURE: true
REFRESH_TOKEN_TTL: 7d
STORAGE_CLOUD_BUCKET: your-bucket
STORAGE_CLOUD_DRIVER: s3
STORAGE_CLOUD_ENDPOINT: https://nyc3.digitaloceanspaces.com
STORAGE_CLOUD_KEY: your-s3-key-id
STORAGE_CLOUD_PUBLIC_URL: https://your-bucket.nyc3.digitaloceanspaces.com
STORAGE_CLOUD_REGION: nyc3
STORAGE_CLOUD_ROOT: /
STORAGE_CLOUD_SECRET: your-s3-secret-key
STORAGE_LOCATIONS: cloud
cmds:
- task: bucket:create
vars:
AWS_ACCESS_KEY_ID: '{{.STORAGE_CLOUD_KEY}}'
AWS_REGION: '{{.STORAGE_CLOUD_REGION}}'
AWS_SECRET_ACCESS_KEY: '{{.STORAGE_CLOUD_SECRET}}'
BUCKET_NAME: '{{.STORAGE_CLOUD_BUCKET}}'
- |
RNDM=$(shuf -i 10000-1000000 -n 1)
heroku api POST /app-setups --body "{\"app\":{\"region\":\"$REGION\",\"name\":\"directus-$RNDM\"},\
\"source_blob\":{\"url\":\"https://api.github.com/repos/directus-community/heroku-template/tarball/master\"},\
\"overrides\":{\"env\":{\"ACCESS_TOKEN_TTL\": \"$ACCESS_TOKEN_TTL\", \"ADMIN_EMAIL\": \"$ADMIN_EMAIL\", \"ADMIN_PASSWORD\":\
\"$ADMIN_PASSWORD\", \"CACHE_ENABLED\": $CACHE_ENABLED, \"CACHE_NAMESPACE\": \"$CACHE_NAMESPACE\", \"CACHE_STORE\":\
\"$CACHE_STORE\", \"CONFIG_PATH\": \"$CONFIG_PATH\", \"DB_CLIENT\": \"$DB_CLIENT\", \"EMAIL_SMTP_HOST\": \"$EMAIL_SMTP_HOST\",\
\"EMAIL_SMTP_POOL\": $EMAIL_SMTP_POOL, \"EMAIL_SMTP_PORT\": \"$EMAIL_SMTP_PORT\", \"EMAIL_SMTP_SECURE\": $EMAIL_SMTP_SECURE,\
\"EMAIL_TRANSPORT\": \"$EMAIL_TRANSPORT\", \"EXTENSIONS_PATH\": \"$EXTENSIONS_PATH\", \"OAUTH_PROVIDERS\": \"$OAUTH_PROVIDERS\",\
\"PUBLIC_URL\": \"$PUBLIC_URL\", \"RATE_LIMITER_DURATION\": \"$RATE_LIMITER_DURATION\", \"RATE_LIMITER_ENABLED\": $RATE_LIMITER_ENABLED,\
\"RATE_LIMITER_KEY_PREFIX\": \"$RATE_LIMITER_KEY_PREFIX\", \"RATE_LIMITER_POINTS\": \"$RATE_LIMITER_POINTS\", \"RATE_LIMITER_STORE\":\
\"$RATE_LIMITER_STORE\", \"REFRESH_TOKEN_COOKIE_SAME_SITE\": $REFRESH_TOKEN_COOKIE_SAME_SITE, \"REFRESH_TOKEN_COOKIE_SECURE\":\
\"$REFRESH_TOKEN_COOKIE_SECURE\", \"REFRESH_TOKEN_TTL\": \"$REFRESH_TOKEN_TTL\", \"STORAGE_CLOUD_BUCKET\": \"$STORAGE_CLOUD_BUCKET\",\
\"STORAGE_CLOUD_DRIVER\": \"$STORAGE_CLOUD_DRIVER\", \"STORAGE_CLOUD_ENDPOINT\": \"$STORAGE_CLOUD_ENDPOINT\", \"STORAGE_CLOUD_KEY\":\
\"$STORAGE_CLOUD_KEY\", \"STORAGE_CLOUD_PUBLIC_URL\": \"$STORAGE_CLOUD_PUBLIC_URL\", \"STORAGE_CLOUD_REGION\": \"$STORAGE_CLOUD_REGION\",\
\"STORAGE_CLOUD_ROOT\": \"$STORAGE_CLOUD_ROOT\", \"STORAGE_CLOUD_SECRET\": \"$STORAGE_CLOUD_SECRET\", \"STORAGE_LOCATIONS\": \"$STORAGE_LOCATIONS\"}}}"
{{if and (eq .CONFIGURE_CUSTOM_DOMAIN "true") (ne .DOMAIN "")}}heroku domains:add {{.DOMAIN}} -a directus-$RNDM {{end}}
- task: cloudflare:dns
vars:
CLOUDFLARE_API_TOKEN: '{{.CLOUDFLARE_API_TOKEN}}'
CLOUDFLARE_DNS_ZONE: '{{.CLOUDFLARE_DNS_ZONE}}'
CLOUDFLARE_RECORD_NAME: directus
CLOUDFLARE_RECORD_VALUE: '{{.DOMAIN}}'
CONFIGURE_CLOUDFLARE_DNS: '{{.CONFIGURE_CLOUDFLARE_DNS}}'
ghostonheroku:
summary: |
Task to deploy `ghost-on-heroku` to Heroku.
Below are the environment variables that can be configured. Set the values in the `env` section.
PUBLIC_URL: The HTTPS URL of this app: either your custom domain or default 'herokuapp.com' hostname.
S3_ACCESS_KEY_ID: Set your AWS Access Key ID to enable S3 file storage. Leave blank to disable file uploads.
S3_ACCESS_SECRET_KEY: AWS Access Secret Key, if using S3 file storage.
S3_ASSET_HOST_URL: Optional custom CDN asset host url, if using S3 file storage.
S3_BUCKET_NAME: Name of your S3 bucket on AWS, if using S3 file storage.
S3_BUCKET_REGION: Region of your S3 bucket on AWS, if using S3 file storage.
The below variable can be used to pass the value of Custom Domain for the application
DOMAIN: The custom domain to be added to the application. Set this to a valid value to add the domain (`CONFIGURE_CUSTOM_DOMAIN` should be set to `true`).
vars:
DOMAIN: ''
env:
PUBLIC_URL: https://ghost.megabyte.space
S3_ACCESS_KEY_ID: S3 access key id
S3_ACCESS_SECRET_KEY: S3 secret access key
S3_ASSET_HOST_URL: e.g https://my.custom.domain/
S3_BUCKET_NAME: S3 bucket
S3_BUCKET_REGION: S3 bucket region (e.g. us-east-1)
cmds:
- task: bucket:create
vars:
AWS_ACCESS_KEY_ID: '{{.S3_ACCESS_KEY_ID}}'
AWS_REGION: '{{.S3_BUCKET_REGION}}'
AWS_SECRET_ACCESS_KEY: '{{.S3_ACCESS_SECRET_KEY}}'
BUCKET_NAME: '{{.S3_BUCKET_NAME}}'
- |
RNDM=$(shuf -i 10000-1000000 -n 1)
heroku api POST /app-setups --body "{\"app\":{\"region\":\"$REGION\",\"name\":\"ghostonheroku-$RNDM\"},\
\"source_blob\":{\"url\":\"https://api.github.com/repos/cobyism/ghost-on-heroku/tarball/master\"},\
\"overrides\":{\"env\":{\"PUBLIC_URL\": \"$PUBLIC_URL\", \"S3_ACCESS_KEY_ID\": \"$S3_ACCESS_KEY_ID\",\
\"S3_ACCESS_SECRET_KEY\": \"$S3_ACCESS_SECRET_KEY\", \"S3_ASSET_HOST_URL\": \"$S3_ASSET_HOST_URL\",\
\"S3_BUCKET_NAME\": \"$S3_BUCKET_NAME\", \"S3_BUCKET_REGION\": \"$S3_BUCKET_REGION\"}}}"
{{if and (eq .CONFIGURE_CUSTOM_DOMAIN "true") (ne .DOMAIN "")}}heroku domains:add {{.DOMAIN}} -a ghostonheroku-$RNDM {{end}}
- task: cloudflare:dns
vars:
CLOUDFLARE_API_TOKEN: '{{.CLOUDFLARE_API_TOKEN}}'
CLOUDFLARE_DNS_ZONE: '{{.CLOUDFLARE_DNS_ZONE}}'
CLOUDFLARE_RECORD_NAME: ghostonheroku
CLOUDFLARE_RECORD_VALUE: '{{.DOMAIN}}'
CONFIGURE_CLOUDFLARE_DNS: '{{.CONFIGURE_CLOUDFLARE_DNS}}'
hasura:
summary: |
Task to deploy `hasura` to Heroku.
The below variable can be used to pass the value of Custom Domain for the application
DOMAIN: The custom domain to be added to the application. Set this to a valid value to add the domain (`CONFIGURE_CUSTOM_DOMAIN` should be set to `true`).
vars:
DOMAIN: ''
cmds:
- |
RNDM=$(shuf -i 10000-1000000 -n 1)
heroku api POST /app-setups --body "{\"app\":{\"region\":\"$REGION\",\"name\":\"hasura-$RNDM\"},\
\"source_blob\":{\"url\":\"https://api.github.com/repos/hasura/graphql-engine-heroku/tarball/master\"}}"
{{if and (eq .CONFIGURE_CUSTOM_DOMAIN "true") (ne .DOMAIN "")}}heroku domains:add {{.DOMAIN}} -a hasura-$RNDM {{end}}
- task: cloudflare:dns
vars:
CLOUDFLARE_API_TOKEN: '{{.CLOUDFLARE_API_TOKEN}}'
CLOUDFLARE_DNS_ZONE: '{{.CLOUDFLARE_DNS_ZONE}}'
CLOUDFLARE_RECORD_NAME: hasura
CLOUDFLARE_RECORD_VALUE: '{{.DOMAIN}}'
CONFIGURE_CLOUDFLARE_DNS: '{{.CONFIGURE_CLOUDFLARE_DNS}}'
manet:
summary: |
Task to deploy `manet` to Heroku.
The below variable can be used to pass the value of Custom Domain for the application
DOMAIN: The custom domain to be added to the application. Set this to a valid value to add the domain (`CONFIGURE_CUSTOM_DOMAIN` should be set to `true`).
vars:
DOMAIN: ''
cmds:
- |
RNDM=$(shuf -i 10000-1000000 -n 1)
heroku api POST /app-setups --body "{\"app\":{\"region\":\"$REGION\",\"name\":\"manet-$RNDM\"},\
\"source_blob\":{\"url\":\"https://api.github.com/repos/vbauer/manet/tarball/master\"}}"
{{if and (eq .CONFIGURE_CUSTOM_DOMAIN "true") (ne .DOMAIN "")}}heroku domains:add {{.DOMAIN}} -a manet-$RNDM {{end}}
- task: cloudflare:dns
vars:
CLOUDFLARE_API_TOKEN: '{{.CLOUDFLARE_API_TOKEN}}'
CLOUDFLARE_DNS_ZONE: '{{.CLOUDFLARE_DNS_ZONE}}'
CLOUDFLARE_RECORD_NAME: manet
CLOUDFLARE_RECORD_VALUE: '{{.DOMAIN}}'
CONFIGURE_CLOUDFLARE_DNS: '{{.CONFIGURE_CLOUDFLARE_DNS}}'
metabase:
summary: |
Task to deploy `metabase` to Heroku.
The below variable can be used to pass the value of Custom Domain for the application
DOMAIN: The custom domain to be added to the application. Set this to a valid value to add the domain (`CONFIGURE_CUSTOM_DOMAIN` should be set to `true`).
vars:
DOMAIN: ''
cmds:
- |
RNDM=$(shuf -i 10000-1000000 -n 1)
heroku api POST /app-setups --body "{\"app\":{\"region\":\"$REGION\",\"name\":\"metabase-$RNDM\"},\
\"source_blob\":{\"url\":\"https://api.github.com/repos/metabase/metabase-deploy/tarball/master\"}}"
{{if and (eq .CONFIGURE_CUSTOM_DOMAIN "true") (ne .DOMAIN "")}}heroku domains:add {{.DOMAIN}} -a metabase-$RNDM {{end}}
- task: cloudflare:dns
vars:
CLOUDFLARE_API_TOKEN: '{{.CLOUDFLARE_API_TOKEN}}'
CLOUDFLARE_DNS_ZONE: '{{.CLOUDFLARE_DNS_ZONE}}'
CLOUDFLARE_RECORD_NAME: metabase
CLOUDFLARE_RECORD_VALUE: '{{.DOMAIN}}'
CONFIGURE_CLOUDFLARE_DNS: '{{.CONFIGURE_CLOUDFLARE_DNS}}'
nocodb:
summary: |
Task to deploy `nocodb` to Heroku.
Below are the environment variables that can be configured. Set the values in the `env` section.
NC_ONE_CLICK: Used for Heroku one-click deployment
NODE_TLS_REJECT_UNAUTHORIZED: Reject unauthorized
AWS_ACCESS_KEY_ID: For Litestream - S3 access key id
AWS_SECRET_ACCESS_KEY: For Litestream - S3 secret access key
AWS_BUCKET_REGION: Region where the bucket is present
AWS_BUCKET: For Litestream - S3 bucket
AWS_BUCKET_PATH: For Litestream - S3 bucket path (like folder within S3 bucket)
The below variable can be used to pass the value of Custom Domain for the application
DOMAIN: The custom domain to be added to the application. Set this to a valid value to add the domain (`CONFIGURE_CUSTOM_DOMAIN` should be set to `true`).
vars:
DOMAIN: ''
env:
AWS_ACCESS_KEY_ID: S3 access key id
AWS_BUCKET: S3 bucket
AWS_BUCKET_PATH: S3 bucket path (like folder within S3 bucket)
AWS_BUCKET_REGION: S3 Region
AWS_SECRET_ACCESS_KEY: S3 secret access key
NC_ONE_CLICK: true
NODE_TLS_REJECT_UNAUTHORIZED: '0'
cmds:
- task: bucket:create
vars:
AWS_ACCESS_KEY_ID: '{{.AWS_ACCESS_KEY_ID}}'
AWS_REGION: '{{.AWS_BUCKET_REGION}}'
AWS_SECRET_ACCESS_KEY: '{{.AWS_SECRET_ACCESS_KEY}}'
BUCKET_NAME: '{{.AWS_BUCKET}}'
- |
RNDM=$(shuf -i 10000-1000000 -n 1)
heroku api POST /app-setups --body "{\"app\":{\"region\":\"$REGION\",\"name\":\"nocodb-$RNDM\"},\
\"source_blob\":{\"url\":\"https://api.github.com/repos/nocodb/nocodb-seed-heroku/tarball/master\"},\
\"overrides\":{\"env\":{\"NC_ONE_CLICK\": $NC_ONE_CLICK, \"NODE_TLS_REJECT_UNAUTHORIZED\": \"$NODE_TLS_REJECT_UNAUTHORIZED\",\
\"AWS_ACCESS_KEY_ID\": \"$AWS_ACCESS_KEY_ID\", \"AWS_SECRET_ACCESS_KEY\": \"$AWS_SECRET_ACCESS_KEY\", \"AWS_BUCKET\":\
\"$AWS_BUCKET\", \"AWS_BUCKET_PATH\": \"$AWS_BUCKET_PATH\"}}}"
{{if and (eq .CONFIGURE_CUSTOM_DOMAIN "true") (ne .DOMAIN "")}}heroku domains:add {{.DOMAIN}} -a nocodb-$RNDM {{end}}
- task: cloudflare:dns
vars:
CLOUDFLARE_API_TOKEN: '{{.CLOUDFLARE_API_TOKEN}}'
CLOUDFLARE_DNS_ZONE: '{{.CLOUDFLARE_DNS_ZONE}}'
CLOUDFLARE_RECORD_NAME: nocodb
CLOUDFLARE_RECORD_VALUE: '{{.DOMAIN}}'
CONFIGURE_CLOUDFLARE_DNS: '{{.CONFIGURE_CLOUDFLARE_DNS}}'
tooljet:
summary: |
Task to deploy `tooljet` to Heroku.
Below are the environment variables that can be configured. Set the values in the `env` section.
DEPLOYMENT_PLATFORM: Platform ToolJet is deployed on
DISABLE_MULTI_WORKSPACE: Disables Multi-Workspace feature
DISABLE_SIGNUPS: Disable sign up in login page only applicable if Multi-Workspace feature is turned on
LOCKBOX_MASTER_KEY: Master key for encrypting datasource credentials.
NODE_ENV: Environment [production/development]
NODE_OPTIONS: Node options configured to increase node memory to support app build
SECRET_KEY_BASE: Used by ToolJet server as the input secret to the application's key generator.
TOOLJET_HOST: Public URL of the ToolJet installation. This is usually https://<app-name-in-first-step>.herokuapp.com
TOOLJET_SERVER_URL: URL of the ToolJet server installation. (This is the same as TOOLJET_HOST for Heroku deployments)
The below variable can be used to pass the value of Custom Domain for the application
DOMAIN: The custom domain to be added to the application. Set this to a valid value to add the domain (`CONFIGURE_CUSTOM_DOMAIN` should be set to `true`).
vars:
DOMAIN: ''
env:
DEPLOYMENT_PLATFORM: heroku
DISABLE_MULTI_WORKSPACE: false
DISABLE_SIGNUPS: false
LOCKBOX_MASTER_KEY: m@s73rk8s
NODE_ENV: production
NODE_OPTIONS: --max-old-space-size=4096
SECRET_KEY_BASE: SomeC0m6l00
TOOLJET_HOST: https://tooljet.herokuapp.com
TOOLJET_SERVER_URL: https://tooljet.herokuapp.com
cmds:
- |
RNDM=$(shuf -i 10000-1000000 -n 1)
heroku api POST /app-setups --body "{\"app\":{\"region\":\"$REGION\",\"name\":\"tooljet-$RNDM\"},\
\"source_blob\":{\"url\":\"https://api.github.com/repos/tooljet/tooljet/tarball/master\"},\
\"overrides\":{\"env\":{\"DEPLOYMENT_PLATFORM\": \"$DEPLOYMENT_PLATFORM\", \"DISABLE_MULTI_WORKSPACE\":\
\"$DISABLE_MULTI_WORKSPACE\", \"DISABLE_SIGNUPS\": $DISABLE_SIGNUPS, \"LOCKBOX_MASTER_KEY\":\
\"$LOCKBOX_MASTER_KEY\", \"NODE_ENV\": \"$NODE_ENV\", \"NODE_OPTIONS\": \"$NODE_OPTIONS\",\
\"SECRET_KEY_BASE\": \"$SECRET_KEY_BASE\", \"TOOLJET_HOST\": \"$TOOLJET_HOST\", \"TOOLJET_SERVER_URL\": \"$TOOLJET_SERVER_URL\"}}}"
{{if and (eq .CONFIGURE_CUSTOM_DOMAIN "true") (ne .DOMAIN "")}}heroku domains:add {{.DOMAIN}} -a tooljet-$RNDM {{end}}
- task: cloudflare:dns
vars:
CLOUDFLARE_API_TOKEN: '{{.CLOUDFLARE_API_TOKEN}}'
CLOUDFLARE_DNS_ZONE: '{{.CLOUDFLARE_DNS_ZONE}}'
CLOUDFLARE_RECORD_NAME: tooljet
CLOUDFLARE_RECORD_VALUE: '{{.DOMAIN}}'
CONFIGURE_CLOUDFLARE_DNS: '{{.CONFIGURE_CLOUDFLARE_DNS}}'
urltopdf:
summary: |
Task to deploy `url-to-pdf-api` to Heroku.
Below are the environment variables that can be configured. Set the values in the `env` section.
ALLOW_HTTP: When set to "true", unsecure requests are allowed
API_TOKENS: Comma-separated list of accepted keys in x-api-key header
The below variable can be used to pass the value of Custom Domain for the application
DOMAIN: The custom domain to be added to the application. Set this to a valid value to add the domain (`CONFIGURE_CUSTOM_DOMAIN` should be set to `true`).
vars:
DOMAIN: ''
env:
ALLOW_HTTP: false
API_TOKENS: ''
cmds:
- |
RNDM=$(shuf -i 10000-1000000 -n 1)
heroku api POST /app-setups --body "{\"app\":{\"region\":\"$REGION\",\"name\":\"urltopdf-$RNDM\"},\
\"source_blob\":{\"url\":\"https://api.github.com/repos/alvarcarto/url-to-pdf-api/tarball/master\"},\
\"overrides\":{\"env\":{\"ALLOW_HTTP\": $ALLOW_HTTP, \"API_TOKENS\": \"$API_TOKENS\"}}}"
{{if and (eq .CONFIGURE_CUSTOM_DOMAIN "true") (ne .DOMAIN "")}}heroku domains:add {{.DOMAIN}} -a urltopdf-$RNDM {{end}}
- task: cloudflare:dns
vars:
CLOUDFLARE_API_TOKEN: '{{.CLOUDFLARE_API_TOKEN}}'
CLOUDFLARE_DNS_ZONE: '{{.CLOUDFLARE_DNS_ZONE}}'
CLOUDFLARE_RECORD_NAME: urltopdf
CLOUDFLARE_RECORD_VALUE: '{{.DOMAIN}}'
CONFIGURE_CLOUDFLARE_DNS: '{{.CONFIGURE_CLOUDFLARE_DNS}}'
whoogle:
summary: |
Task to deploy `whoogle` to Heroku.
Below are the environment variables that can be configured. Set the values in the `env` section.
WHOOGLE_ALT_IG: The site to use as a replacement for instagram.com when site alternatives are enabled in the config.
WHOOGLE_ALT_IMG: The site to use as a replacement for imgur.com when site alternatives are enabled in the config.
WHOOGLE_ALT_MD: The site to use as a replacement for medium.com when site alternatives are enabled in the config.
WHOOGLE_ALT_RD: The site to use as a replacement for reddit.com when site alternatives are enabled in the config.
WHOOGLE_ALT_TL: The Google Translate alternative to use for all searches following the 'translate ___' structure.
WHOOGLE_ALT_TW: The site to use as a replacement for twitter.com when site alternatives are enabled in the config.
WHOOGLE_ALT_WIKI: The site to use as a replacement for wikipedia.com when site alternatives are enabled in the config.
WHOOGLE_ALT_YT: The site to use as a replacement for youtube.com when site alternatives are enabled in the config.
WHOOGLE_CONFIG_ALTS: [CONFIG] Use social media alternatives (set to 1 or leave blank)
WHOOGLE_CONFIG_BLOCK: [CONFIG] Block websites from search results (comma-separated list)
WHOOGLE_CONFIG_COUNTRY: [CONFIG] The country to use for restricting search results (use values from https://raw.githubusercontent.com/benbusby/whoogle-search/develop/app/static/settings/countries.json)
WHOOGLE_CONFIG_DISABLE: [CONFIG] Disable ability for client to change config (set to 1 or leave blank)
WHOOGLE_CONFIG_GET_ONLY: [CONFIG] Search using GET requests only (set to 1 or leave blank)
WHOOGLE_CONFIG_LANGUAGE: [CONFIG] The language to use for the interface (use values from https://raw.githubusercontent.com/benbusby/whoogle-search/develop/app/static/settings/languages.json)
WHOOGLE_CONFIG_NEAR: [CONFIG] Restrict results to only those near a particular city
WHOOGLE_CONFIG_NEW_TAB: [CONFIG] Always open results in new tab (set to 1 or leave blank)
WHOOGLE_CONFIG_SAFE: [CONFIG] Use safe mode for searches (set to 1 or leave blank)
WHOOGLE_CONFIG_SEARCH_LANGUAGE: [CONFIG] The language to use for search results (use values from https://raw.githubusercontent.com/benbusby/whoogle-search/develop/app/static/settings/languages.json)
WHOOGLE_CONFIG_STYLE: [CONFIG] Custom CSS styling (provide CSS or leave blank)
WHOOGLE_CONFIG_THEME: [CONFIG] Set theme to 'dark', 'light', or 'system'
WHOOGLE_CONFIG_TOR: [CONFIG] Use Tor, if available (set to 1 or leave blank)
WHOOGLE_CONFIG_VIEW_IMAGE: [CONFIG] Enable View Image option (set to 1 or leave blank)
WHOOGLE_MINIMAL: Remove everything except basic result cards from all search queries (set to 1 or leave blank)
WHOOGLE_PASS: The password for basic auth. WHOOGLE_USER must also be set if used. Leave empty to disable.
WHOOGLE_PROXY_LOC: The location of the proxy server (host or ip). Leave empty to disable.
WHOOGLE_PROXY_PASS: The password of the proxy server. Leave empty to disable.
WHOOGLE_PROXY_TYPE: The type of the proxy server. For example "socks5". Leave empty to disable.
WHOOGLE_PROXY_USER: The username of the proxy server. Leave empty to disable.
WHOOGLE_URL_PREFIX: The URL prefix to use for the whoogle instance (i.e. "/whoogle")
WHOOGLE_USER: The username for basic auth. WHOOGLE_PASS must also be set if used. Leave empty to disable.
The below variable can be used to pass the value of Custom Domain for the application
DOMAIN: The custom domain to be added to the application. Set this to a valid value to add the domain (`CONFIGURE_CUSTOM_DOMAIN` should be set to `true`).
vars:
DOMAIN: ''
env:
WHOOGLE_ALT_IG: farside.link/bibliogram/u
WHOOGLE_ALT_IMG: farside.link/rimgo
WHOOGLE_ALT_MD: farside.link/scribe
WHOOGLE_ALT_RD: farside.link/libreddit
WHOOGLE_ALT_TL: farside.link/lingva
WHOOGLE_ALT_TW: farside.link/nitter
WHOOGLE_ALT_WIKI: farside.link/wikiless
WHOOGLE_ALT_YT: farside.link/invidious
WHOOGLE_CONFIG_ALTS: ''
WHOOGLE_CONFIG_BLOCK: ''
WHOOGLE_CONFIG_COUNTRY: countryUS
WHOOGLE_CONFIG_DISABLE: ''
WHOOGLE_CONFIG_GET_ONLY: ''
WHOOGLE_CONFIG_LANGUAGE: lang_en
WHOOGLE_CONFIG_NEAR: ''
WHOOGLE_CONFIG_NEW_TAB: ''
WHOOGLE_CONFIG_SAFE: ''
WHOOGLE_CONFIG_SEARCH_LANGUAGE: lang_en
WHOOGLE_CONFIG_STYLE: ':root { /* LIGHT THEME COLORS */ --whoogle-background: #d8dee9; --whoogle-accent: #2e3440; --whoogle-text: #3B4252; --whoogle-contrast-text: #eceff4; --whoogle-secondary-text: #70757a; --whoogle-result-bg: #fff; --whoogle-result-title: #4c566a; --whoogle-result-url: #81a1c1; --whoogle-result-visited: #a3be8c; /* DARK THEME COLORS */ --whoogle-dark-background: #222; --whoogle-dark-accent: #685e79; --whoogle-dark-text: #fff; --whoogle-dark-contrast-text: #000; --whoogle-dark-secondary-text: #bbb; --whoogle-dark-result-bg: #000; --whoogle-dark-result-title: #1967d2; --whoogle-dark-result-url: #4b11a8; --whoogle-dark-result-visited: #bbbbff; }'
WHOOGLE_CONFIG_THEME: system
WHOOGLE_CONFIG_TOR: ''
WHOOGLE_CONFIG_VIEW_IMAGE: ''
WHOOGLE_MINIMAL: ''
WHOOGLE_PASS: ''
WHOOGLE_PROXY_LOC: ''
WHOOGLE_PROXY_PASS: ''
WHOOGLE_PROXY_TYPE: ''
WHOOGLE_PROXY_USER: ''
WHOOGLE_URL_PREFIX: /whoogle
WHOOGLE_USER: ''
cmds:
- |
RNDM=$(shuf -i 10000-1000000 -n 1)
heroku api POST /app-setups --body "{\"app\":{\"region\":\"$REGION\",\"name\":\"whoogle-$RNDM\"},\
\"source_blob\":{\"url\":\"https://api.github.com/repos/benbusby/whoogle-search/tarball/master\"},\
\"overrides\":{\"env\":{\"WHOOGLE_ALT_IG\": \"$WHOOGLE_ALT_IG\", \"WHOOGLE_ALT_IMG\":\
\"$WHOOGLE_ALT_IMG\", \"WHOOGLE_ALT_MD\": \"$WHOOGLE_ALT_MD\", \"WHOOGLE_ALT_RD\": \"$WHOOGLE_ALT_RD\",\
\"WHOOGLE_ALT_TL\": \"$WHOOGLE_ALT_TL\", \"WHOOGLE_ALT_TW\": \"$WHOOGLE_ALT_TW\", \"WHOOGLE_ALT_WIKI\":\
\"$WHOOGLE_ALT_WIKI\", \"WHOOGLE_ALT_YT\": \"$WHOOGLE_ALT_YT\", \"WHOOGLE_CONFIG_ALTS\": \"$WHOOGLE_CONFIG_ALTS\",\
\"WHOOGLE_CONFIG_BLOCK\": \"$WHOOGLE_CONFIG_BLOCK\", \"WHOOGLE_CONFIG_COUNTRY\": \"$WHOOGLE_CONFIG_COUNTRY\",\
\"WHOOGLE_CONFIG_DISABLE\": \"$WHOOGLE_CONFIG_DISABLE\", \"WHOOGLE_CONFIG_GET_ONLY\": \"$WHOOGLE_CONFIG_GET_ONLY\",\
\"WHOOGLE_CONFIG_LANGUAGE\": \"$WHOOGLE_CONFIG_LANGUAGE\", \"WHOOGLE_CONFIG_NEAR\": \"$WHOOGLE_CONFIG_NEAR\",\
\"WHOOGLE_CONFIG_NEW_TAB\": \"$WHOOGLE_CONFIG_NEW_TAB\", \"WHOOGLE_CONFIG_SAFE\": \"$WHOOGLE_CONFIG_SAFE\",\
\"WHOOGLE_CONFIG_SEARCH_LANGUAGE\": \"$WHOOGLE_CONFIG_SEARCH_LANGUAGE\", \"WHOOGLE_CONFIG_STYLE\":\
\"$WHOOGLE_CONFIG_STYLE\", \"WHOOGLE_CONFIG_THEME\": \"$WHOOGLE_CONFIG_THEME\", \"WHOOGLE_CONFIG_TOR\":\
\"$WHOOGLE_CONFIG_TOR\", \"WHOOGLE_CONFIG_VIEW_IMAGE\": \"$WHOOGLE_CONFIG_VIEW_IMAGE\", \"WHOOGLE_MINIMAL\":\
\"$WHOOGLE_MINIMAL\", \"WHOOGLE_PASS\": \"$WHOOGLE_PASS\", \"WHOOGLE_PROXY_LOC\": \"$WHOOGLE_PROXY_LOC\",\
\"WHOOGLE_PROXY_PASS\": \"$WHOOGLE_PROXY_PASS\", \"WHOOGLE_PROXY_TYPE\": \"$WHOOGLE_PROXY_TYPE\",\
\"WHOOGLE_PROXY_USER\": \"$WHOOGLE_PROXY_USER\", \"WHOOGLE_URL_PREFIX\": \"$WHOOGLE_URL_PREFIX\", \"WHOOGLE_USER\": \"$WHOOGLE_USER\"}}}"
{{if and (eq .CONFIGURE_CUSTOM_DOMAIN "true") (ne .DOMAIN "")}}heroku domains:add {{.DOMAIN}} -a whoogle-$RNDM {{end}}
- task: cloudflare:dns
vars:
CLOUDFLARE_API_TOKEN: '{{.CLOUDFLARE_API_TOKEN}}'
CLOUDFLARE_DNS_ZONE: '{{.CLOUDFLARE_DNS_ZONE}}'
CLOUDFLARE_RECORD_NAME: whoogle
CLOUDFLARE_RECORD_VALUE: '{{.DOMAIN}}'
CONFIGURE_CLOUDFLARE_DNS: '{{.CONFIGURE_CLOUDFLARE_DNS}}'

View file

@ -0,0 +1,19 @@
---
version: '3'
tasks:
sync:ssh-keys:
deps:
- :install:software:heroku
cmds:
- .config/log info 'Clearing Heroku SSH keys'
- heroku keys:clear
- .config/log start 'Syncing SSH keys with Heroku'
- |
for KEY in "$HOME"/.ssh/*.pub; do
heroku keys:add "$KEY"
done
- .config/log success 'Finished syncing SSH keys with Heroku'
preconditions:
- sh: '[ -n "$HEROKU_API_KEY" ]'
msg: The HEROKU_API_KEY must be set to a personal access token.

View file

@ -0,0 +1,62 @@
---
version: '3'
tasks:
bucket:create:
deps:
- :install:software:s5cmd
summary: |
# Create S3 Bucket
This task creates an S3 bucket in the given account.
The `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_REGION`, and `BUCKET_NAME` variables are passed in
from the calling task. The first three are set as environment variables.
env:
AWS_ACCESS_KEY_ID: '{{.AWS_ACCESS_KEY_ID}}'
AWS_REGION: '{{.AWS_REGION}}'
AWS_SECRET_ACCESS_KEY: '{{.AWS_SECRET_ACCESS_KEY}}'
cmds:
- s5cmd mb s3://'{{.BUCKET_NAME}}'
bucket:jumpusb:populate:
deps:
- :install:software:axel
summary: |
# Populate an S3 Bucket with JumpUSB Assets
This task first downloads the distro manifest from the JumpUSB repository:
https://gitlab.com/megabyte-labs/jumpusb/-/blob/master/local/distros.json
After that, it downloads all the operating systems and uploads them to an
S3 bucket using the same path that is defined in each object's path key.
For example, the following:
```
{
"url": "https://mirrors.edge.kernel.org/zorinos-isos/16/Zorin-OS-16.1-Core-64-bit.iso",
"url_axel": "https://{mirrors.edge.kernel.org/zorinos-isos,mirror2.sandyriver.net/pub/zorinos,mirror.clarkson.edu/zorinos/isos,distro.ibiblio.org/zorinos}/16/Zorin-OS-16.1-Core-64-bit.iso",
"path": "/iso/zorin/zorin-16.1-amd64.iso",
"persistence_base": "persistence_ext4_4GB_casper-rw.dat.7z",
"persistence_file": "zorin.dat",
"live": true
}
```
1. Downloads the ISO with Axel using the `url_axel` URL (and falls back to `url` if `url_axel` is not present)
2. Then uploads the file into the `/iso/zorin` directory of the S3 bucket, where the file is named `zorin-16.1-amd64.iso`
(a sketch of this loop is included as a comment after the `cmds` below)
env:
DISTROS_TMP:
sh: mktemp
cmds:
- curl -sSL https://gitlab.com/megabyte-labs/jumpusb/-/raw/master/local/distros.json > "$DISTROS_TMP"
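# A minimal sketch of the download/upload loop described in the summary, assuming
# `distros.json` is a top-level array and `BUCKET_NAME` names the destination bucket:
#   jq -c '.[]' "$DISTROS_TMP" | while read -r DISTRO; do
#     URL="$(echo "$DISTRO" | jq -r '.url_axel // .url')"
#     ISO_PATH="$(echo "$DISTRO" | jq -r '.path')"
#     axel -o "/tmp/$(basename "$ISO_PATH")" "$URL"
#     s5cmd cp "/tmp/$(basename "$ISO_PATH")" "s3://{{.BUCKET_NAME}}$ISO_PATH"
#   done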
bucket:synchronize:
deps:
- :install:software:s5cmd
summary: |
# Synchronize S3 Buckets
Synchronize the contents of one S3 bucket with another using s5cmd.
cmds:
- s5cmd sync '{{.SOURCE_BUCKET}}' '{{.DEST_BUCKET}}'

Some files were not shown because too many files have changed in this diff.