Moved around things

This commit is contained in:
Brian Zalewski 2024-05-05 04:05:33 +00:00
parent c01886a9a7
commit 8df0a8a261
55 changed files with 1808 additions and 1715 deletions

View file

@ -1,9 +1,11 @@
* Write requirements for `software.yml`
https://github.com/harababurel/gcsf
https://nixos.wiki/wiki/Nix_Installation_Guide
https://github.com/seaweedfs/seaweedfs
https://github.com/gitbito/CLI
https://github.com/awslabs/mountpoint-s3
https://gist.github.com/chadmayfield/ada07e4e506d7acd577a665541a70c9b
* Move age decryption higher
* Add ~/.local/share/sounds as a symlink to {{ .host.home }}/.local/share/betelgeuse/share/sounds
xattr -d com.apple.quarantine rclone

View file

@ -283,7 +283,7 @@ softwareGroups:
- duf
- dust
- empty-trash
- exa
- eza
- fcp
- fd
- firefox-profile
@ -538,7 +538,6 @@ softwareGroups:
Development-Tools-Desktop: &Development-Tools-Desktop
- github-desktop
- intellij-idea-ce
# - iterm2
- meld
- powershell
- redis-desktop-manager
@ -925,6 +924,7 @@ softwareGroups:
- amethyst
- espanso
# - fiscript
- languagetool
- libreoffice
- microsoft-office
- notion
@ -935,7 +935,6 @@ softwareGroups:
- flake8
- isort
- mambaforge
- micromamba
- poetry
- python
- virtualenv
@ -1040,7 +1039,6 @@ softwareGroups:
- htop
- ipmitool
- iproute2mac
- macprefs
- masscan
- plumber
- prefsniff
@ -1494,161 +1492,6 @@ softwareGroups:
__hostname__web-tmpl:
- *Browsers-Desktop
__hostname__work-tmpl: []
deprecated:
- pkg: apt-cacher-ng
note: Deprecated in favor of using Sonatype Nexus apt proxies.
- pkg: adobe-creative-cloud
note: Creates distracting boot window pop-up
- pkg: ariang
note: Not very polished and only available via cask
- pkg: asdf
note: ASDF is currently installed via a script rather than in the software definitions.
- pkg: astronvim
note: AstroNvim is deprecated in favor of NvChad, which has more stars and is less troublesome to install (in our experience)
- pkg: bivac
note: Deprecated in favor of using the RClone Docker plugin for S3-backed mounts
- pkg: boilr
note: Deprecated in favor of using `gomplate`
- pkg: captain
note: Unused Docker assistance package
- pkg: catfs
note: CatFS is reportedly in the alpha stage of development and we experienced issues when trying to use it.
- pkg: catlight
note: Free tier has strict limitations and app is not very polished
- pkg: ccat
note: bat can be used instead and has more features
- pkg: cerebro
note: macOS system search provides similar features in a more native way. Better alternatives are available.
- pkg: chromium
note: Deprecated in favor of the regular Chrome.
- pkg: cumulus
note: Unnecessary menu bar widget for SoundCloud.
- pkg: diffsofancy
note: Deprecated in favor of `delta`
- pkg: filezilla
note: FileZilla has an offensive user-interface. Absolutely atrocious.
- pkg: gdu
note: Deprecated in favor of `duf` which is written in Rust
- pkg: gitdock
note: Only supports GitLab
- pkg: git-fuzzy
note: Installed via git as part of the dotfiles
- pkg: graphql-playground
note: Altair is more popular and well-maintained on GitHub
- pkg: gvm
note: Deprecated in favor of using ASDF.
- pkg: hyper
note: Deprecated in favor of Tabby for a full-featured terminal.
- pkg: kitty
note: Deprecated in favor of using alternative terminals such as iTerm2, Tabby, and bundled Linux terminals.
- pkg: koodo-reader
note: Unneeded and interface is not perfect
- pkg: ksnip
note: macOS version was lackluster
- pkg: lepton
note: Deprecated in favor of [Pieces](https://pieces.app/). The core components of Pieces are not open-source but the functionality is significantly better than massCode.
- pkg: librewolf
note: Creates ~/.librewolf and Firefox is preferred
- pkg: loop
note: Encountering error during cargo install
- pkg: lpass
note: Deprecated in favor of using the BitWarden password manager and its related tools.
- pkg: lsd
note: Deprecated in favor of using `exa`. `lsd` may be re-introduced when Windows support is added.
- pkg: manta
note: Replaced by web apps like waveapps.com
- pkg: masscode # Deprecated in favor of Pieces
note: Deprecated in favor of [Pieces](https://pieces.app/). The core components of Pieces are not open-source but the functionality is significantly better than massCode.
- pkg: mcfly
note: Deprecated in favor of atuin
- pkg: microsoft-todo
note: Deprecated in favor of Google Tasks
- pkg: mullvad-vpn
note: Switched to ProtonVPN exclusively
- pkg: multipass
note: Opens persistent menu icon on GNOME and has issues that sometimes require switching the virt driver
- pkg: neovide
note: Prefer other IDEs - neovim is good for the terminal though
- pkg: nordvpn
note: Deprecated in favor of leveraging ProtonVPN as the primary VPN service.
- pkg: nvm
note: Deprecated in favor of using ASDF.
- pkg: nuclear
note: Music app - UI is horrendous
- pkg: orbstack
note: Faster / better alternative to Docker Desktop on macOS. Deprecated because it does not support Docker Extensions and is only for macOS.
- pkg: pip
note: The `pip` installation is handled by the `install-program` program bundled with Install Doctor.
- pkg: pipx
note: The `pipx` installation is handled by the `install-program` program bundled with Install Doctor.
- pkg: profilecreator
note: macOS app that allows creating profiles. Crashes on macOS with enterprise managed settings due to read-only file access.
- pkg: pyenv
note: Deprecated in favor of using ASDF.
- pkg: s3filesystem
note: Deprecated in favor of using RClone.
- pkg: starship
note: Deprecated in favor of alternative terminal status prompts such as PowerLevel10k.
- pkg: raindrop
note: Deprecated because browser extensions do a better job of unifying bookmarks. Namely, the extension called Floccus allows cross-browser bookmark syncing. The Raindrop package also requires a paid subscription for advanced features.
- pkg: rvm
note: Deprecated in favor of using ASDF.
- pkg: sdkman-cli
note: Deprecated in favor of mise
- pkg: sidekick
note: Not free for all features
- pkg: standard-notes
note: Deprecated in favor of Obsidian / Notion
- pkg: sqlectron
note: beekeeper-studio preferred
- pkg: stubby
note: Causes issues when other programs are modifying the DNS resolving endpoint. Might be worth reinvestigating but ideally the DNS should be encrypted on pfSense or use CloudFlare WARP to handle it.
- pkg: temps
note: macOS menu bar app for weather. Buggy software.
- pkg: taskwarrior
note: Removed from default install because the binary executable conflicts with go-task's binary executable.
- pkg: termius
note: Deprecated since all the interesting features require a paid subscription.
- pkg: ugm
note: Error encountered while installing with Go reported [here](https://github.com/ariasmn/ugm/issues/2).
- pkg: ulauncher
note: Deprecated in favor of alternative app launchers.
- pkg: vscodium
note: Not all VSCode plugins work with VSCodium.
- pkg: xhyve
note: Disabled on Homebrew because it has not been modified for several years and does not build properly
- pkg: yubikey-agent
note: The OpenSSH library can now create native keys that integrate with the client.
- pkg: chef-workstation
note: Not utilizing Chef and it automatically adds a top bar menu item that causes load delay
- pkg: wordops
note: Prefer EasyEngine instead
# Files below need to be reviewed before adding them to the stack. They should include all of the
# definitions in software.yml that are not included somewhere in the definitions that this file
# maps out.
queued:
- pkg: android-platform-tools
note: Might only be needed in headless scenarios since Android Studio will download the tools
- pkg: editly
note: Editly NPM package is failing to install on macOS. Attempts to compile with node-gyp and fails.
- pkg: hishtory
note: Erroring out - waiting on an alternate installation method like Homebrew to be released
- pkg: metasploit
note: Determine whether or not this will be flagged by management settings
- pkg: rancher-desktop
note: Conflicts with Docker Desktop
- pkg: rear
note: System backup utility that may be incorporated after comparing it with other, possibly better, alternatives.
- pkg: frps / frpc
note: No need to integrate these packages at this time.
- pkg: vector
note: Package currently relies on bash one-liner script
- pkg: catfs
note: After `sudo apt-get install -y fuse libfuse-dev`, the error `could not find system library 'fuse' required by the 'fuse' crate` still shows up
- pkg: snapd
note: Bundled into installer
- pkg: signal
note: Not needed - mostly a phone app
helmCharts:
- vector
helm:

View file

@ -50,7 +50,7 @@ GHORG_INCLUDE_SUBMODULES: true
# Deletes all files/directories found in your local clone directory that are not found on the remote (e.g., after remote deletion). With GHORG_SKIP_ARCHIVED set, archived repositories will also be pruned from your local clone.
# Will prompt before deleting any files unless used in combination with --prune-no-confirm
# flag (--prune)
GHORG_PRUNE: true
GHORG_PRUNE: false
# Skip interactive y/n prompt when pruning clones (can only be used in combination with --prune).
# flag (--prune-no-confirm)
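For reference, the prune behavior described above maps to a plain `ghorg` invocation; the organization name below is a placeholder:

```bash
# Illustrative only - "my-org" is a placeholder organization.
# --prune removes local clones deleted on the remote; --prune-no-confirm skips the y/n prompt.
ghorg clone my-org --prune --prune-no-confirm
```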

View file

@ -70,10 +70,10 @@ alias curl-impersonate='docker run --rm lwthiker/curl-impersonate:0.5-chrome cur
# alias curl='curlie'
# fi
### exa
if command -v exa > /dev/null; then
alias ls='exa --long --all --color auto --icons --sort=type'
alias tree='exa --tree'
### eza
if command -v eza > /dev/null; then
alias ls='eza --long --all --color auto --icons --sort=type'
alias tree='eza --tree'
alias la='ls -la'
alias lt='ls --tree --level=2'
else
@ -266,11 +266,13 @@ alias ssh-config='${EDITOR:code} ~/.ssh/config'
# Pastebin
alias sprunge='curl -F "sprunge=<-" http://sprunge.us'
# Disable Tor for current shell
alias toroff='source torsocks off'
if command -v torsocks > /dev/null; then
# Disable Tor for current shell
alias toroff='source torsocks off'
# Enable Tor for current shell
alias toron='source torsocks on'
# Enable Tor for current shell
alias toron='source torsocks on'
fi
# Test Tor connection
alias tortest='curl --socks5-hostname 127.0.0.1:9050 --silent https://check.torproject.org/ | head -25'

View file

@ -55,6 +55,7 @@ export PATH="$HOME/.local/bin/firejail:$PATH"
export PATH="$HOME/.local/bin/flatpak:$PATH"
export PATH="$HOME/.local/bin/gpt:$PATH"
export PATH="$HOME/.local/bin/pipx:$PATH"
export PATH="$HOME/.local/bin/post-installx:$PATH"
if [ -f /usr/bin/qubes-session ]; then
export PATH="$HOME/.local/bin/qubes:$PATH"
fi
@ -303,6 +304,11 @@ export K9SCONFIG="${XDG_CONFIG_HOME:-$HOME/.config}/k9s"
### KDE
export KDEHOME="${XDG_CONFIG_HOME:-$HOME/.config}/kde"
### Keybase
if [ -f /Applications/Keybase.app/Contents/SharedSupport/bin/keybase ]; then
export PATH="/Applications/Keybase.app/Contents/SharedSupport/bin:$PATH"
fi
### Kodi
export KODI_DATA="${XDG_DATA_HOME:-$HOME/.local/share}/kodi"
@ -485,6 +491,9 @@ fi
export VAGRANT_ALIAS_FILE="${XDG_CONFIG_HOME:-$HOME/.config}/vagrant/aliases"
export VAGRANT_DEFAULT_PROVIDER=virtualbox
export VAGRANT_HOME="${XDG_DATA_HOME:-$HOME/.local/share}/vagrant.d"
if [ -d /opt/vagrant-vmware-desktop/bin ]; then
export PATH="/opt/vagrant-vmware-desktop/bin:$PATH"
fi
### Visual Studio Code
export VSCODE_EXTENSIONS="${XDG_DATA_HOME:-$HOME/.local/share}/vscode"

View file

@ -43,7 +43,7 @@ async function runScript(key, script) {
try {
runSilentCommand(`glow --width 80 "${cacheDir}/${key}-glow"`)
// TODO: Set process.env.DEBUG || true here because the asynchronous method is not logging properly / running slow
if (process.env.DEBUG || true) {
if (process.env.DEBUG) {
return runSilentCommand(`bash "${cacheDir}/${key}" || logg error 'Error occurred while processing script for ${key}'`)
} else {
return $`bash "${cacheDir}/${key}" || logg error 'Error occurred while processing script for ${key}'`.pipe(process.stdout)
@ -455,7 +455,7 @@ async function main() {
acquireManagerList('gem', `gem list | awk '{print $1}'`),
acquireManagerList('npm', `volta list --format plain | awk '{print $2}' | sed 's/@.*//'`),
acquireManagerList('pacman', `pacman -Qs`),
acquireManagerList('pip3', `pip3 list | awk '{print $1}'`),
acquireManagerList('pip', `pip3 list | awk '{print $1}'`),
acquireManagerList('pipx', `pipx list --short | awk '{print $1}'`),
acquireManagerList('snap', `if command -v snapd; then snap list; fi`),
acquireManagerList('zap', `zap list`)
@ -472,7 +472,7 @@ async function main() {
gem: lists[5],
npm: lists[6],
pacman: lists[7],
pip3: lists[8],
pip: lists[8],
pipx: lists[9],
snap: lists[10],
zap: lists[11]
@ -500,6 +500,10 @@ async function main() {
// Filter out packages already installed by package managers
return x.installList.length
})
.filter(x => {
// Filter out packages that contain a deprecation note
return !x._deprecated
})
.filter(x => {
// Filter out macOS apps that already have a _app installed
if (x.installType === 'cask' || (osId === 'darwin' && x._app)) {
@ -544,7 +548,24 @@ async function main() {
})
log(`Running installation routine`)
await installPackages(installInstructions)
log(`Running post-install scripts`)
log(`Adding users / groups defined under _groups`)
const usersGroupsAdditions = installData
.flatMap(x => {
const groupsField = getPkgData('_groups', x, x.installType)
if (!groupsField) return Promise.resolve()
log(`Ensuring user(s) / group(s) created for ${x.listKey}`)
if (typeof x[groupsField] !== 'string' && !Array.isArray(x[groupsField])) {
log(`Failed to parse _groups for ${x.installKey}. The _groups field must be either a string or string[].`)
return Promise.resolve()
} else {
const groups = typeof x[groupsField] === 'string' ? [x[groupsField]] : x[groupsField]
return groups.flatMap(y => {
return $`sudo "${os.homedir()}/.local/bin/add-usergroup" "${process.env.USER}" "${y}"`
})
}
})
await Promise.allSettled(usersGroupsAdditions)
log(`Running post-install inline scripts`)
const postScripts = installData
.flatMap(x => {
const postField = getPkgData('_post', x, x.installType)
@ -552,7 +573,38 @@ async function main() {
log(`Running post-install script for ${x.listKey}`)
return (postField && runScript(x.listKey, x[postField])) || Promise.resolve()
})
await Promise.allSettled(postScripts)
log(`Running post-install scripts defined in ~/.local/bin/post-installx`)
const postScriptFiles = installData
.flatMap(x => {
const scriptPath = `${os.homedir()}/.local/bin/post-installx/post-${x.installKey}.sh`
const scriptExists = fs.existsSync(scriptPath)
if (!scriptExists) return Promise.resolve()
log(`Running post-install script defined in ${scriptPath}`)
return runScript(`post-${x.listKey}.sh`, fs.readFileSync(scriptPath, 'utf8'))
})
await Promise.allSettled([...postScripts, ...postScriptFiles])
log(`Starting services flagged with _serviceEnabled`)
const systemctlInstalled = which.sync('systemctl', { nothrow: true })
const brewInstalled = which.sync('brew', { nothrow: true })
const servicePromises = installData
.filter(x => x._serviceEnabled)
.filter(x => x._service)
.flatMap(x => {
const serviceField = getPkgData('_service', x, x.installType)
if (!serviceField) return Promise.resolve()
const services = typeof x[serviceField] === 'string' ? [{ name: x[serviceField] }] : (Array.isArray(x[serviceField]) ? x[serviceField] : [{ name: x[serviceField].name, sudo: x[serviceField].sudo }])
return services.flatMap(y => {
const name = typeof y === 'string' ? y : y.name
const sudo = typeof y === 'string' ? null : y.sudo
if (osType === 'linux' && x.installType !== 'brew' && x.installType !== 'cask' && systemctlInstalled) {
return sudo !== false ? $`sudo systemctl enable --now ${name}` : $`systemctl enable --now ${name}`
} else if (brewInstalled) {
return sudo === true ? $`sudo brew services start ${name}` : $`brew services start ${name}`
}
})
})
await Promise.allSettled(servicePromises)
log(`Installation process complete!`)
}
main()
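To make the new `_groups`, `post-installx`, and `_serviceEnabled` handling above concrete, here is a rough sketch of the commands the installer effectively ends up running for a hypothetical Linux package whose definition sets `_groups: rclone` and `_serviceEnabled: true`; the package name is assumed purely for illustration:

```bash
# Hypothetical package "rclone" with _groups: rclone and _serviceEnabled: true (Linux, non-Homebrew install type)
sudo "$HOME/.local/bin/add-usergroup" "$USER" rclone   # users / groups defined under _groups
bash "$HOME/.local/bin/post-installx/post-rclone.sh"   # post-installx script, if the file exists
sudo systemctl enable --now rclone                     # service start for _serviceEnabled entries
```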

View file

@ -0,0 +1,14 @@
#!/usr/bin/env bash
# @file Aqua Initialization
# @brief Updates and installs any Aqua dependencies that are defined in Aqua's configuration file.
# @description
# This script updates Aqua and then installs any Aqua dependencies that are defined.
if command -v aqua > /dev/null; then
logg info 'Updating Aqua'
aqua update-aqua
logg info 'Installing Aqua dependencies (if any are defined)'
aqua install -a
else
logg info 'Skipping aqua install script because aqua was not installed'
fi

View file

@ -0,0 +1,13 @@
#!/usr/bin/env bash
# @file Atuin Initialization
# @brief Registers with atuin, logs in, imports command history, and synchronizes
if command -v atuin > /dev/null; then
source "${XDG_CONFIG_HOME:-$HOME/.config}/shell/private.sh"
atuin register -u "$ATUIN_USERNAME" -e "$ATUIN_EMAIL" -p "$ATUIN_PASSWORD"
atuin login -u "$ATUIN_USERNAME" -p "$ATUIN_PASSWORD" -k "$ATUIN_KEY"
atuin import auto
atuin sync
else
logg info 'atuin is not available in the PATH'
fi
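The script above sources `${XDG_CONFIG_HOME:-$HOME/.config}/shell/private.sh` for its credentials, so that file needs to export the four variables it consumes. A minimal sketch with placeholder values:

```bash
# Placeholder values - substitute your own Atuin credentials and sync key.
export ATUIN_USERNAME="example-user"
export ATUIN_EMAIL="user@example.com"
export ATUIN_PASSWORD="change-me"
export ATUIN_KEY="sync-key-from-an-already-registered-machine"
```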

View file

@ -0,0 +1,21 @@
#!/usr/bin/env bash
# @file Blocky Configuration
# @brief Copies over configuration (and service file, in the case of Linux) to the appropriate system location
if command -v blocky > /dev/null; then
if [ -d /Applications ] && [ -d /System ]; then
### macOS
cp -f "$HOME/.local/etc/blocky/config.yaml" "$(brew --prefix)/etc/blocky/config.yaml"
else
### Linux
sudo mkdir -p /usr/local/etc/blocky
if [ -d /usr/lib/systemd/system ]; then
sudo cp -f "$HOME/.local/etc/blocky/config.yaml" /usr/local/etc/blocky/config.yaml
sudo cp -f "$HOME/.local/etc/blocky/blocky.service" /usr/lib/systemd/system/blocky.service
else
logg "/usr/lib/systemd/system is missing from the file system"
fi
fi
else
logg info 'Blocky is not available in the PATH'
fi

View file

@ -0,0 +1,63 @@
#!/usr/bin/env bash
# @file Brave Browser Setup
# @brief Applies browser policy configurations
function chromeSetUp() {
### Ensure Chrome policies directory is present
logg info 'Processing policy directories for Chromium based browsers'
for POLICY_DIR in "/etc/brave/policies"; do
if [ -d "$(dirname "$POLICY_DIR")" ]; then
### Managed policies
if [ ! -f "$POLICY_DIR/managed/policies.json" ]; then
logg info "Ensuring directory $POLICY_DIR/managed exists"
sudo mkdir -p "$POLICY_DIR/managed"
logg info "Copying ${XDG_CONFIG_HOME:-$HOME/.config}/chrome/managed.json to $POLICY_DIR/managed/policies.json"
sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/chrome/managed.json" "$POLICY_DIR/managed/policies.json"
fi
### Recommended policies
if [ ! -f "$POLICY_DIR/recommended/policies.json" ]; then
logg info "Ensuring directory $POLICY_DIR/recommended exists" && sudo mkdir -p "$POLICY_DIR/recommended"
logg info "Copying ${XDG_CONFIG_HOME:-$HOME/.config}/chrome/recommended.json to $POLICY_DIR/recommended/policies.json"
sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/chrome/recommended.json" "$POLICY_DIR/recommended/policies.json"
fi
else
logg info "Skipping extension injection into $POLICY_DIR - create these folders prior to running to create managed configs"
fi
done
### Add Chrome extension JSON
# logg info 'Populating Chrome extension JSON'
# for EXTENSION_DIR in "/etc/brave/extensions" "$HOME/Library/Application Support/BraveSoftware/Brave-Browser/External Extensions"; do
# ### Ensure program-type is installed
# if [ -d "$(dirname "$EXTENSION_DIR")" ]; then
# ### Ensure extension directory exists
# if [[ "$EXTENSION_DIR" == '/opt/'* ]] || [[ "$EXTENSION_DIR" == '/etc/'* ]]; then
# if [ ! -d "$EXTENSION_DIR" ]; then
# logg info "Creating directory $EXTENSION_DIR" && sudo mkdir -p "$EXTENSION_DIR"
# fi
# else
# if [ ! -d "$EXTENSION_DIR" ]; then
# logg info "Creating directory $EXTENSION_DIR" && mkdir -p "$EXTENSION_DIR"
# fi
# fi
# ### Add extension JSON
# logg info "Adding Chrome extensions to $EXTENSION_DIR"
# for EXTENSION in { { list (.chromeExtensions | toString | replace "[" "" | replace "]" "") | uniq | join " " } }; do
# logg info "Adding Chrome extension manifest ($EXTENSION)"
# if ! echo "$EXTENSION" | grep 'https://chrome.google.com/webstore/detail/' > /dev/null; then
# EXTENSION="https://chrome.google.com/webstore/detail/$EXTENSION"
# fi
# EXTENSION_ID="$(echo "$EXTENSION" | sed 's/^.*\/\([^\/]*\)$/\1/')"
# if [[ "$EXTENSION_DIR" == '/opt/'* ]] || [[ "$EXTENSION_DIR" == '/etc/'* ]]; then
# sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/chrome/extension.json" "$EXTENSION_DIR/${EXTENSION_ID}.json"
# else
# cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/chrome/extension.json" "$EXTENSION_DIR/${EXTENSION_ID}.json"
# fi
# done
# else
# logg info "$EXTENSION_DIR does not exist"
# fi
# done
}
chromeSetUp

View file

@ -0,0 +1,65 @@
#!/usr/bin/env bash
# @file Chromium Configuration
# @brief Applies browser policy configurations
function chromeSetUp() {
### Ensure Chrome policies directory is present
logg info 'Processing policy directories for Chromium based browsers'
for POLICY_DIR in "/etc/chromium/policies"; do
if [ -d "$(dirname "$POLICY_DIR")" ]; then
### Managed policies
if [ ! -f "$POLICY_DIR/managed/policies.json" ]; then
logg info "Ensuring directory $POLICY_DIR/managed exists"
sudo mkdir -p "$POLICY_DIR/managed"
logg info "Copying ${XDG_CONFIG_HOME:-$HOME/.config}/chrome/managed.json to $POLICY_DIR/managed/policies.json"
sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/chrome/managed.json" "$POLICY_DIR/managed/policies.json"
fi
### Recommended policies
if [ ! -f "$POLICY_DIR/recommended/policies.json" ]; then
logg info "Ensuring directory $POLICY_DIR/recommended exists" && sudo mkdir -p "$POLICY_DIR/recommended"
logg info "Copying ${XDG_CONFIG_HOME:-$HOME/.config}/chrome/recommended.json to $POLICY_DIR/recommended/policies.json"
sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/chrome/recommended.json" "$POLICY_DIR/recommended/policies.json"
fi
else
logg info "Skipping extension injection into $POLICY_DIR - create these folders prior to running to create managed configs"
fi
done
# ### Add Chrome extension JSON
# logg info 'Populating Chrome extension JSON'
# ### TODO - Find `EXTENSION_DIR` for macOS in Application Support folder like `$HOME/Library/Application Support/Google/Chrome/External Extensions` for Google Chrome
# for EXTENSION_DIR in "/etc/chromium/extensions"; do
# ### Ensure program-type is installed
# if [ -d "$(dirname "$EXTENSION_DIR")" ]; then
# ### Ensure extension directory exists
# if [[ "$EXTENSION_DIR" == '/opt/'* ]] || [[ "$EXTENSION_DIR" == '/etc/'* ]]; then
# if [ ! -d "$EXTENSION_DIR" ]; then
# logg info "Creating directory $EXTENSION_DIR" && sudo mkdir -p "$EXTENSION_DIR"
# fi
# else
# if [ ! -d "$EXTENSION_DIR" ]; then
# logg info "Creating directory $EXTENSION_DIR" && mkdir -p "$EXTENSION_DIR"
# fi
# fi
# ### Add extension JSON
# logg info "Adding Chrome extensions to $EXTENSION_DIR"
# for EXTENSION in {{ list (.chromeExtensions | toString | replace "[" "" | replace "]" "") | uniq | join " " }}; do
# logg info "Adding Chrome extension manifest ($EXTENSION)"
# if ! echo "$EXTENSION" | grep 'https://chrome.google.com/webstore/detail/' > /dev/null; then
# EXTENSION="https://chrome.google.com/webstore/detail/$EXTENSION"
# fi
# EXTENSION_ID="$(echo "$EXTENSION" | sed 's/^.*\/\([^\/]*\)$/\1/')"
# if [[ "$EXTENSION_DIR" == '/opt/'* ]] || [[ "$EXTENSION_DIR" == '/etc/'* ]]; then
# sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/chrome/extension.json" "$EXTENSION_DIR/${EXTENSION_ID}.json"
# else
# cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/chrome/extension.json" "$EXTENSION_DIR/${EXTENSION_ID}.json"
# fi
# done
# else
# logg info "$EXTENSION_DIR does not exist"
# fi
# done
}
chromeSetUp

View file

@ -0,0 +1,37 @@
#!/usr/bin/env bash
# @file ClamAV Configuration
# @brief Applies ClamAV configuration, updates its database, and configures background services
if command -v freshclam > /dev/null; then
### Add freshclam.conf
if [ -f "$HOME/.local/etc/clamav/freshclam.conf" ]; then
sudo mkdir -p /usr/local/etc/clamav
sudo cp -f "$HOME/.local/etc/clamav/freshclam.conf" /usr/local/etc/clamav/freshclam.conf
if [ -d "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav" ] && [ ! -f "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav/freshclam.conf" ]; then
ln -s /usr/local/etc/clamav/freshclam.conf "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav/freshclam.conf"
fi
fi
### Add clamd.conf
if [ -f "$HOME/.local/etc/clamav/clamd.conf" ]; then
sudo mkdir -p /usr/local/etc/clamav
sudo cp -f "$HOME/.local/etc/clamav/clamd.conf" /usr/local/etc/clamav/clamd.conf
if [ -d "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav" ] && [ ! -f "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav/clamd.conf" ]; then
ln -s /usr/local/etc/clamav/clamd.conf "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav/clamd.conf"
fi
fi
### Setting up launchd services on macOS
if [ -d /Applications ] && [ -d /System ]; then
sudo mkdir -p /var/log/clamav
# sudo chown $USER /var/log/clamav
sudo cp -f "$HOME/.local/etc/clamav/clamdscan.plist" /Library/LaunchDaemons/clamdscan.plist
sudo cp -f "$HOME/.local/etc/clamav/freshclam.plist" /Library/LaunchDaemons/freshclam.plist
sudo launchctl load -w /Library/LaunchDaemons/clamdscan.plist
sudo launchctl load -w /Library/LaunchDaemons/freshclam.plist
fi
### Update database
freshclam
else
logg info 'freshclam is not available in the PATH'
fi

View file

@ -0,0 +1,91 @@
#!/usr/bin/env bash
# @file Cloudflared Configuration
# @brief Applies cloudflared configuration, connects to Argo tunnel with managed configuration, and enables it on system start
{{- $registrationToken := "" }}
{{- if and (stat (joinPath .host.home ".config" "age" "chezmoi.txt")) (stat (joinPath .chezmoi.sourceDir ".chezmoitemplates" "cloudflared" .host.hostname)) -}}
{{- $registrationToken = (includeTemplate (print "cloudflared/" .host.hostname) | decrypt) -}}
{{- end }}
### Set up CloudFlare tunnels
if command -v cloudflared > /dev/null && [ -d "$HOME/.local/etc/cloudflared" ]; then
# Show warning message about ~/.cloudflared already existing
if [ -d "$HOME/.cloudflared" ]; then
logg warn '~/.cloudflared is already in the home directory - to ensure proper deployment, remove previous tunnel configuration folders'
fi
### Ensure /usr/local/etc/cloudflared exists
if [ ! -d /usr/local/etc/cloudflared ]; then
logg info 'Creating folder /usr/local/etc/cloudflared'
sudo mkdir -p /usr/local/etc/cloudflared
fi
# Copy over configuration files
logg info 'Ensuring /usr/local/etc/cloudflared exists' && sudo mkdir -p /usr/local/etc/cloudflared
logg info 'Copying over configuration files from ~/.local/etc/cloudflared to /usr/local/etc/cloudflared'
sudo cp -f "$HOME/.local/etc/cloudflared/cert.pem" /usr/local/etc/cloudflared/cert.pem
sudo cp -f "$HOME/.local/etc/cloudflared/config.yml" /usr/local/etc/cloudflared/config.yml
### Register tunnel (if not already registered)
if sudo cloudflared tunnel list | grep "host-{{ .host.hostname }}" > /dev/null; then
logg info 'CloudFlare tunnel is already registered'
else
logg info 'Creating a CloudFlare tunnel to this host'
sudo cloudflared tunnel create "host-{{ .host.hostname }}"
fi
TUNNEL_ID="$(sudo cloudflared tunnel list | grep 'host-{{ .host.hostname }}' | sed 's/ .*//')"
logg info "Tunnel ID: $TUNNEL_ID"
if [ -f "/usr/local/etc/cloudflared/${TUNNEL_ID}.json" ]; then
logg info 'Symlinking tunnel configuration to /usr/local/etc/cloudflared/credentials.json'
sudo rm -f /usr/local/etc/cloudflared/credentials.json
sudo ln -s "/usr/local/etc/cloudflared/${TUNNEL_ID}.json" /usr/local/etc/cloudflared/credentials.json
else
logg info 'Handling case where the tunnel registration is not present in /usr/local/etc/cloudflared'
{{ if eq $registrationToken "" -}}
logg warn 'Registration token is unavailable - you might have to delete the pre-existing tunnel or set up secrets properly'
{{- else -}}
logg info 'Registration token retrieved from encrypted blob stored at home/.chezmoitemplates/cloudflared/{{ .host.hostname }}'
{{ if eq (substr 0 1 $registrationToken) "{" -}}
logg info 'Registration token stored in credential file form'
echo -n '{{ $registrationToken }}' | sudo tee /usr/local/etc/cloudflared/credentials.json > /dev/null
{{ else }}
logg info 'Registration token is in token form - it will be used in conjunction with sudo cloudflared service install'
{{- end }}
{{- end }}
fi
### Set up service
if [ -d /Applications ] && [ -d /System ]; then
# System is macOS
if [ -f /Library/LaunchDaemons/com.cloudflare.cloudflared.plist ]; then
logg info 'cloudflared service is already installed'
else
logg info 'Running sudo cloudflared service install'
sudo cloudflared service install{{ if and (ne $registrationToken "") (eq (substr 0 1 $registrationToken) "{") -}} {{ $registrationToken }}{{ end }}
fi
logg info 'Ensuring cloudflared service is started'
sudo launchctl start com.cloudflare.cloudflared
elif [ -f /etc/os-release ]; then
# System is Linux
if systemctl --all --type service | grep -q "cloudflared" > /dev/null; then
logg info 'cloudflared service is already available as a service'
else
logg info 'Running sudo cloudflared service install'
sudo cloudflared service install{{ if and (ne $registrationToken "") (eq (substr 0 1 $registrationToken) "{") -}} {{ $registrationToken }}{{ end }}
fi
logg info 'Ensuring cloudflared service is started'
sudo systemctl start cloudflared
logg info 'Enabling cloudflared as a boot systemctl service'
sudo systemctl enable cloudflared
else
# System is Windows
cloudflared service install
mkdir C:\Windows\System32\config\systemprofile\.cloudflared
# Copy same cert.pem as being used above
# copy C:\Users\%USERNAME%\.cloudflared\cert.pem C:\Windows\System32\config\systemprofile\.cloudflared\cert.pem
# https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/local/as-a-service/windows/
fi
else
logg info 'cloudflared was not installed so CloudFlare Tunnels cannot be enabled. (Or the ~/.local/etc/cloudflared folder is not present)'
fi
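For context, the `config.yml` that gets copied to `/usr/local/etc/cloudflared` above generally looks something like the following sketch; the tunnel name, hostname, and service are placeholders rather than values from this repository:

```bash
# Illustrative /usr/local/etc/cloudflared/config.yml written via a heredoc; all values are placeholders.
sudo tee /usr/local/etc/cloudflared/config.yml > /dev/null <<'EOF'
tunnel: host-example-hostname
credentials-file: /usr/local/etc/cloudflared/credentials.json
ingress:
  - hostname: ssh.example.com
    service: ssh://localhost:22
  - service: http_status:404
EOF
```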

View file

@ -0,0 +1,17 @@
#!/usr/bin/env bash
# @file Mise Install / Tweaks
# @brief Performs initial install of mise targets and applies tweaks such as symlinking mise's Java version with the system Java target on macOS
if command -v mise > /dev/null; then
logg info 'Running mise install' && mise install
### Symlink Java on macOS
if [ -d /Applications ] && [ -d /System ]; then
if [ -d "${XDG_DATA_HOME:-$HOME/.local/share}/mise/installs/java/openjdk-20/Contents" ]; then
sudo mkdir -p /Library/Java/JavaVirtualMachines/openjdk-20.jdk
sudo ln -s "${XDG_DATA_HOME:-$HOME/.local/share}/mise/installs/java/openjdk-20/Contents" /Library/Java/JavaVirtualMachines/openjdk-20.jdk/Contents
fi
fi
else
logg info 'mise is not available on the PATH'
fi

View file

@ -0,0 +1,29 @@
#!/usr/bin/env bash
# @file NTFY Dependencies
# @brief Ensures branding assets and sound files are in system locations. Also, ensures system dependencies are installed
if command -v ntfy > /dev/null; then
### Branding assets
logg info 'Ensuring branding assets are in expected place for ntfy'
sudo mkdir -p /usr/local/etc/branding
sudo cp -f "$HOME/.local/etc/branding/logo-color-256x256.png" /usr/local/etc/branding/logo-color-256x256.png
### Sound files
logg info 'Ensuring shared sound files are synced to system location'
sudo mkdir -p /usr/local/share/sounds
sudo rsync -rtvp "${XDG_DATA_HOME:-$HOME/.local/share}/sounds/" /usr/local/share/sounds
### Debian dependency
if command -v apt-get > /dev/null; then
logg info 'Running sudo apt-get update && sudo apt-get install -y python-dbus'
sudo apt-get update && sudo apt-get install -y python-dbus
fi
### Termux dependency
if command -v termux-setup-storage > /dev/null; then
logg info 'Running apt install -y termux-api'
apt install -y termux-api
fi
else
logg info 'ntfy not available on PATH'
fi

View file

@ -0,0 +1,60 @@
#!/usr/bin/env bash
# @file Privoxy Configuration
# @brief This script applies the Privoxy configuration stored at `${XDG_CONFIG_HOME:-$HOME/.config}/privoxy/config` to the system and then restarts Privoxy
# @description
# Privoxy is a web proxy that can be combined with Tor to provide an HTTPS / HTTP proxy that can funnel all traffic
# through Tor. This script:
#
# 1. Determines the system configuration file location
# 2. Applies the configuration stored at `${XDG_CONFIG_HOME:-$HOME/.config}/privoxy/config`
# 3. Enables and restarts the Privoxy service with the new configuration
#
# ## Links
#
# * [Privoxy configuration](https://github.com/megabyte-labs/install.doctor/tree/master/home/dot_config/privoxy/config)
### Configure variables
if [ -d /Applications ] && [ -d /System ]; then
### macOS
if [ -d "/usr/local/etc/privoxy" ]; then
PRIVOXY_CONFIG_DIR=/usr/local/etc/privoxy
elif [ -d "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/privoxy" ]; then
PRIVOXY_CONFIG_DIR="${HOMEBREW_PREFIX:-/opt/homebrew}/etc/privoxy"
else
logg warn 'Unable to detect Privoxy configuration directory'
fi
else
### Linux
PRIVOXY_CONFIG_DIR=/etc/privoxy
fi
PRIVOXY_CONFIG="$PRIVOXY_CONFIG_DIR/config"
### Copy Privoxy configuration stored at `${XDG_CONFIG_HOME:-$HOME/.config}/privoxy/config` to the system location
if command -v privoxy > /dev/null; then
if [ -d "$PRIVOXY_CONFIG_DIR" ]; then
sudo cp -f "${XDG_CONFIG_HOME:-HOME/.config}/privoxy/config" "$PRIVOXY_CONFIG"
sudo chmod 600 "$PRIVOXY_CONFIG"
if command -v add-usergroup > /dev/null; then
sudo add-usergroup "$USER" privoxy
fi
sudo chown privoxy:privoxy "$PRIVOXY_CONFIG" 2> /dev/null || sudo chown privoxy:$(id -g -n) "$PRIVOXY_CONFIG"
### Restart Privoxy after configuration is applied
if [ -d /Applications ] && [ -d /System ]; then
### macOS
brew services restart privoxy
else
if [[ ! "$(test -d /proc && grep Microsoft /proc/version > /dev/null)" ]]; then
### Linux
sudo systemctl enable privoxy
sudo systemctl restart privoxy
else
logg info 'The system is a WSL environment so the Privoxy systemd service will not be enabled / restarted'
fi
fi
else
logg warn 'The '"$PRIVOXY_CONFIG_DIR"' directory is missing'
fi
else
logg info 'privoxy is missing from the PATH - skipping configuration'
fi
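Since the description above mentions pairing Privoxy with Tor, here is a minimal sketch of the directives such a `config` commonly contains, assuming Privoxy's and Tor's default ports (8118 and 9050):

```bash
# Illustrative excerpt appended to the user-level Privoxy config; adjust ports to your setup.
cat >> "${XDG_CONFIG_HOME:-$HOME/.config}/privoxy/config" <<'EOF'
listen-address 127.0.0.1:8118
forward-socks5t / 127.0.0.1:9050 .
EOF
```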

View file

@ -0,0 +1,146 @@
#!/usr/bin/env bash
# @file Rclone S3 Mounts
# @brief This script configures Rclone to provide several S3-compliant mounts by leveraging CloudFlare R2
# @description
# Install Doctor leverages Rclone and CloudFlare R2 to provide S3-compliant bucket mounts that allow you to retain stateful files and configurations.
# In general, these buckets are used for backing up files like your browser profiles, Docker backup files, and other files that cannot be stored
# as code in your Install Doctor fork.
#
# This script sets up Rclone to provide several folders that are synchronized with S3-compliant buckets (using CloudFlare R2 by default).
# The script ensures required directories are created and that proper permissions are applied. This script will only run if `rclone` is
# available in the `PATH`. It also requires the user to provide `CLOUDFLARE_R2_ID` and `CLOUDFLARE_R2_SECRET` as either environment variables
# or through the encrypted repository-fork-housed method detailed in the [Secrets documentation](https://install.doctor/docs/customization/secrets).
#
# ## Mounts
#
# The script will setup five mounts by default and enable / start `systemd` services on Linux systems so that the mounts are available
# whenever the device is turned on. The mounts are:
#
# | Mount Location | Description |
# |-----------------------|-----------------------------------------------------------------------------------------------------------------------|
# | `/mnt/Private` | Private system-wide bucket used for any private files that should not be able to be accessed publicly over HTTPS |
# | `/mnt/Public` | Public system-wide bucket that can be accessed by anyone over HTTPS with the bucket's URL (provided by CloudFlare R2) |
# | N/A | Private system-wide bucket used for storing Docker-related backups / files |
# | N/A | Private system-wide bucket similar to `/mnt/Private` but intended for system file backups |
# | `$HOME/Public` | Private user-specific bucket (used for backing up application settings) |
#
# ## Permissions
#
# The system files are all assigned proper permissions and are owned by the user `rclone` with the group `rclone`. The exception to this is the
# user-specific mount, which uses the user's own username and group.
#
# ## Samba
#
# If Samba is installed, then by default Samba will create two shares that are symlinked to the `/mnt/s3-private` and `/mnt/s3-public`
# buckets. This feature allows you to easily access the two buckets from other devices in your local network. If Rclone buckets are not
# available then the Samba setup script will just create regular empty folders as shares.
#
# ## Notes
#
# * The mount services all leverage the executable found at `$HOME/.local/bin/rclone-mount` to mount the shares.
#
# ## Links
#
# * [Rclone mount script](https://github.com/megabyte-labs/install.doctor/tree/master/home/dot_local/bin/executable_rclone-mount)
# * [Rclone default configurations](https://github.com/megabyte-labs/install.doctor/tree/master/home/dot_config/rclone)
# * [Rclone documentation](https://rclone.org/docs/)
if command -v rclone > /dev/null; then
{{- if and (or (and (stat (joinPath .host.home ".config" "age" "chezmoi.txt")) (stat (joinPath .chezmoi.sourceDir ".chezmoitemplates" "secrets" "CLOUDFLARE_R2_ID"))) (env "CLOUDFLARE_R2_ID")) (or (and (stat (joinPath .host.home ".config" "age" "chezmoi.txt")) (stat (joinPath .chezmoi.sourceDir ".chezmoitemplates" "secrets" "CLOUDFLARE_R2_SECRET"))) (env "CLOUDFLARE_R2_SECRET")) (ne .user.cloudflare.r2 "") }}
logg info 'Removing ~/.config/rclone/rclone.conf Install Doctor managed block'
CONFIG_FILE="${XDG_CONFIG_HOME:-$HOME/.config}/rclone/rclone.conf"
if cat "$CONFIG_FILE" | grep '# INSTALL DOCTOR MANAGED S3 START' > /dev/null; then
# TODO: Remove old block
START_LINE="$(grep -n -m 1 "# INSTALL DOCTOR MANAGED S3 START" "$CONFIG_FILE" | cut -f1 -d ":")"
END_LINE="$(grep -n -m 1 "# INSTALL DOCTOR MANAGED S3 END" "$CONFIG_FILE" | cut -f1 -d ":")"
if command -v gsed > /dev/null; then
gsed -i "$START_LINE,${END_LINE}d" "$CONFIG_FILE" > /dev/null
else
sed -i "$START_LINE,${END_LINE}d" "$CONFIG_FILE" > /dev/null
fi
fi
logg info 'Adding ~/.config/rclone/rclone.conf INSTALL DOCTOR managed block'
sudo tee -a "$CONFIG_FILE" > /dev/null <<EOT
# INSTALL DOCTOR MANAGED S3 START
[User-{{ .user.username}}]
access_key_id = {{ if (stat (joinPath .chezmoi.sourceDir ".chezmoitemplates" "secrets" "CLOUDFLARE_R2_ID_USER")) }}{{- includeTemplate "secrets/CLOUDFLARE_R2_ID_USER" | decrypt | trim -}}{{ else }}{{- env "CLOUDFLARE_R2_ID_USER" -}}{{ end }}
acl = private
endpoint = {{ .user.cloudflare.r2 }}.r2.cloudflarestorage.com
provider = Cloudflare
region = auto
secret_access_key = {{ if (stat (joinPath .chezmoi.sourceDir ".chezmoitemplates" "secrets" "CLOUDFLARE_R2_SECRET_USER")) }}{{- includeTemplate "secrets/CLOUDFLARE_R2_SECRET_USER" | decrypt | trim -}}{{ else }}{{- env "CLOUDFLARE_R2_SECRET_USER" -}}{{ end }}
type = s3
# INSTALL DOCTOR MANAGED S3 END
EOT
{{- end }}
# sudo chown -f root "$CONFIG_FILE"
sudo chmod -f 600 "$CONFIG_FILE"
logg info 'Ensuring /var/cache/rclone exists'
sudo mkdir -p /var/cache/rclone
sudo chmod 750 /var/cache/rclone
### Add user / group with script in ~/.local/bin/add-usergroup, if it is available
if command -v add-usergroup > /dev/null; then
sudo add-usergroup "$USER" rclone
fi
sudo chown -Rf rclone:rclone /var/cache/rclone
logg info 'Ensuring /var/log/rclone exists'
sudo mkdir -p /var/log/rclone
sudo chmod 750 /var/log/rclone
sudo chown -Rf rclone:rclone /var/log/rclone
logg info 'Adding ~/.local/bin/rclone-mount to /usr/local/bin'
sudo cp -f "$HOME/.local/bin/rclone-mount" /usr/local/bin/rclone-mount
sudo chmod +x /usr/local/bin/rclone-mount
logg info 'Adding ~/.config/rclone/rcloneignore to /etc/rcloneignore'
sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/rclone/rcloneignore" /etc/rcloneignore
sudo chown -Rf rclone:rclone /etc/rcloneignore
sudo chmod 640 /etc/rcloneignore
logg info 'Adding ~/.config/rclone/system-rclone.conf to /etc/rclone.conf'
sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/rclone/system-rclone.conf" /etc/rclone.conf
sudo chown -Rf rclone:rclone /etc/rclone.conf
sudo chmod 600 /etc/rclone.conf
if [ -d /Applications ] && [ -d /System ]; then
### Enable Rclone mounts
logg info 'Ensuring Rclone mount-on-reboot definitions are in place'
if [ -f "$HOME/Library/LaunchDaemons/rclone.private.plist" ] && [ ! -f "/Library/LaunchDaemons/rclone.private.plist" ]; then
logg info 'Adding /Volumes/Private as S3 bucket mount, enabled at boot'
sudo mkdir -p /Library/LaunchDaemons
sudo cp -f "$HOME/Library/LaunchDaemons/rclone.private.plist" '/Library/LaunchDaemons/rclone.private.plist'
sudo launchctl load '/Library/LaunchDaemons/rclone.private.plist' && logg success 'launchctl load successful'
fi
if [ -f "$HOME/Library/LaunchDaemons/rclone.public.plist" ] && [ ! -f "/Library/LaunchDaemons/rclone.public.plist" ]; then
logg info 'Adding /Volumes/Public as S3 bucket mount, enabled at boot'
sudo mkdir -p /Library/LaunchDaemons
sudo cp -f "$HOME/Library/LaunchDaemons/rclone.public.plist" '/Library/LaunchDaemons/rclone.public.plist'
sudo launchctl load '/Library/LaunchDaemons/rclone.public.plist' && logg success 'launchctl load successful'
fi
if [ -f "$HOME/Library/LaunchDaemons/rclone.user.plist" ] && [ ! -f "/Library/LaunchDaemons/rclone.user.plist" ]; then
logg info "Adding /Volumes/User-$USER as S3 bucket mount, enabled at boot"
sudo mkdir -p /Library/LaunchDaemons
sudo cp -f "$HOME/Library/LaunchDaemons/rclone.user.plist" '/Library/LaunchDaemons/rclone.user.plist'
sudo launchctl load '/Library/LaunchDaemons/rclone.user.plist' && logg success 'launchctl load successful'
fi
elif [ -d /etc/systemd/system ]; then
find "${XDG_CONFIG_HOME:-$HOME/.config}/rclone/system" -mindepth 1 -maxdepth 1 -type f | while read RCLONE_SERVICE; do
### Add systemd service file
logg info "Adding S3 system mount service defined at $RCLONE_SERVICE"
FILENAME="$(basename "$RCLONE_SERVICE")"
SERVICE_ID="$(echo "$FILENAME" | sed 's/.service//')"
sudo cp -f "$RCLONE_SERVICE" "/etc/systemd/system/$(basename "$RCLONE_SERVICE")"
### Ensure mount folder is created
logg info "Ensuring /mnt/$SERVICE_ID is created with proper permissions"
sudo mkdir -p "/mnt/$SERVICE_ID"
sudo chmod 750 "/mnt/$SERVICE_ID"
### Enable / restart the service
logg info "Enabling / restarting the $SERVICE_ID S3 service"
sudo systemctl enable "$SERVICE_ID"
sudo systemctl restart "$SERVICE_ID"
done
### Add user Rclone mount
logg info 'Adding user S3 rclone mount (available at ~/.local/mnt/s3)'
sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/rclone/s3-user.service" "/etc/systemd/system/s3-${USER}.service"
logg info 'Enabling / restarting the S3 user mount'
sudo systemctl enable "s3-${USER}"
sudo systemctl restart "s3-${USER}"
fi
else
logg info 'rclone is not available'
fi
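The systemd unit files referenced above ship with the dotfiles and are not shown in this diff. As a rough, assumption-heavy sketch, a mount unit that delegates to the `rclone-mount` helper installed at `/usr/local/bin/rclone-mount` might look along these lines; the unit name, arguments, and mount point are hypothetical:

```bash
# Hypothetical /etc/systemd/system/s3-private.service - the real units come from ~/.config/rclone/system.
sudo tee /etc/systemd/system/s3-private.service > /dev/null <<'EOF'
[Unit]
Description=Rclone S3 mount (Private)
After=network-online.target

[Service]
Type=simple
User=rclone
Group=rclone
ExecStart=/usr/local/bin/rclone-mount system Private /mnt/Private
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
```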

View file

@ -0,0 +1,25 @@
#!/usr/bin/env bash
# @file rkhunter configuration
# @brief This script applies the rkhunter integration and updates it as well
if command -v rkhunter > /dev/null; then
if [ -d /Applications ] && [ -d /System ]; then
### macOS
logg info 'Updating file "$(brew --prefix)/Cellar/rkhunter/1.4.6/etc/rkhunter.conf"' && gsed -i "s/^#WEB_CMD.*$/WEB_CMD=curl\ -L/" "$(brew --prefix)/Cellar/rkhunter/1.4.6/etc/rkhunter.conf"
else
### Linux
logg info 'Updating file /etc/rkhunter.conf' && sed -i "s/^#WEB_CMD.*$/WEB_CMD=curl\ -L/" /etc/rkhunter.conf
fi
export PATH="$(echo "$PATH" | sed 's/VMware Fusion.app/VMwareFusion.app/')"
export PATH="$(echo "$PATH" | sed 's/IntelliJ IDEA CE.app/IntelliJIDEACE.map/')"
sudo rkhunter --propupd || RK_PROPUPD_EXIT_CODE=$?
if [ -n "$RK_PROPUPD_EXIT_CODE" ]; then
logg error "sudo rkhunter --propupd returned non-zero exit code"
fi
sudo rkhunter --update || RK_UPDATE_EXIT_CODE=$?
if [ -n "$RK_UPDATE_EXIT_CODE" ]; then
logg error "sudo rkhunter --update returned non-zero exit code"
fi
else
logg info 'rkhunter is not installed'
fi

View file

@ -0,0 +1,91 @@
#!/usr/bin/env bash
# @file Samba Configuration
# @brief This script configures Samba by applying the configuration stored in `${XDG_CONFIG_HOME:-$HOME/.config}/samba/config` if the `smbd` application is available
# @description
# This script applies the Samba configuration stored in `${XDG_CONFIG_HOME:-$HOME/.config}/samba/config` if Samba is installed.
# The script and default configuration set up two Samba shares.
#
# ## Security
#
# Both shares are configured by default to only accept connections
# from hosts with DNS that ends in `.local.PUBLIC_SERVICES_DOMAIN`, where `PUBLIC_SERVICES_DOMAIN` is an environment variable that
# can be passed into Install Doctor. So, if your `PUBLIC_SERVICES_DOMAIN` environment variable is set to `megabyte.space`, then
# a device with a FQDN of `alpha.local.megabyte.space` pointing to its LAN location will be able to connect but a device
# with a FQDN of `alpha.megabyte.space` will not be able to connect.
#
# ## Samba Shares / S3 Backup
#
# If CloudFlare R2 credentials are provided, Samba is configured to store its shared files in the Rclone mounts so that your
# Samba shares are synchronized to the S3 buckets. If not, new folders are created. Either way, the folder / symlink that the
# shares host data from are stored at `/mnt/Private` and `/mnt/Public` (*Note: Different paths are used on macOS*).
#
# 1. The **public** share (named "Public") can be accessed by anyone (including write permissions with the default settings)
# 2. The **private** share (named "Private") can be accessed by specifying the PAM credentials of anyone who has an account that is included in the `sambausers` group
#
# ## Symlinks
#
# Symlinks are disabled for security reasons. This is because, with symlinking enabled, people can create symlinks on the shares and use the symlinks to access system files outside of the
# Samba shares. There are commented-out lines in the default configuration that you can uncomment to enable the symlinks in shares.
#
# ## Printers
#
# Printer sharing is not enabled by default. There are commented lines in the default configuration that should provide a nice stepping
# stone if you want to use Samba for printer sharing (with CUPS).
#
# ## Environment Variables
#
# The following chart details some of the environment variables that are used to determine the configuration of the
# Samba shares:
#
# | Environment Variable | Description |
# |-----------------------------|-----------------------------------------------------------------------------------------------------|
# | `PUBLIC_SERVICES_DOMAIN` | Used to determine which hosts can connect to the Samba share (e.g. `.local.PUBLIC_SERVICES_DOMAIN`) |
# | `SAMBA_NETBIOS_NAME` | Determines the NetBIOS name (defaults to the `HOSTNAME` environment variable value) |
# | `SAMBA_WORKGROUP` | Controls Samba workgroup name (defaults to "BETELGEUSE") |
#
# ## Links
#
# * [Default Samba configuration](https://github.com/megabyte-labs/install.doctor/tree/master/home/dot_local/samba/config.tmpl)
# * [Secrets / Environment variables documentation](https://install.doctor/docs/customization/secrets)
### Configure Samba server
if command -v smbd > /dev/null; then
# Add user / group with script in ~/.local/bin/add-usergroup, if it is available
if command -v add-usergroup > /dev/null; then
sudo add-usergroup "$USER" rclone
fi
### Define share locations
if [ -d /Applications ] && [ -d /System ]; then
### macOS does not have `/mnt` folder so use `/Volumes` location
MNT_FOLDER='Volumes'
else
MNT_FOLDER='mnt'
fi
PRIVATE_SHARE="/$MNT_FOLDER/Private"
PUBLIC_SHARE="/$MNT_FOLDER/Public"
logg info "Ensuring $PRIVATE_SHARE is created"
sudo mkdir -p "$PRIVATE_SHARE"
sudo chmod 750 "$PRIVATE_SHARE"
sudo chown -Rf root:rclone "$PRIVATE_SHARE"
logg info "Ensuring $PUBLIC_SHARE is created"
sudo mkdir -p "$PUBLIC_SHARE"
sudo chmod 755 "$PUBLIC_SHARE"
sudo chown -Rf root:rclone "$PUBLIC_SHARE"
logg info "Ensuring $HOME/Public is created"
mkdir -p "$HOME/Public"
chmod 755 "$HOME/Public"
chown -Rf "$USER":rclone "$HOME/Public"
### Copy the Samba server configuration file
if [ -d /Applications ] && [ -d /System ]; then
sudo sharing -a "$PRIVATE_SHARE" -S "Private (System)" -n "Private (System)" -g 000 -s 001 -E 1 -R 1 && logg success "Configured $PRIVATE_SHARE as a private Samba share" || logg info 'sharing command failed - it is likely that the share was already set up'
sudo sharing -a "$PUBLIC_SHARE" -S "Public (System)" -n "Public (System)" -g 001 -s 001 -E 1 -R 0 && logg success "Configured $PUBLIC_SHARE as a public Samba share" || logg info 'sharing command failed - it is likely that the share was already set up'
sudo sharing -a "$HOME/Public" -S "Public (User)" -n "Public (User)" -g 001 -s 001 -E 1 -R 0 && logg success "Configured $HOME/Public as a public Samba share" || logg info 'sharing command failed - it is likely that the share was already set up'
else
logg info "Copying Samba server configuration to /etc/samba/smb.conf"
sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/samba/config" "/etc/samba/smb.conf"
### Reload configuration file changes
logg info 'Reloading the smbd config'
smbcontrol smbd reload-config
fi
else
logg info "Samba server is not installed"
fi
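To illustrate the Linux share layout that the copied configuration describes (a private share limited to the `sambausers` group and a public share, both restricted to hosts under `.local.PUBLIC_SERVICES_DOMAIN`), here is a hedged excerpt using `megabyte.space` as the example domain; the template in the repository is authoritative:

```bash
# Illustrative smb.conf excerpt only - the real file is copied from ~/.config/samba/config.
sudo tee -a /etc/samba/smb.conf > /dev/null <<'EOF'
[Private]
   path = /mnt/Private
   browseable = yes
   read only = no
   valid users = @sambausers
   hosts allow = .local.megabyte.space

[Public]
   path = /mnt/Public
   browseable = yes
   read only = no
   guest ok = yes
   hosts allow = .local.megabyte.space
EOF
```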

View file

@ -0,0 +1,19 @@
#!/usr/bin/env bash
# @file sftpgo configuration
# @brief This script copies over the required configuration files for sftpgo and then initializes sftpgo
if command -v sftpgo > /dev/null; then
sudo mkdir -p /usr/local/etc/sftpgo
logg info 'Copying over sftpgo configuration to /usr/local/etc/sftpgo/sftpgo.json'
sudo cp -f "$HOME/.local/etc/sftpgo/sftpgo.json" /usr/local/etc/sftpgo/sftpgo.json
logg info 'Copying over sftpgo branding assets'
sudo cp -f "$HOME/.local/etc/sftpgo/banner" /usr/local/etc/sftpgo/banner
sudo mkdir -p /usr/local/etc/branding
sudo cp -f "$HOME/.local/etc/branding/favicon.ico" /usr/local/etc/branding/favicon.ico
sudo cp -f "$HOME/.local/etc/branding/logo-color-256x256.png" /usr/local/etc/branding/logo-color-256x256.png
sudo cp -f "$HOME/.local/etc/branding/logo-color-900x900.png" /usr/local/etc/branding/logo-color-900x900.png
logg info 'Running sudo sftpgo initprovider'
sudo sftpgo initprovider
else
logg info 'sftpgo is not installed'
fi

View file

@ -0,0 +1,29 @@
#!/usr/bin/env bash
# @file Tailscale
# @brief Connects the Tailscale client with the Tailscale network
# @description
# This script ensures the `tailscaled` system daemon is installed on macOS. Then, on both macOS and Linux, it connects to the Tailscale
# network if the `TAILSCALE_AUTH_KEY` variable is provided.
### Install the Tailscale system daemon
if [ -d /Applications ] && [ -d /System ]; then
### macOS
if command -v tailscaled > /dev/null; then
logg info 'Ensuring tailscaled system daemon is installed'
sudo tailscaled install-system-daemon
logg info 'tailscaled system daemon is now installed and will load on boot'
else
logg info 'tailscaled does not appear to be installed'
fi
fi
### Connect to Tailscale network
if command -v tailscale > /dev/null && [ "$TAILSCALE_AUTH_KEY" != "" ]; then
logg info 'Connecting to Tailscale with user-defined authentication key'
timeout 14 tailscale up --authkey="$TAILSCALE_AUTH_KEY" --accept-routes || EXIT_CODE=$?
if [ -n "$EXIT_CODE" ]; then
logg warn 'tailscale up timed out'
else
logg success 'Connected to Tailscale network'
fi
fi
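After the script runs, the connection can be sanity-checked with the standard Tailscale CLI:

```bash
# Confirm the node joined the tailnet and print its Tailscale IPv4 address.
tailscale status
tailscale ip -4
```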

View file

@ -0,0 +1,15 @@
#!/usr/bin/env bash
# @file Timeshift Configuration
# @brief Updates the Timeshift system configuration with the Timeshift configuration stored in the `home/dot_config/timeshift/timeshift.json` location.
# @description
# This script applies a Timeshift configuration that defines how Timeshift should maintain system backups.
if command -v timeshift > /dev/null; then
logg info 'Ensuring /etc/timeshift is a directory'
sudo mkdir -p /etc/timeshift
TIMESHIFT_CONFIG="${XDG_CONFIG_HOME:-$HOME/.config}/timeshift/timeshift.json"
logg info "Copying $TIMESHIFT_CONFIG to /etc/timeshift/timeshift.json"
sudo cp -f "$TIMESHIFT_CONFIG" /etc/timeshift/timeshift.json
else
logg info 'The timeshift executable is not available'
fi

View file

@ -0,0 +1,51 @@
#!/usr/bin/env bash
# @file Tor Configuration
# @brief This script applies the Tor configuration stored at `${XDG_CONFIG_HOME:-$HOME/.config}/tor/torrc` to the system and then restarts Tor
# @description
# Tor is a network that uses onion routing, originally published by the US Navy. It is leveraged by privacy enthusiasts
# and other characters that deal with sensitive material, like journalists and people buying drugs on the internet.
# This script:
#
# 1. Determines the system configuration file location
# 2. Applies the configuration stored at `${XDG_CONFIG_HOME:-$HOME/.config}/tor/torrc`
# 3. Enables and restarts the Tor service with the new configuration
#
# ## Links
#
# * [Tor configuration](https://github.com/megabyte-labs/install.doctor/tree/master/home/dot_config/tor/torrc)
### Determine the Tor configuration location by checking whether the system is macOS or Linux
if [ -d /Applications ] && [ -d /System ]; then
### macOS
TORRC_CONFIG_DIR=/usr/local/etc/tor
else
### Linux
TORRC_CONFIG_DIR=/etc/tor
fi
TORRC_CONFIG="$TORRC_CONFIG_DIR/torrc"
### Apply the configuration if the `torrc` binary is available in the `PATH`
if command -v torify > /dev/null; then
if [ -d "$TORRC_CONFIG_DIR" ]; then
### Copy the configuration from `${XDG_CONFIG_HOME:-$HOME/.config}/tor/torrc` to the system configuration file location
sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/tor/torrc" "$TORRC_CONFIG"
sudo chmod 600 "$TORRC_CONFIG"
### Enable and restart the Tor service
if [ -d /Applications ] && [ -d /System ]; then
### macOS
brew services restart tor
else
if [[ ! "$(test -d /proc && grep Microsoft /proc/version > /dev/null)" ]]; then
### Linux
sudo systemctl enable tor
sudo systemctl restart tor
else
logg info 'Environment is WSL so the Tor systemd service will not be enabled / restarted'
fi
fi
else
logg warn 'The '"$TORRC_CONFIG_DIR"' directory is missing'
fi
else
logg warn 'torify is missing from the PATH'
fi
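For reference, the `torrc` copied into place typically only needs a couple of directives to expose the local SOCKS proxy that the `tortest` alias earlier in this commit points at; a minimal sketch assuming the default port:

```bash
# Illustrative torrc excerpt - the repository's ~/.config/tor/torrc is authoritative.
cat >> "${XDG_CONFIG_HOME:-$HOME/.config}/tor/torrc" <<'EOF'
SocksPort 127.0.0.1:9050
Log notice file /var/log/tor/notices.log
EOF
```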

View file

@ -0,0 +1,19 @@
#!/usr/bin/env bash
# @file VIM Plugins AOT Installation
# @brief This script triggers VIM to pre-install plugins so that VIM loads into the desired state the first time it is invoked
logg info "Installing VIM plugins" && vim +'PlugInstall --sync' +qall
# @description This script installs the extensions defined in `${XDG_CONFIG_HOME:-$HOME/.config}/coc/extensions/package.json`
# which should correlate to the Coc extensions defined in `${XDG_CONFIG_HOME:-$HOME/.config}/vim/vimrc`.
installCocExtensions() {
if [ -f "${XDG_CONFIG_HOME:-$HOME/.config}/coc/extensions/package.json" ]; then
logg info "Running npm i --no-progress --no-package-lock in ${XDG_CONFIG_HOME:-$HOME/.config}/coc/extensions"
cd "${XDG_CONFIG_HOME:-$HOME/.config}/coc/extensions" && npm i --no-progress --no-package-lock
logg info "Running vim +CocUpdateSync +qall" && vim +CocUpdateSync +qall
else
logg info "Skipping Coc extension installation because ${XDG_CONFIG_HOME:-$HOME/.config}/coc/extensions/package.json is missing"
fi
}
logg info "Updating VIM coc extensions" && installCocExtensions

View file

@ -0,0 +1,35 @@
#!/usr/bin/env bash
# @file VirtualBox Extension Pack
# @brief Ensures the VirtualBox extension pack is installed.
# @description
# This script ensures the VirtualBox extension pack that corresponds with VirtualBox's version is properly installed.
### Run logic if VirtualBox is installed
if command -v VirtualBox > /dev/null; then
### Install VirtualBox extension pack if it is not installed already
if [ ! -d /usr/lib/virtualbox/ExtensionPacks/Oracle_VM_VirtualBox_Extension_Pack ] && [ ! -d /Applications/VirtualBox.app/Contents/MacOS/ExtensionPacks/Oracle_VM_VirtualBox_Extension_Pack ]; then
logg info 'Acquiring VirtualBox version information'
VBOX_VERSION="$(VirtualBox --help | head -n 1 | cut -f 6 -d' ')"
VBOX_VERSION="${VBOX_VERSION//v}"
### Set up folders
# Check for macOS installation before creating ExtensionPacks folder on Linux machines
if [ ! -d /Applications/VirtualBox.app ]; then
sudo mkdir -p /usr/lib/virtualbox/ExtensionPacks
fi
mkdir -p /tmp/vbox
cd /tmp/vbox
### Download extension pack
logg info 'Downloading VirtualBox extension pack'
curl -sSL https://download.virtualbox.org/virtualbox/$VBOX_VERSION/Oracle_VM_VirtualBox_Extension_Pack-$VBOX_VERSION.vbox-extpack -o /tmp/vbox/Oracle_VM_VirtualBox_Extension_Pack-$VBOX_VERSION.vbox-extpack || logg error 'Failed to download the VirtualBox extension pack so the extension pack installation will be skipped'
### Install extension pack
if [ -f /tmp/vbox/Oracle_VM_VirtualBox_Extension_Pack-$VBOX_VERSION.vbox-extpack ]; then
logg info 'Installing VirtualBox extension pack'
echo 'y' | sudo VBoxManage extpack install --replace /tmp/vbox/Oracle_VM_VirtualBox_Extension_Pack-$VBOX_VERSION.vbox-extpack
logg success 'Successfully installed VirtualBox extension pack'
fi
else
logg info 'VirtualBox Extension pack is already installed'
fi
else
logg info 'VirtualBox is not installed so VirtualBox Extension pack will not be installed'
fi
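A quick way to confirm the extension pack landed:

```bash
# Lists installed VirtualBox extension packs; the Oracle pack should appear with the matching version.
VBoxManage list extpacks
```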

View file

@ -0,0 +1,142 @@
#!/usr/bin/env bash
# @file VMWare Configuration
# @brief Installs VMWare Workstation Pro on Linux devices, applies a "publicly-retrieved" license key (see disclaimer), and automatically accepts the terms and conditions
# @description
# This script ensures the user included `vmware` in their software installation list. It then checks for the presence of the `vmware` utility. If it is not present, then the script:
#
# 1. Downloads the [VMWare Workstation Pro](https://www.vmware.com/content/vmware/vmware-published-sites/us/products/workstation-pro.html.html) Linux installer
# 2. Installs VMWare Workstation Pro
# 3. Passes options to the installation script that automatically apply a publicly retrieved license key and accept the Terms & Conditions
#
# This script first checks if `vagrant`, `vmware`, and `vagrant-vmware-utility` are available in the `PATH`. If they are present, then the script
# configures the [`vagrant-vmware-utility`](https://developer.hashicorp.com/vagrant/docs/providers/vmware/vagrant-vmware-utility) by generating the required security certificates and enabling the service.
# This system package enables the capability of controlling both VMWare Workstation and VMWare Fusion with Vagrant.
#
# Since this script runs only when `vagrant`, `vmware`, and `vagrant-vmware-utility` are in the `PATH`, this means that it will run
# when you use an installation template that includes all three pieces of software in the software list defined in
# `home/.chezmoidata.yaml`.
#
# **DISCLAIMER:** If you plan on using VMWare Workstation for anything but evaluation purposes, then we highly suggest purchasing a copy
# of VMWare Workstation. The "publicly-retrieved" license keys are scattered throughout GitHub and we are not exactly
# sure why they work. You can pass in your own key by utilizing the `VMWARE_WORKSTATION_LICENSE_KEY` environment variable. More details on
# using environment variables or repository-housed encrypted secrets can be found in our [Secrets documentation](https://install.doctor/docs/customization/secrets).
#
# ## VMWare on macOS
#
# This script only installs VMWare Workstation on Linux. The macOS-variant titled VMWare Fusion can be installed using a Homebrew
# cask so a "work-around" script does not have to be used.
#
# ## VMWare vs. Parallels vs. VirtualBox vs. KVM vs. Hyper-V
#
# There are a handful of VM virtualization providers you can choose from. VMWare is a nice compromise between OS compatibility and performance.
# Parallels, on the other hand, might be better for macOS since it is designed specifically for that platform. Finally, VirtualBox is a truly free,
# open-source option that does not come with the same optimizations that VMWare and Parallels provide.
#
# Other virtualization options include KVM (Linux / macOS) and Hyper-V (Windows). These options are better used for headless
# systems.
#
# ## Links
#
# * [VMWare Workstation homepage](https://www.vmware.com/content/vmware/vmware-published-sites/us/products/workstation-pro.html.html)
# * [Vagrant VMWare Utility on GitHub](https://github.com/hashicorp/vagrant-vmware-desktop)
# * [`home/.chezmoidata.yaml`](https://github.com/megabyte-labs/install.doctor/blob/master/home/.chezmoidata.yaml)
# * [Default license key gist](https://gist.github.com/PurpleVibe32/30a802c3c8ec902e1487024cdea26251)
### Run logic if VMware is installed
if command -v vmware > /dev/null; then
### Build kernel modules if they are not present
if [ ! -f "/lib/modules/$(uname -r)/misc/vmmon.ko" ] || [ ! -f "/lib/modules/$(uname -r)/misc/vmnet.ko" ]; then
### Build VMWare host modules
logg info 'Building VMware host modules'
if sudo vmware-modconfig --console --install-all; then
logg success 'Built VMWare host modules successfully with sudo vmware-modconfig --console --install-all'
else
logg info 'Acquiring VMware version from CLI'
VMW_VERSION="$(vmware --version | cut -f 3 -d' ')"
mkdir -p /tmp/vmw_patch
cd /tmp/vmw_patch
logg info 'Downloading VMware host module patches' && curl -sSL "https://github.com/mkubecek/vmware-host-modules/archive/workstation-$VMW_VERSION.tar.gz" -o /tmp/vmw_patch/workstation.tar.gz
tar -xzf /tmp/vmw_patch/workstation.tar.gz
cd vmware*
logg info 'Running sudo make and sudo make install'
sudo make
sudo make install
logg success 'Successfully configured VMware host module patches'
fi
### Sign VMware host modules if Secure Boot is enabled
if [ -d /sys/firmware/efi ]; then
logg info 'Signing host modules'
mkdir -p /tmp/vmware
cd /tmp/vmware
openssl req -new -x509 -newkey rsa:2048 -keyout MOK.priv -outform DER -out MOK.der -nodes -days 36500 -subj "/CN=VMware/"
"/usr/src/linux-headers-$(uname -r)/scripts/sign-file" sha256 ./MOK.priv ./MOK.der "$(modinfo -n vmmon)"
"/usr/src/linux-headers-$(uname -r)/scripts/sign-file" sha256 ./MOK.priv ./MOK.der "$(modinfo -n vmnet)"
echo '' | mokutil --import MOK.der
logg success 'Successfully signed VMware host modules. Reboot the host before powering on VMs'
fi
### Patch VMware with Unlocker
if [ ! -f /usr/lib/vmware/isoimages/darwin.iso ]; then
logg info 'Acquiring VMware Unlocker latest release version'
UNLOCKER_URL="$(curl -sSL 'https://api.github.com/repos/DrDonk/unlocker/releases/latest' | jq -r '.assets[0].browser_download_url')"
mkdir -p /tmp/vmware-unlocker
cd /tmp/vmware-unlocker
logg info 'Downloading unlocker.zip'
curl -sSL "$UNLOCKER_URL" -o unlocker.zip
unzip unlocker.zip
cd linux
logg info 'Running the unlocker'
echo "y" | sudo ./unlock
logg success 'Successfully unlocked VMware for macOS compatibility'
else
logg info '/usr/lib/vmware/isoimages/darwin.iso is already present on the system so VMware macOS unlocking will not be performed'
fi
if [[ ! "$(test -d /proc && grep Microsoft /proc/version > /dev/null)" ]]; then
### Start / enable VMWare service
logg info 'Ensuring vmware.service is enabled and running'
sudo systemctl enable vmware.service
sudo systemctl restart vmware.service
### Start / enable VMWare Workstation Server service
logg info 'Ensuring vmware-workstation-server.service is enabled and running'
sudo systemctl enable vmware-workstation-server.service
sudo systemctl restart vmware-workstation-server.service
### Start / enable VMWare USB Arbitrator service
if command -v vmware-usbarbitrator > /dev/null; then
logg info 'Ensuring vmware-usbarbitrator.service is enabled and running'
sudo systemctl enable vmware-usbarbitrator.service
sudo systemctl restart vmware-usbarbitrator.service
else
logg warn 'vmware-usbarbitrator does not exist in the PATH'
fi
fi
else
logg info 'VMware host modules are present'
fi
else
logg warn 'VMware Workstation is not installed so the VMware Unlocker will not be installed'
fi
# @description Only run logic if both Vagrant and VMWare are installed
if command -v vagrant > /dev/null && command -v vmware > /dev/null; then
### Vagrant VMWare Utility configuration
if command -v vagrant-vmware-utility > /dev/null; then
if [ -f /usr/local/bin/certificates/vagrant-utility.key ]; then
logg info 'Assuming Vagrant VMWare Utility certificates have been properly generated since /usr/local/bin/certificates/vagrant-utility.key is present'
else
logg info 'Generating Vagrant VMWare Utility certificates'
sudo vagrant-vmware-utility certificate generate
logg success 'Generated Vagrant VMWare Utility certificates via vagrant-vmware-utility certificate generate'
fi
logg info 'Ensuring the Vagrant VMWare Utility service is enabled'
sudo vagrant-vmware-utility service install || EXIT_CODE=$?
if [ -n "$EXIT_CODE" ]; then
logg info 'The Vagrant VMWare Utility command vagrant-vmware-utility service install failed. It is probably already set up.'
fi
fi
else
logg info 'Vagrant or VMWare is not installed so the Vagrant VMWare Utility will not be configured'
fi
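# Optional post-install check for Linux hosts (a sketch; lsmod and mokutil availability is assumed):
# confirm the vmmon / vmnet modules are loadable and report the Secure Boot state, since signed
# modules only take effect after the MOK enrollment completes on the next reboot.
if command -v vmware > /dev/null && command -v lsmod > /dev/null; then
  lsmod | grep -E '^vmmon|^vmnet' || logg info 'vmmon / vmnet are not currently loaded (a reboot may be required)'
  if command -v mokutil > /dev/null; then
    mokutil --sb-state || true
  fi
fi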

View file

@ -0,0 +1,14 @@
#!/usr/bin/env bash
# @file Volta initialization
# @brief This script initializes Volta and ensures the latest version of node and yarn are installed
export VOLTA_HOME="${XDG_DATA_HOME:-$HOME/.local/share}/volta"
export PATH="$VOLTA_HOME/bin:$PATH"
if command -v volta > /dev/null; then
volta setup
volta install node@latest
volta install yarn@latest
else
logg info 'Volta is not installed'
fi
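# Usage sketch: Volta can also pin tool versions per project (recorded in that project's
# package.json), e.g. `cd /path/to/project && volta pin node@20` - the version is only an example.
# The read-only listing below shows which tools Volta currently manages.
if command -v volta > /dev/null; then
  logg info 'Volta-managed tools:' && volta list
fi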

View file

@ -0,0 +1,118 @@
#!/usr/bin/env bash
# @file VSCode Extensions / Global NPM Modules Fallback
# @brief Installs all of the Visual Studio Code extensions specified in the [`home/dot_config/Code/User/extensions.json`](https://github.com/megabyte-labs/install.doctor/blob/master/home/dot_config/Code/User/extensions.json) file and installs NPM packages to the system `/` directory as a catch-all for tools that recursively search upwards for shared NPM configurations.
# @description
# This script loops through all the extensions listed in the [`home/dot_config/Code/User/extensions.json`](https://github.com/megabyte-labs/install.doctor/blob/master/home/dot_config/Code/User/extensions.json)
# file. It installs the extensions when either Visual Studio Code or VSCodium is installed. If both are installed, then both will
# have the plugins automatically installed.
#
# The `extensions.json` file is used to house the plugin list so that if you decide to remove this auto-installer script then
# VSCode will retain some functionality from the file. It will show a popover card that recommends installing any plugins in the
# list that are not already installed.
#
# ## Plugin Settings
#
# Most of the plugin settings have been configured and optimized to work properly with the other default settings
# included by Install Doctor. These settings can be found in the [`home/dot_config/Code/User/settings.json` file](https://github.com/megabyte-labs/install.doctor/blob/master/home/dot_config/Code/User/settings.json).
# If you manage to come up with an improvement, please open a pull request so other users can benefit from your work.
#
# ## Default Extensions
#
# The default plugins in the `extensions.json` list are catered mostly towards full-stack web development. The technologies
# that are catered to by the default extensions relate to TypeScript, JavaScript, Go, Python, Rust, and many more technologies.
# Most of the plugins are not language-specific.
#
# ## Global NPM Modules Fallback
#
# This script makes fallback linter and code auto-fixer configurations globally available. Normally, configurations, like
# the ones used for ESLint, are installed at the project level by specifying the NPM package configuration
# in the `package.json` file (or via an `.eslintrc` file). However, whenever no configuration is present, IDEs like
# Visual Studio Code will recursively search upwards in the directory tree, trying to find an ESLint configuration.
#
# This script addresses this issue by installing a set of shared NPM packages that enhance the functionality of tools like ESLint
# by placing a `package.json` with all the necessary settings into the highest directory possible and then installing the package's
# modules. This normally results in a `package.json` file and `node_modules/` folder at the root of the system.
#
# ## NPM Packages Included
#
# To reduce clutter, all the configurations are mapped out in the `package.json` file. Our default `package.json` file includes
# the following configuration:
#
# ```json
# <!-- AUTO-GENERATED:START (REMOTE:url=https://gitlab.com/megabyte-labs/install.doctor/-/raw/master/home/dot_config/Code/User/package.json) -->
# {
# ...
# // Notable dependencies listed below
# "dependencies": {
# "eslint-config-strictlint": "latest",
# "jest-preset-ts": "latest",
# "prettier-config-strictlint": "latest",
# "remark-preset-strictlint": "latest",
# "stylelint-config-strictlint": "latest"
# },
# ...
# }
# <!-- AUTO-GENERATED:END -->
# ```
#
# ## Strict Lint
#
# More details on the shared configurations can be found at [StrictLint.com](https://strictlint.com).
# Strict Lint is another brand maintained by Megabyte Labs that is home to many of the well-crafted
# shared configurations that are included in our default NPM configuration fallback settings.
#
# ## Notes
#
# * If the system root directory is not writable (even with `sudo`), then the shared modules are installed to the provisioning user's `$HOME` directory
#
# ## Links
#
# * [`package.json` configuration file](https://github.com/megabyte-labs/install.doctor/blob/master/home/dot_config/Code/User/package.json)
# * [StrictLint.com documentation](https://strictlint.com/docs)
# * [Visual Studio Code settings folder](https://github.com/megabyte-labs/install.doctor/blob/master/home/dot_config/Code/User)
# * [Visual Studio Code `extensions.json`](https://github.com/megabyte-labs/install.doctor/blob/master/home/dot_config/Code/User/extensions.json)
### Hides useless error during extension installations
# Error looks like:
# (node:53151) [DEP0005] DeprecationWarning: Buffer() is deprecated due to security and usability issues. Please use the Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() methods instead.
# (Use `Electron --trace-deprecation ...` to show where the warning was created)
export NODE_OPTIONS=--no-deprecation
# @description Install Visual Studio Code extensions if they are not already installed (by checking the `code --list-extensions` output)
if command -v code > /dev/null; then
EXTENSIONS="$(code --list-extensions)"
jq -r '.recommendations[]' "${XDG_CONFIG_HOME:-$HOME/.config}/Code/User/extensions.json" | while read EXTENSION; do
if ! echo "$EXTENSIONS" | grep -iF "$EXTENSION" > /dev/null; then
logg info 'Installing Visual Studio Code extension '"$EXTENSION"'' && code --install-extension "$EXTENSION"
logg success 'Installed '"$EXTENSION"''
else
logg info ''"$EXTENSION"' already installed'
fi
done
else
logg info 'code executable not available - skipping plugin install process for it'
fi
if command -v code > /dev/null && command -v npm > /dev/null && [ -f "${XDG_DATA_HOME:-$HOME/.local/share}/vscode/package.json" ]; then
### Install linter fallback node_modules / package.json to system or home directory
if sudo cp -f "${XDG_DATA_HOME:-$HOME/.local/share}/vscode/package.json" /package.json; then
logg info 'Successfully copied linter fallback configurations package.json to /package.json'
logg info 'Installing system root directory node_modules'
cd / && sudo npm i --quiet --no-progress --no-package-lock || EXIT_CODE=$?
else
logg warn 'Unable to successfully copy linter fallback configurations package.json to /package.json'
logg info 'Installing linter fallback configurations node_modules to home directory instead'
cp -f "${XDG_DATA_HOME:-$HOME/.local/share}/vscode/package.json" "$HOME/package.json"
cd ~ && npm i --quiet --no-progress --no-package-lock || EXIT_CODE=$?
fi
### Log message if install failed
if [ -n "$EXIT_CODE" ]; then
logg warn 'Possible error(s) were detected while installing the linter fallback configurations.'
logg info "Exit code: $EXIT_CODE"
else
logg info 'Installed linter fallback configuration node_modules'
fi
else
logg info 'Skipping installation of fallback linter configurations because one or more of the dependencies is missing.'
fi
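# Optional read-only check (a sketch assuming jq and the code CLI are available): report any
# recommended extensions that are still missing after the loop above, without changing anything.
if command -v code > /dev/null && command -v jq > /dev/null && [ -f "${XDG_CONFIG_HOME:-$HOME/.config}/Code/User/extensions.json" ]; then
  comm -13 <(code --list-extensions | tr '[:upper:]' '[:lower:]' | sort) \
    <(jq -r '.recommendations[] | ascii_downcase' "${XDG_CONFIG_HOME:-$HOME/.config}/Code/User/extensions.json" | sort) \
    | while read -r MISSING_EXTENSION; do logg warn "Extension $MISSING_EXTENSION is still not installed"; done
fi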

View file

@ -0,0 +1,19 @@
#!/usr/bin/env bash
# @file VSCodium Extension Pre-Installation
# @brief This script pre-installs the extensions contained in ~/.config/Code/User/extensions.json
export NODE_OPTIONS=--no-deprecation
# @description Check for the presence of the `codium` command in the `PATH` and install extensions for VSCodium if it is present
if command -v codium > /dev/null; then
EXTENSIONS="$(codium --list-extensions)"
jq -r '.recommendations[]' "${XDG_CONFIG_HOME:-$HOME/.config}/Code/User/extensions.json" | while read EXTENSION; do
if ! echo "$EXTENSIONS" | grep -iF "$EXTENSION" > /dev/null; then
logg info 'Installing VSCodium extension '"$EXTENSION"'' && codium --install-extension "$EXTENSION" && logg success 'Installed '"$EXTENSION"''
else
logg info ''"$EXTENSION"' already installed'
fi
done
else
logg info 'codium executable not available - skipping plugin install process for it'
fi

View file

@ -0,0 +1,250 @@
#!/usr/bin/env bash
# @file CloudFlare WARP
# @brief Installs CloudFlare WARP, ensures proper security certificates are in place, and connects the device to CloudFlare WARP.
# @description
# This script is intended to connect the device to CloudFlare's Zero Trust network with nearly all of its features unlocked.
# Homebrew is used to install the `warp-cli` on macOS. On Linux, it can install `warp-cli` on most Debian systems and some RedHat
# systems. CloudFlare WARP's [download page](https://pkg.cloudflareclient.com/packages/cloudflare-warp) is somewhat barren.
#
# ## MDM Configuration
#
# If CloudFlare WARP successfully installs, it first applies MDM configurations (managed configurations). If you would like CloudFlare
# WARP to connect completely headlessly (while losing some "user-posture" settings), then you can populate the following three secrets:
#
# 1. `CLOUDFLARE_TEAMS_CLIENT_ID` - The ID from a CloudFlare Teams service token. See [this article](https://developers.cloudflare.com/cloudflare-one/identity/service-tokens/).
# 2. `CLOUDFLARE_TEAMS_CLIENT_SECRET` - The secret from a CloudFlare Teams service token.
# 3. `CLOUDFLARE_TEAMS_ORG` - The ID of your Zero Trust organization. This variable must be passed in as an environment variable and is housed in the `home/.chezmoi.yaml.tmpl` file. If you do not want to pass an environment variable, you can change the default value in `home/.chezmoi.yaml.tmpl` on your own fork.
#
# The first two secrets above can be passed in using either of the methods described in the [Secrets documentation](https://install.doctor/docs/customization/secrets).
#
# ## Headless CloudFlare WARP Connection
#
# Even if you do not provide the two variables mentioned above, the script will still headlessly connect your device to the public CloudFlare WARP
# network, where you will get some of the benefits of a VPN for free. Otherwise, if they were passed in, then the script
# finishes by connecting to CloudFlare Teams.
#
# ## Application Certificates
#
# This script applies the techniques described on the [CloudFlare Zero Trust Install certificate manually page](https://developers.cloudflare.com/cloudflare-one/connections/connect-devices/warp/user-side-certificates/install-cloudflare-cert/)
# to configure the following utilities that leverage separate certificate authorities:
#
# * Python
# * NPM
# * Git
# * Google Cloud SDK
# * AWS CLI
# * Google Drive for desktop
#
# Settings used to configure Firefox are housed inside the Firefox configuration files stored as separate configuration files
# outside of this script. **Note: The scripts that enable CloudFlare certificates for all these programs are currently commented out
# in this script.**
#
# ## Notes
#
# According to CloudFlare Teams [documentation on MDM deployment](https://developers.cloudflare.com/cloudflare-one/connections/connect-devices/warp/deployment/mdm-deployment/),
# on macOS the `com.cloudflare.warp.plist` file gets erased on reboot. Also, according to the documentation, the only way around this is to leverage
# an MDM SaaS provider like JumpCloud.
#
# ## Links
#
# * [Linux managed configuration](https://github.com/megabyte-labs/install.doctor/tree/master/home/dot_config/warp/private_mdm.xml.tmpl)
# * [macOS managed configuration](https://github.com/megabyte-labs/install.doctor/tree/master/home/Library/Managed%20Preferences/private_com.cloudflare.warp.plist.tmpl)
SSL_CERT_PATH="/etc/ssl/cert.pem"
### Install CloudFlare WARP (on non-WSL *nix systems)
if [[ ! "$(test -d /proc && grep Microsoft /proc/version > /dev/null)" ]]; then
if [ -d /System ] && [ -d /Applications ]; then
### Install on macOS
if [ ! -d "/Applications/Cloudflare WARP.app" ]; then
brew install --cask --no-quarantine --quiet cloudflare-warp
else
logg info 'Cloudflare WARP already installed'
fi
elif [ '{{ .host.distro.id }}' = 'debian' ]; then
### Add CloudFlare WARP desktop app apt-get source
if [ ! -f /etc/apt/sources.list.d/cloudflare-client.list ]; then
logg info 'Adding CloudFlare WARP keyring'
curl https://pkg.cloudflareclient.com/pubkey.gpg | sudo gpg --yes --dearmor --output /usr/share/keyrings/cloudflare-warp-archive-keyring.gpg
logg info 'Adding apt source reference'
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/cloudflare-warp-archive-keyring.gpg] https://pkg.cloudflareclient.com/ $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/cloudflare-client.list
fi
### Update apt-get and install the CloudFlare WARP CLI
sudo apt-get update && sudo apt-get install -y cloudflare-warp
elif [ '{{ .host.distro.id }}' = 'ubuntu' ]; then
### Add CloudFlare WARP desktop app apt-get source
if [ ! -f /etc/apt/sources.list.d/cloudflare-client.list ]; then
logg info 'Adding CloudFlare WARP keyring'
curl https://pkg.cloudflareclient.com/pubkey.gpg | sudo gpg --yes --dearmor --output /usr/share/keyrings/cloudflare-warp-archive-keyring.gpg
logg info 'Adding apt source reference'
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/cloudflare-warp-archive-keyring.gpg] https://pkg.cloudflareclient.com/ $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/cloudflare-client.list
fi
### Update apt-get and install the CloudFlare WARP CLI
sudo apt-get update && sudo apt-get install -y cloudflare-warp
elif command -v dnf > /dev/null && command -v rpm > /dev/null; then
### This is made for CentOS 8 and works on Fedora 36 (hopefully 36+ as well) with `nss-tools` as a dependency
sudo dnf install -y nss-tools || NSS_TOOL_EXIT=$?
if [ -n "$NSS_TOOL_EXIT" ]; then
logg warn 'Unable to install nss-tools, which is required on Fedora 36 and assumed to be required on other RedHat-based systems as well.'
fi
### According to the download site, this is the only version available for RedHat-based systems
sudo rpm -ivh https://pkg.cloudflareclient.com/cloudflare-release-el8.rpm || RPM_EXIT_CODE=$?
if [ -n "$RPM_EXIT_CODE" ]; then
logg error 'Unable to install CloudFlare WARP using RedHat 8 RPM package'
fi
fi
fi
### Ensure certificate is installed
# Source: https://developers.cloudflare.com/cloudflare-one/static/documentation/connections/Cloudflare_CA.crt
# Source: https://developers.cloudflare.com/cloudflare-one/static/documentation/connections/Cloudflare_CA.pem
if [ -d /System ] && [ -d /Applications ] && command -v warp-cli > /dev/null; then
### Ensure certificate installed on macOS
if [ -z "$SSH_CONNECTION" ]; then
# if [ -z "$HEADLESS_INSTALL" ]; then
# logg info '**macOS Manual Security Permission** Requesting security authorization for Cloudflare trusted certificate'
# sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain "$HOME/.local/etc/ssl/cloudflare/Cloudflare_CA.crt"
# fi
logg info 'Updating the OpenSSL CA Store to include the Cloudflare certificate'
echo | sudo tee -a "$SSL_CERT_PATH" < "$HOME/.local/etc/ssl/cloudflare/Cloudflare_CA.pem" > /dev/null
echo "" | sudo tee -a "$SSL_CERT_PATH"
else
logg warn 'Session is SSH so adding Cloudflare encryption key to trusted certificates via the security program is being bypassed since it requires Touch ID / Password verification.'
fi
if [ -f "/usr/local/opt/openssl@3/bin/c_rehash" ]; then
# Location on Intel macOS
logg info 'Ensuring /usr/local/etc/openssl@3/certs directory exists' && mkdir -p /usr/local/etc/openssl@3/certs
logg info 'Adding Cloudflare certificate to /usr/local/etc/openssl@3/certs/Cloudflare_CA.pem'
echo | sudo cat - "$HOME/.local/etc/ssl/cloudflare/Cloudflare_CA.pem" >> /usr/local/etc/openssl@3/certs/Cloudflare_CA.pem
logg info 'Running /usr/local/opt/openssl@3/bin/c_rehash'
/usr/local/opt/openssl@3/bin/c_rehash > /dev/null && logg info 'OpenSSL certificate rehash successful'
elif [ -f "${HOMEBREW_PREFIX:-/opt/homebrew}/opt/openssl@3/bin/c_rehash" ]; then
# Location on arm64 macOS and custom Homebrew locations
logg info "Ensuring ${HOMEBREW_PREFIX:-/opt/homebrew}/etc/openssl@3/certs directory exists" && mkdir -p "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/openssl@3/certs"
logg info "Adding Cloudflare certificate to ${HOMEBREW_PREFIX:-/opt/homebrew}/etc/openssl@3/certs/Cloudflare_CA.pem"
echo | sudo cat - "$HOME/.local/etc/ssl/cloudflare/Cloudflare_CA.pem" >> "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/openssl@3/certs/Cloudflare_CA.pem"
logg info "Running ${HOMEBREW_PREFIX:-/opt/homebrew}/opt/openssl@3/bin/c_rehash"
"${HOMEBREW_PREFIX:-/opt/homebrew}/opt/openssl@3/bin/c_rehash" > /dev/null && logg info 'OpenSSL certificate rehash successful'
else
logg warn 'Unable to add Cloudflare_CA.pem because /usr/local/etc/openssl@3/certs and /opt/homebrew/etc/openssl@3/certs do not exist!'
fi
elif command -v warp-cli > /dev/null; then
# System is Linux
if command -v dpkg-reconfigure > /dev/null; then
if [ -d /usr/local/share/ca-certificates ]; then
logg info 'Copying CloudFlare Teams PEM file to /usr/local/share/ca-certificates/Cloudflare_CA.crt'
sudo cp -f "$HOME/.local/etc/ssl/cloudflare/Cloudflare_CA.pem" /usr/local/share/ca-certificates/Cloudflare_CA.crt
logg info 'dpkg-reconfigure executable detected so using Debian/Ubuntu method of updating system trusted certificates to include CloudFlare Teams certificate'
sudo dpkg-reconfigure ca-certificates -p high
SSL_CERT_PATH="/etc/ssl/certs/ca-certificates.crt"
else
logg warn 'No /usr/local/share/ca-certificates folder present'
fi
elif command -v update-ca-trust > /dev/null; then
if [ -d /etc/pki/ca-trust/source/anchors ]; then
logg info 'Copying CloudFlare Teams certificates to /etc/pki/ca-trust/source/anchors'
sudo cp -f "$HOME/.local/etc/ssl/cloudflare/Cloudflare_CA.crt" "$HOME/.local/etc/ssl/cloudflare/Cloudflare_CA.pem" /etc/pki/ca-trust/source/anchors
logg info 'update-ca-trust executable detected so using CentOS/Fedora method of updating system trusted certificates to include CloudFlare Teams certificate'
sudo update-ca-trust
SSL_CERT_PATH="/etc/pki/tls/certs/ca-bundle.crt"
else
logg warn '/etc/pki/ca-trust/source/anchors does not exist so skipping the system certificate update process'
fi
fi
fi
if command -v warp-cli > /dev/null; then
### Application certificate configuration
# Application-specific certificate authority modification is currently commented out because
# it is merely for traffic inspection and `npm install` fails when configured to use the CloudFlare
# certificate and the WARP client is not running.
### Git
if command -v git > /dev/null; then
logg info "Configuring git to use $SSL_CERT_PATH"
git config --global http.sslcainfo "$SSL_CERT_PATH"
fi
### NPM
if command -v npm > /dev/null; then
logg info "Configuring npm to use $SSL_CERT_PATH"
npm config set cafile "$SSL_CERT_PATH"
fi
### Python
if command -v python3 > /dev/null; then
### Ensure Certifi package is available globally
if ! pip3 list | grep certifi > /dev/null; then
if command -v brew > /dev/null; then
logg info 'Ensuring Python certifi is installed via Homebrew'
brew install --quiet certifi
else
logg info 'Ensuring certifi is installed globally for Python 3'
pip3 install certifi
fi
fi
### Copy CloudFlare PEM file to Python 3 location
logg info "Configuring python3 / python to use "$HOME/.local/etc/ssl/cloudflare/Cloudflare_CA.pem""
echo | cat - "$HOME/.local/etc/ssl/cloudflare/Cloudflare_CA.pem" >> $(python3 -m certifi)
fi
### Google Cloud SDK
if command -v gcloud > /dev/null; then
logg info "Configuring gcloud to use "$HOME/.local/etc/ssl/cloudflare/Cloudflare_CA.pem" and "$HOME/.local/etc/ssl/gcloud/ca.pem""
mkdir -p "$HOME/.local/etc/ssl/gcloud"
cat "$HOME/.local/etc/ssl/curl/cacert.pem" "$HOME/.local/etc/ssl/cloudflare/Cloudflare_CA.pem" > "$HOME/.local/etc/ssl/gcloud/ca.pem"
gcloud config set core/custom_ca_certs_file "$HOME/.local/etc/ssl/gcloud/ca.pem"
fi
### Google Drive for desktop (macOS)
if [ -d "/Applications/Google Drive.app" ]; then
if [ -d "/Applications/Google Drive.app/Contents/Resources" ]; then
logg info "Combining Google Drive roots.pem with CloudFlare certificate"
mkdir -p "$HOME/.local/etc/ssl/google-drive"
cat "$HOME/.local/etc/ssl/cloudflare/Cloudflare_CA.pem" "/Applications/Google Drive.app/Contents/Resources/roots.pem" >> "$HOME/.local/etc/ssl/google-drive/roots.pem"
sudo defaults write /Library/Preferences/com.google.drivefs.settings TrustedRootsCertsFile -string "$HOME/.local/etc/ssl/google-drive/roots.pem"
else
logg warn 'Google Drive.app installed but roots.pem is not available yet'
fi
fi
### Ensure MDM settings are applied (deletes after reboot on macOS)
### TODO: Ensure `.plist` can be added to `~/Library/Managed Preferences` and not just `/Library/Managed Preferences`
# Source: https://developers.cloudflare.com/cloudflare-one/connections/connect-devices/warp/deployment/mdm-deployment/
# Source for JumpCloud: https://developers.cloudflare.com/cloudflare-one/static/documentation/connections/CloudflareWARP.mobileconfig
if [ -d /System ] && [ -d /Applications ]; then
sudo mkdir -p "/Library/Managed Preferences"
sudo cp -f "$HOME/Library/Managed Preferences/com.cloudflare.warp.plist" '/Library/Managed Preferences/com.cloudflare.warp.plist'
sudo plutil -convert binary1 '/Library/Managed Preferences/com.cloudflare.warp.plist'
### Enable CloudFlare WARP credentials auto-populate (since file is deleted when not managed with MDM)
if [ -f "$HOME/Library/LaunchDaemons/com.cloudflare.warp.plist" ] && [ ! -f "/Library/LaunchDaemons/com.cloudflare.warp.plist" ]; then
sudo mkdir -p /Library/LaunchDaemons
sudo cp -f "$HOME/Library/LaunchDaemons/com.cloudflare.warp.plist" '/Library/LaunchDaemons/com.cloudflare.warp.plist'
sudo launchctl load "/Library/LaunchDaemons/com.cloudflare.warp.plist"
fi
elif [ -f "${XDG_CONFIG_HOME:-$HOME/.config}/warp/mdm.xml" ]; then
sudo mkdir -p /var/lib/cloudflare-warp
sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/warp/mdm.xml" /var/lib/cloudflare-warp/mdm.xml
fi
### Register CloudFlare WARP
if warp-cli --accept-tos status | grep 'Registration missing' > /dev/null; then
logg info 'Registering CloudFlare WARP'
warp-cli --accept-tos register
else
logg info 'Either there is a misconfiguration or the device is already registered with CloudFlare WARP'
fi
### Connect CloudFlare WARP
if warp-cli --accept-tos status | grep 'Disconnected' > /dev/null; then
logg info 'Connecting to CloudFlare WARP'
warp-cli --accept-tos connect > /dev/null && logg success 'Connected to CloudFlare WARP'
else
logg info 'Either there is a misconfiguration or the device is already connected with CloudFlare WARP'
fi
else
logg warn 'warp-cli was not installed so CloudFlare WARP cannot be joined'
fi
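# Optional verification sketch (assumes curl is available): Cloudflare's trace endpoint reports
# "warp=on" (or "warp=plus") when traffic is actually egressing through WARP.
if command -v warp-cli > /dev/null && command -v curl > /dev/null; then
  logg info 'Checking WARP status via the Cloudflare trace endpoint'
  curl -sS https://www.cloudflare.com/cdn-cgi/trace/ | grep '^warp=' || logg warn 'Unable to determine WARP status from the trace endpoint'
fi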

View file

@ -0,0 +1,65 @@
#!/usr/bin/env bash
# @file Wazuh Client Install
# @brief Installs the Wazuh client and connects to the manager if configured to do so through secrets / environment variables
if [ -d /Applications ] && [ -d /System ]; then
### macOS
if ! csrutil status | grep enabled > /dev/null; then
cd /tmp
logg info 'Downloading the macOS Wazuh agent pkg'
curl -sSL https://packages.wazuh.com/4.x/macos/wazuh-agent-4.4.4-1.pkg > wazuh-agent.pkg
sudo launchctl setenv WAZUH_MANAGER "$WAZUH_MANAGER"
logg info 'Installing the Wazuh agent pkg'
sudo installer -pkg wazuh-agent.pkg -target /
sudo chmod 755 /Library/Ossec
sudo chmod 755 /Library/Ossec/bin
rm /tmp/wazuh-agent.pkg
logg info 'Running sudo wazuh-control start'
sudo wazuh-control start
else
logg warn "Skipping Wazuh Agent installation because System Integrity Protection is enabled. Disabling it requires booting into recovery and running csrutil disable, installing Wazuh Agent normally, and then re-enabling it again in recovery mode."
fi
else
if command -v apt-get > /dev/null; then
logg info 'Importing GPG-KEY-WAZUH'
curl -s https://packages.wazuh.com/key/GPG-KEY-WAZUH | sudo gpg --no-default-keyring --keyring gnupg-ring:/usr/share/keyrings/wazuh.gpg --import
sudo chmod 644 /usr/share/keyrings/wazuh.gpg
echo "deb [signed-by=/usr/share/keyrings/wazuh.gpg] https://packages.wazuh.com/4.x/apt/ stable main" | sudo tee -a /etc/apt/sources.list.d/wazuh.list
sudo apt-get update
logg info 'Installing the Wazuh agent'
sudo apt-get install -y wazuh-agent
elif command -v dnf > /dev/null; then
logg info 'Configuring /etc/yum.repos.d/wazuh.repo'
echo "[wazuh]" | sudo tee -a /etc/yum.repos.d/wazuh.repo && echo "gpgcheck=1" | sudo tee -a /etc/yum.repos.d/wazuh.repo
echo "gpgkey=https://packages.wazuh.com/key/GPG-KEY-WAZUH" | sudo tee -a /etc/yum.repos.d/wazuh.repo
echo "enabled=1" | sudo tee -a /etc/yum.repos.d/wazuh.repo
echo "name=EL-\$releasever - Wazuh" | sudo tee -a /etc/yum.repos.d/wazuh.repo
echo "baseurl=https://packages.wazuh.com/4.x/yum/" | sudo tee -a /etc/yum.repos.d/wazuh.repo
echo "protect=1" | sudo tee -a /etc/yum.repos.d/wazuh.repo
logg info 'Importing GPG-KEY-WAZUH'
sudo rpm --import https://packages.wazuh.com/key/GPG-KEY-WAZUH
logg info 'Installing Wazuh agent'
sudo dnf install -y wazuh-agent
elif command -v zypper > /dev/null; then
logg info 'Configuring /etc/zypp/repos.d/wazuh.repo'
echo "[wazuh]" | sudo tee -a /etc/zypp/repos.d/wazuh.repo
echo "gpgcheck=1" | sudo tee -a /etc/zypp/repos.d/wazuh.repo
echo "gpgkey=https://packages.wazuh.com/key/GPG-KEY-WAZUH" | sudo tee -a /etc/zypp/repos.d/wazuh.repo
echo "enabled=1" | sudo tee -a /etc/zypp/repos.d/wazuh.repo
echo "name=EL-$releasever - Wazuh" | sudo tee -a /etc/zypp/repos.d/wazuh.repo
echo "baseurl=https://packages.wazuh.com/4.x/yum/" | sudo tee -a /etc/zypp/repos.d/wazuh.repo
echo "protect=1" | sudo tee -a /etc/zypp/repos.d/wazuh.repo
logg info 'Importing GPG-KEY-WAZUH'
sudo rpm --import https://packages.wazuh.com/key/GPG-KEY-WAZUH
logg info 'Installing Wazuh agent'
sudo zypper install -y wazuh-agent
elif command -v apk > /dev/null; then
logg info 'Importing Wazuh repository'
sudo wget -O /etc/apk/keys/alpine-devel@wazuh.com-633d7457.rsa.pub https://packages.wazuh.com/key/alpine-devel%40wazuh.com-633d7457.rsa.pub
echo "https://packages.wazuh.com/4.x/alpine/v3.12/main" | sudo tee -a /etc/apk/repositories
logg info 'Running sudo apk update'
sudo apk update
logg info 'Installing Wazuh agent'
sudo apk add wazuh-agent
fi
fi
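# Post-install sketch for Linux hosts (assumes systemd and that WAZUH_MANAGER was exported before
# the package installation, which is how the Wazuh packages pick up the manager address): enable
# and start the agent so it begins reporting to the manager.
if command -v systemctl > /dev/null && [ -d /var/ossec ]; then
  logg info 'Enabling and starting the wazuh-agent service'
  sudo systemctl daemon-reload
  sudo systemctl enable --now wazuh-agent
fi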

View file

@ -0,0 +1,46 @@
#!/usr/bin/env bash
# @file macOS WireGuard Profiles
# @brief Installs WireGuard VPN profiles on macOS devices
# @description
# This script installs WireGuard VPN profiles on macOS. It scans `${XDG_CONFIG_HOME:-$HOME/.config}/vpn` for all the `*.conf` files
# and then copies those profiles to `/etc/wireguard`. It also performs a couple of preparation tasks, like ensuring the target
# WireGuard system configuration file directory exists and is assigned the proper permissions.
#
# ## Creating VPN Profiles
#
# More details on embedding your VPN profiles into your Install Doctor fork can be found by reading the [Secrets documentation](https://install.doctor/docs/customization/secrets#vpn-profiles).
#
# ## TODO
#
# * Populate Tunnelblick on macOS using the VPN profiles located in `${XDG_CONFIG_HOME:-$HOME/.config}/vpn`
# * For the Tunnelblick integration, ensure the username / password is populated from the `OVPN_USERNAME` and `OVPN_PASSWORD` variables
#
# ## Links
#
# * [VPN profile folder](https://github.com/megabyte-labs/install.doctor/blob/master/home/dot_config/vpn)
# * [VPN profile documentation](https://install.doctor/docs/customization/secrets#vpn-profiles)
# TODO - Populate Tunnelblick on macOS using the .ovpn profiles located in $HOME/.config/vpn (except in the `openvpn` entry of software.yml)
# along with the secrets for the protonVPN OpenVPN (check vpn-linux.tmpl)
### Backs up previous network settings to `/Library/Preferences/com.apple.networkextension.plist.old` before applying new VPN profiles
if [ -f /Library/Preferences/com.apple.networkextension.plist ] && [ ! -f "/Library/Preferences/com.apple.networkextension.plist.old" ]; then
logg info 'Backing up /Library/Preferences/com.apple.networkextension.plist to /Library/Preferences/com.apple.networkextension.plist.old'
sudo cp -f /Library/Preferences/com.apple.networkextension.plist /Library/Preferences/com.apple.networkextension.plist.old
else
logg info 'The /Library/Preferences/com.apple.networkextension.plist does not exist or is already backed up to com.apple.networkextension.plist.old'
fi
### Ensures the `/etc/wireguard` directory exists and has the lowest possible permission-level
if [ ! -d /etc/wireguard ]; then
logg info 'Creating /etc/wireguard since it does not exist yet'
sudo mkdir -p /etc/wireguard
sudo chmod 600 /etc/wireguard
fi
### TODO - Should adding the .conf files to /etc/wireguard only be done on macOS or is this useful on Linux as well?
### Cycles through the `*.conf` files in `${XDG_CONFIG_HOME:-$HOME/.config}/vpn` and adds them to the `/etc/wireguard` folder
find "${XDG_CONFIG_HOME:-$HOME/.config}/vpn" -mindepth 1 -maxdepth 1 -type f -name "*.conf" | while read WG_CONF; do
WG_FILE="$(basename "$WG_CONF")"
logg info 'Adding '"$WG_FILE"' to /etc/wireguard'
sudo cp -f "$WG_CONF" "/etc/wireguard/$WG_FILE"
done
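# Usage sketch (assumes wireguard-tools provides wg / wg-quick): a profile copied above can be
# brought up by its file name without the .conf suffix, e.g. `sudo wg-quick up wg0` for wg0.conf.
# The read-only command below simply lists any tunnels that are already active.
if command -v wg > /dev/null; then
  sudo wg show interfaces || true
fi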

View file

@ -0,0 +1,55 @@
#!/usr/bin/env bash
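# @file Xcode via xcodeinstall
# @brief Installs Xcode, Additional Tools, and Font Tools on macOS via the xcodeinstall CLI, authenticating with the AWS-backed session referenced by $AWS_DEFAULT_REGION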
### Load AWS secrets
if [ -d /Applications ] && [ -d /System ] && [ ! -d /Applications/Xcode.app ]; then
### Remove old files
logg info 'Removing old ~/.xcodeinstall folder' && rm -rf ~/.xcodeinstall
### Ensure xcodeinstall installed
if ! command -v xcodeinstall > /dev/null; then
logg info 'Installing xcodeinstall'
brew install sebsto/macos/xcodeinstall
fi
### Authenticate
logg info 'Authenticating with AWS via xcodeinstall'
xcodeinstall authenticate -s "$AWS_DEFAULT_REGION"
### Download files
while read XCODE_DOWNLOAD_ITEM; do
if [[ "$XCODE_DOWNLOAD_ITEM" != *"Command Line Tools"* ]]; then
DOWNLOAD_ID="$(echo "$XCODE_DOWNLOAD_ITEM" | sed 's/^\[\(.*\)\] .*/\1/')"
logg info "Downloading $XCODE_DOWNLOAD_ITEM"
echo "$DOWNLOAD_ID" | xcodeinstall download -s "$AWS_DEFAULT_REGION"
fi
done < <(xcodeinstall list -s "$AWS_DEFAULT_REGION" | grep --invert-match 'Release Candidate' | grep --invert-match ' beta ' | grep ' Xcode [0-9][0-9] ')
### Install Xcode
logg info 'Installing Xcode'
xcodeinstall install --name "$(basename "$(find ~/.xcodeinstall/download -maxdepth 1 -name "*.xip")")"
### Install Command Line Tools
# Commented out because it is already installed by xcode-select in the provision.sh script
# xcodeinstall install --name "$(basename "$(find ~/.xcodeinstall/download -maxdepth 1 -name "*Command Line Tools*")")"
### Install Additional Tools
logg info 'Installing Additional Tools'
while read ADDITIONAL_TOOLS; do
hdiutil attach "$ADDITIONAL_TOOLS"
rm -rf "/Applications/Additional Tools"
cp -rf "/Volumes/Additional Tools" "/Applications/Additional Tools"
hdiutil detach "$(find /Volumes -name "Additional Tools")"
done < <(find ~/.xcodeinstall/download -name "Additional Tools*")
### Install Font Tools
logg info 'Installing Font Tools'
while read FONT_TOOLS; do
hdiutil attach "$FONT_TOOLS"
cd "$(find /Volumes -maxdepth 1 -name "*Font Tools*")"
sudo installer -pkg "$(find . -maxdepth 1 -name "*Font Tools*.pkg")" -target /
cd / && hdiutil detach "$(find /Volumes -maxdepth 1 -name "*Font Tools*")"
done < <(find ~/.xcodeinstall/download -name "Font Tools*")
### Remove cache / downloaded files
rm -rf ~/.xcodeinstall
fi
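# Optional follow-up sketch (assumes the install above placed Xcode in /Applications): accept the
# Xcode license non-interactively and print the installed version for confirmation.
if [ -d /Applications/Xcode.app ] && command -v xcodebuild > /dev/null; then
  logg info 'Accepting the Xcode license and printing the installed version'
  sudo xcodebuild -license accept
  xcodebuild -version
fi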

View file

@ -84,8 +84,8 @@ zstyle ':completion:*' verbose true
zstyle ':completion:*:kill:*' command 'ps -u $USER -o pid,%cpu,tty,cputime,cmd'
zstyle ':completion:*:git-checkout:*' sort false
zstyle ':completion:*:descriptions' format '[%d]'
if command -v fzf > /dev/null && command -v exa > /dev/null; then
zstyle ':fzf-tab:complete:cd:*' fzf-preview 'exa -1 --color=always $realpath'
if command -v fzf > /dev/null && command -v eza > /dev/null; then
zstyle ':fzf-tab:complete:cd:*' fzf-preview 'eza -1 --color=always $realpath'
zstyle ':fzf-tab:*' switch-group ',' '.'
fi
@ -267,7 +267,7 @@ esac
[ ! -f "$HOME/.local/scripts/antigen.zsh" ] || source "$HOME/.local/scripts/antigen.zsh"
if command -v antigen > /dev/null; then
# Fix for oh-my-zsh overriding exa aliases
# Fix for oh-my-zsh overriding eza aliases
export DISABLE_LS_COLORS=true
# Official Oh-My-ZSH plugins
antigen use oh-my-zsh

File diff suppressed because it is too large Load diff