101 Commits

Author SHA1 Message Date
97f50bf4e2 no solarized color scheme 2021-01-20 19:41:31 -08:00
5dd9e21593 update dockprom playbook to use machine_name var (still hard-coding vpn ip addr) 2021-01-20 19:41:16 -08:00
18b143c82f run commands as regular user, not root 2021-01-20 19:40:46 -08:00
434d1acc24 update firewall 2021-01-20 19:39:57 -08:00
2579bb8aab update group_vars 2021-01-20 19:39:41 -08:00
61c286d1e0 expand on ssh role, set system and user configuration 2021-01-20 19:39:30 -08:00
ac6334eff5 add firewall role 2021-01-20 19:38:34 -08:00
b137995503 scrub local and linode hosts 2021-01-20 19:38:14 -08:00
ce1e9af2f1 update local cfg file 2020-12-31 13:34:57 -08:00
e9f70d71f3 update localhosts 2020-12-31 13:34:32 -08:00
b551f68def update linodehosts 2020-12-31 13:34:08 -08:00
9fc34261db add dockprom playbook 2020-12-31 13:32:41 -08:00
2a8356d118 add pod-dockprom role 2020-12-31 13:32:29 -08:00
a948092be2 update grup vars for pod-dockprom role 2020-12-31 13:32:17 -08:00
937a266fe7 remove unused port numbers 2020-12-31 13:31:15 -08:00
463411de88 fix domain in postfix 2020-06-25 21:33:03 -07:00
6ca3ea3ecf move vim-go to goenv role 2020-06-25 21:32:41 -07:00
124a791c16 add dotfiles to barebones 2020-06-25 21:32:28 -07:00
70140fb353 update ansible vault command 2020-06-25 21:07:43 -07:00
d4b8948474 finally turn off the docker-compose syslog firehose once and for all 2020-06-18 20:37:12 -07:00
c6e8081406 mention bot playbook in readme 2020-06-18 20:36:53 -07:00
9938adfabc update vault 2020-06-18 20:36:40 -07:00
3cd11712b0 add bots playbook info to ansible playbooks docs page 2020-06-18 20:36:25 -07:00
3f010d9090 smol 2020-06-18 20:36:02 -07:00
3c7689c398 add a barebones playbook that only creates nonroot user 2020-06-18 20:35:45 -07:00
f5e6d4a5f6 fix typo 2020-06-09 18:58:12 -07:00
3f9b78468b fix unzip command 2020-05-30 20:23:17 -07:00
a1f4cd4948 add encrypted secrets 2020-05-30 20:10:10 -07:00
4209b797f4 fix commands and paths 2020-05-30 20:09:42 -07:00
50c50ad305 add memo with notes on what to update in docs 2020-05-30 15:18:00 -07:00
ec10c1559c split uptime into separate playbook 2020-05-30 15:14:38 -07:00
8ebf1094e6 typo fix 2020-05-30 15:14:22 -07:00
62c27441fe update uptime variable names 2020-05-30 15:14:09 -07:00
873d95ecd4 update uptime var names 2020-05-30 15:13:51 -07:00
fec7e737b7 add git tag to clone tasks in pod-charlesreid1 role 2020-05-30 15:13:37 -07:00
4d14872994 use machine_name to run base.yml on select hosts 2020-05-30 15:13:19 -07:00
f8ea8078bb move dorylus to postfix, cut out 2020-05-30 15:13:02 -07:00
e94d281cad update bot install paths 2020-05-30 15:12:23 -07:00
159bda1ae7 fix var name in bot task 2020-05-30 15:11:20 -07:00
effdb54247 add "target" param to base.yml 2020-05-30 14:56:56 -07:00
f8406b2c74 add bots playbook and notes 2020-05-30 14:28:49 -07:00
8c506f015b update apollo, ginsberg and milton bot tasks 2020-05-30 14:28:16 -07:00
a6ccad8b09 flesh out apollo bot flock task 2020-05-30 14:20:31 -07:00
738b2ff5c1 add non-encrypted contents of secrets folder to gitignore 2020-05-30 13:48:21 -07:00
ed1f5b77a4 add notes on secrets 2020-05-30 13:48:07 -07:00
341b1657a5 add bots role 2020-05-16 23:00:40 -07:00
b2a516a32d update uptime template variables 2020-05-05 14:23:33 -07:00
a06a266616 specify pyenv vars in main group_vars file 2020-05-05 04:07:55 -07:00
ae42b8aec4 update pyenv invocation 2020-05-05 04:07:37 -07:00
6a66b46bfb service scripts should not be executable 2020-05-05 04:07:17 -07:00
e86d4384fc fix path to python - use pyenv python 2020-05-05 04:07:00 -07:00
0160ee59d4 update uptime tags, add virtualenv task 2020-05-05 02:13:39 -07:00
26a6af122f updaate uptime playbooks and vars 2020-05-05 01:39:47 -07:00
17e2da3439 fixup comments 2020-05-05 01:15:06 -07:00
f303a750e2 rename and expand on postfix role to dorylus 2020-05-05 01:14:25 -07:00
0f104ab562 add uptime readme 2020-05-05 01:14:17 -07:00
c7efdfe66d add var defaults to uptime role 2020-05-05 01:13:53 -07:00
a0ed839a4a add config portion to uptime role 2020-05-05 01:13:06 -07:00
30fe175d09 add safe linodehosts 2020-05-04 18:35:57 -07:00
6526e90691 update server names that podcharlesreid1.yml playbook applies to 2020-05-04 18:34:47 -07:00
c79cc877df update vault/default vars with uptime vars 2020-05-04 18:34:28 -07:00
824b930034 fix and tag machine-name tasks 2020-05-04 18:17:40 -07:00
dd960cd5e7 apply hash function to system password 2020-05-04 18:17:22 -07:00
6e26443266 add postfix playbook 2020-05-04 18:16:59 -07:00
2fb416d3d7 add postfix role 2020-05-04 18:16:13 -07:00
fe76fa5999 remove go setup from base.yml 2020-05-03 12:04:07 -07:00
c1522a8602 swap init-nonroot and docker steps in base.yml 2020-05-03 12:03:37 -07:00
34cb10ec7a add an apt-get update to the provision step 2020-05-03 12:03:10 -07:00
03f4a0cc48 better use of variables in roles 2020-05-03 12:02:57 -07:00
3019cf244e update vars in group_vars to remove unused vars 2020-05-03 12:02:44 -07:00
b670144aa0 update init-root and init-nonroot: create temp dir 2020-05-03 10:25:03 -07:00
e1f967e946 update readme 2020-05-02 12:01:28 -07:00
f46afced7f disable nginx service first, enable pod-charlesreid1 service first 2020-05-02 10:10:12 -07:00
e0f39de77d add a systemctl enable command for pod-charlesreid1 service 2020-05-01 22:44:22 -07:00
3708983a3c split pod-charlesreid1 tasks 2020-04-28 18:36:04 -07:00
0c0d74a837 the way it ought to be 2020-04-27 20:49:30 -07:00
bbe109fdc1 udpate todo list 2020-04-27 20:49:16 -07:00
e58a08b2bf remove unused subdomain var names 2020-04-26 15:37:37 -07:00
24e2147a0a remove subdomain letsencrypt tasks from pod-charlesreid1 role 2020-04-26 15:37:13 -07:00
d5b884636e fix path to certbot dhparams 2020-04-26 15:36:31 -07:00
bda35b7a88 add kludge fix for new ssh keys to prevent locking 2020-04-26 15:36:13 -07:00
c51fc85d77 fix broken docker-compose install, by using sort 2020-04-26 15:35:40 -07:00
40eb447e8b remove pod-bots 2020-04-26 15:35:07 -07:00
542e606733 fix toc 2020-04-25 22:29:42 -07:00
e72a14556e fix link to file options-ssl-nginx.conf 2020-04-25 19:39:14 -07:00
257293f1bb fix broken link to docker-compose bash completion 2020-04-25 19:31:22 -07:00
8577cf5e14 fill in pod-charlesreid1 readme, remove unused steps 2020-04-25 16:00:28 -07:00
ef9ff803c0 add linode docs page to mkdocs.yml 2020-04-19 11:13:26 -07:00
e4217e0b99 update docs 2020-04-19 11:13:12 -07:00
b3c6f57136 fixup quickstart 2020-04-18 17:46:29 -07:00
7a786a9fa1 update before you get started list with links on index doc page 2020-04-18 17:41:08 -07:00
8d6ff2595e update list of pods on playbooks doc page 2020-04-18 17:40:44 -07:00
8b51d48b4d re-add do hosts 2020-04-18 17:26:27 -07:00
929b89639c add ansible linode doc page, update ansible digital ocean doc page 2020-04-18 17:24:24 -07:00
33abcfc235 remove unused subdomains 2020-04-18 17:15:07 -07:00
a904fe5fe2 remove unused files 2020-04-18 17:12:48 -07:00
6c57f90894 rm pod-webhooks 2020-04-18 17:12:29 -07:00
d53b8e52a3 add crusty todo notes 2019-07-25 12:58:55 -07:00
ee13c237ff update nonroot init tasks to set a system password 2019-07-24 22:58:21 -07:00
bb919f3b61 Merge branch 'new-app-ini'
* new-app-ini:
  fix gitea config file task - use new repo path
2019-07-23 16:57:52 -07:00
63b63382c5 remove run_once from template tasks 2019-07-23 16:56:46 -07:00
109 changed files with 3690 additions and 1571 deletions

3
.gitignore vendored
View File

@@ -3,3 +3,6 @@
*.log
*.retry
site/
secrets/*
!secrets/*.enc

3
.gitmodules vendored
View File

@@ -1,3 +1,6 @@
[submodule "mkdocs-material"]
path = mkdocs-material
url = https://git.charlesreid1.com/charlesreid1/mkdocs-material.git
[submodule "roles/ansible-postfix"]
path = roles/postfix
url = https://github.com/Oefenweb/ansible-postfix.git

View File

@@ -21,8 +21,6 @@ are ready to run these docker pods.
| Pod | Link |
|------------------|------------------------------------------------------|
| pod-charlesreid1 | <https://git.charlesreid1.com/docker/pod-charlesreid1> |
| pod-webhooks | <https://git.charlesreid1.com/docker/pod-webhooks> |
| pod-bots | <https://git.charlesreid1.com/docker/pod-bots> |
## Playbooks
@@ -32,8 +30,7 @@ and a provision playbook.
| Playbook | Description |
|------------------------|----------------------------------------------------------------------------------------------------------------------|
| `podcharlesreid1.yml` | Playbook to install and run the charlesreid1.com docker pod (<https://git.charlesreid1.com/docker/pod-charlesreid1>) |
| `podwebhooks.yml` | (TBA) Playbook to install and run the webhooks pod (<https://git.charlesreid1.com/docker/pod-webhooks>) |
| `podbots.yml` | (TBA) Playbook to install and run the bot pod (<https://git.charlesreid1.com/docker/pod-bots>) |
| `bots.yml` | Playbook to install and run the apollo, ginsberg, and milton bot flocks. See <https://bots.charlesreid1.com>. |
| `base.yml` | Base playbook run by all of the pod playbooks above. |
| `provision.yml` | Playbook to provision new Ubuntu machines with `/usr/bin/python`. |
@@ -41,33 +38,33 @@ and a provision playbook.
## Roles
### Base Playbook Roles
### Playbook Roles
The following roles carry out groups of tasks for setting up the base machine
to run charlesreid1.com infrastructure.
**Base roles:**
| Role Name | Description |
|-----------------------|-----------------------------------------------------------|
| init-root | Prepare root user account |
| init-nonroot | Prepare nonroot user account(s) |
| dotfiles | Install and configure dotfiles for nonroot user |
| install-stuff | Install stuff with aptitude |
| letsencrypt | Install letsencrypt |
| postfix | Install postfix mail server |
| pyenv | Install pyenv for nonroot user |
| goenv | Install goenv for nonroot user |
| sshkeys | Set up ssh keys for all users |
| vim | Set up vim for nonroot user |
| dotfiles | Install and configure dotfiles for nonroot user |
### Pod-Specific Roles
The following roles are run by playbooks specific to the
respective docker pod.
**Machine-specific roles:**
| Role Name | Description |
|-----------------------|--------------------------------------------------------------|
| pod-charlesreid1 | Role specific to the charlesreid1.com docker pod |
| pod-webhooks | Role specific to \{hooks,pages\}.charlesreid1.com docker pod |
| pod-bots | Role specific to bots docker pod |
|-----------------------|-------------------------------------------------------------------------|
| bots | Install and run the apollo, ginsberg, and milton bot flocks. See <https://bots.charlesreid1.com>. |
| pod-charlesreid1 | Install the charlesreid1.com pod <https://github.com/charlesreid1-docker/pod-charlesreid1>). |
| uptime | Install the uptime bot (<https://github.com/charlesreid1-bots/uptime>). |
## Getting Started with Playbooks

20
Secrets.md Normal file
View File

@@ -0,0 +1,20 @@
# Secrets
Two kinds of secrets:
- secret variables (in secrets vault)
- secret files (encrypted and kept in `secrets/` directory)
## Secret Variables
Use `ansible-vault edit` command.
## Secret Files
Encrypt a file using `ansible-vault encrypt` command.
Standard procedure is to add .enc extension to filename.
```
$ ansible-vault encrypt --vault-password-file .vault_secret hello.txt --output hello.txt.enc
```

27
TODO Normal file
View File

@@ -0,0 +1,27 @@
mediawiki skin:
- we made it templatable already
- actually template it
- charlesreid1-config git repo
charlesreid1 theme:
- make links templatable
letsencrypt:
- set up script to renew the stupid letsencrypt every month
more letsencrypt:
- the pod-charlesreid1 role defaults has a top_domain set to charlesreid1.com
- it says, "check for letsencrypt certs to this domain (top level domain of entire pod)"
- this does not match up with the nginx config files... which is how things are REALLY set
- top domain is used by gitea...
pod-charlesreid1 /www setup
https://git.charlesreid1.com/charlesreid1/charlesreid1.com
/www/charlesreid1.com/
charlesreid1.com-src/ <-- clone of charlesreid1.com repo, src branch
git/ <-- .git dir for charlesreid1.com repo gh-pages branch
htdocs/ <-- clone of charlesreid1.com repo gh-pages branch

21
barebones.yml Normal file
View File

@@ -0,0 +1,21 @@
---
# barebones playbook
- name: Initial setup root
hosts: "{{ machine_name }}"
roles:
- role: init-root
tags: init-root
become: yes
- name: Initial setup non-root
hosts: "{{ machine_name }}"
roles:
- role: init-nonroot
tags: init-nonroot
- name: Set up dotfiles
hosts: "{{ machine_name }}"
roles:
- role: dotfiles
tags: dotfiles

View File

@@ -1,10 +1,10 @@
---
# main playbook for
# all charlesreid1 nodes
# all nodes
- name: Initial setup root
hosts: servers
hosts: "{{ machine_name }}"
roles:
- role: init-root
tags: init-root
@@ -12,63 +12,78 @@
- name: Install packages with aptitude
hosts: servers
hosts: "{{ machine_name }}"
roles:
- role: install-stuff
tags: install-stuff
become: yes
- name: Install docker and docker-compose
hosts: servers
roles:
- role: docker
become: yes
- name: Initial setup non-root
hosts: servers
hosts: "{{ machine_name }}"
roles:
- role: init-nonroot
tags: init-nonroot
- name: Install docker and docker-compose
hosts: "{{ machine_name }}"
roles:
- role: docker
become: yes
- name: Set up SSH keys
hosts: servers
hosts: "{{ machine_name }}"
roles:
- role: sshkeys
tags: sshkeys
- name: Install firewall
hosts: "{{ machine_name }}"
vars:
- firewall_allowed_tcp_ports:
- "{{ ssh_port }}"
- "80"
- "443"
- "8080" # cadvisor
- "9100" # nodeexporter
- "3100" # loki
- "9113" # nginxexporter
roles:
- role: firewall
become: yes
- name: Set up dotfiles
hosts: servers
hosts: "{{ machine_name }}"
roles:
- role: dotfiles
tags: dotfiles
- name: Set up vim
hosts: servers
hosts: "{{ machine_name }}"
roles:
- role: vim
tags: vim
- name: Install pyenv
hosts: servers
hosts: "{{ machine_name }}"
roles:
- role: pyenv
tags: pyenv
become: yes
- name: Install goenv
hosts: servers
roles:
- role: goenv
tags: goenv
become: yes
#- name: Install goenv
# hosts: servers
# roles:
# - role: goenv
# tags: goenv
# become: yes
#- name: Install AWS credentials and tools
@@ -78,5 +93,3 @@
# tags: aws
# aws_secret_access_key: "{{ charlesreid1_aws_secret_access_key }}"
# aws_access_key_id: "{{ charlesreid1_aws_access_key_id }}"

26
bots.md Normal file
View File

@@ -0,0 +1,26 @@
strategy for dealing with bot keys:
step 1: encrypt file with ansible-vault
```
$ ansible-vault encrypt server.key
```
step 2: refer to the encrypted file in the copy module
```
---
- hosts: all
tasks:
- name: Copy server private key
copy:
src: server.key
dest: /etc/env/server.key
decrypt: yes
owner: root
group: root
mode: 400
backup: no
```

10
bots.yml Normal file
View File

@@ -0,0 +1,10 @@
---
# Playbook for installing bot flocks
- name: Install bot flock
hosts: dorky
become: yes
roles:
- role: bots

11
dockprom.yml Normal file
View File

@@ -0,0 +1,11 @@
---
# deploy dockprom pod to dev stage
- name: Install dockprom docker pod
hosts: "{{ machine_name }}"
vars:
- install_client_service: "true"
- dockprom_bind_ip: "192.168.30.40"
roles:
- role: pod-dockprom
become: yes

View File

@@ -15,7 +15,7 @@ Table of Contents
## Droplet setup
Start by logging in to your digital ocean account
Start by logging in to your Digital Ocean account
and creating a droplet. You should be able to
create or specify an SSH key.
@@ -55,6 +55,8 @@ Now you can run the base playbook.
defined by default. Define it using the
`--extra-vars` flag.
Specifying a machine name using the `--extra-vars` flag:
```plain
ANSIBLE_CONFIG="do.cfg" \
ansible-playbook \
@@ -65,7 +67,8 @@ ANSIBLE_CONFIG="do.cfg" \
## Run pod playbooks
Once you've run the base playbook, you can install the
docker pod with the corresponding playbook.
docker pod with the corresponding playbook by specifying
`ANSIBLE_CONFIG` and pointing to the Digital Ocean config file.
pod-charlesreid1:

81
docs/ansible_linode.md Normal file
View File

@@ -0,0 +1,81 @@
# Linode Quickstart
This quickstart walks through the process
of setting up a Linode node
using these Ansible playbooks.
Table of Contents
=================
* [Node setup](#node-setup)
* [Run provision and base playbooks](#run-provision-and-base-playbooks)
* [Run pod playbooks](#run-pod-playbooks)
## Node setup
Start by logging in to your Linode account
and creating a new node. You should be able to
create or specify an SSH key.
!!! warning
You must modify the path to the SSH private
key, specified in `linode.cfg` (the Linode
Ansible config file), to match the SSH key that
you added to the droplet at its creation.
!!! warning
Once you create your droplet and it is connected
to the internet via a public IP, you must update
the file `linodehosts` (the Linode Ansible
inventory file) to point to the correct IP address
for the node.
## Run provision and base playbooks
Once you have the correct SSH key in `linode.cfg`
and the correct droplet IP address in `linodehosts`,
you are ready to run the Ansible playbooks.
Run the provision playbook to prepare the droplet for Ansible:
```plain
ANSIBLE_CONFIG="linode.cfg" \
ansible-playbook \
provision.yml
```
Now you can run the base playbook.
!!! warning
You must provide a `machine_name` parameter to
the base playbook. This variable is **_not_**
defined by default. Define it using the
`--extra-vars` flag.
Specifying a machine name using the `--extra-vars` flag:
```plain
ANSIBLE_CONFIG="linode.cfg" \
ansible-playbook \
--extra-vars "machine_name=redbeard" \
base.yml
```
## Run pod playbooks
Once you've run the base playbook, you can install the
docker pod with the corresponding playbook by specifying
`ANSIBLE_CONFIG` and pointing to the Linode config file.
pod-charlesreid1:
```plain
ANSIBLE_CONFIG="linode.cfg" \
ansible-playbook \
--extra-vars "machine_name=redbeard" \
podcharlesreid1.yml
```

View File

@@ -10,8 +10,7 @@ Table of Contents
* [provision\.yml: Provision Your Remote Node](#provisionyml-provision-your-remote-node)
* [base\.yml: the base plays](#baseyml-the-base-plays)
* [podcharlesreid1\.yml: charlesreid1 docker pod play](#podcharlesreid1yml-charlesreid1-docker-pod-play)
* [charlesreid1hooks\.yml: webhooks server docker pod play](#charlesreid1hooksyml-webhooks-server-docker-pod-play)
* [charlesreid1bots\.yml: bots docker pod play](#charlesreid1botsyml-bots-docker-pod-play)
* [bots\.yml: bot play](#botsyml-charlesreid1-bots-play)
* [List of Tags](#list-of-tags)
@@ -27,11 +26,15 @@ step installs `/usr/bin/python`.
ANSIBLE_CONFIG="vagrant.cfg" vagrant provision
```
Running plays against a Digital Ocean node requires
Running plays against a Linode/Digital Ocean node requires
the provision playbook to be run explicitly with the
command:
```plain
# Linode
ANSIBLE_CONFIG="linode.cfg" ansible-playbook provision.yml
# Digital Ocean
ANSIBLE_CONFIG="do.cfg" ansible-playbook provision.yml
```
@@ -58,8 +61,17 @@ ANSIBLE_CONFIG="vagrant.cfg" \
base.yml
```
To run on Digital Ocean, use the same command
but specify the corrsponding config file:
To run on Linode:
```plain
ANSIBLE_CONFIG="linode.cfg" \
ansible-playbook \
--vault-password-file=.vault_secret \
--extra-vars "machine_name=yoyo" \
base.yml
```
To run on Digital Ocean:
```plain
ANSIBLE_CONFIG="do.cfg" \
@@ -72,17 +84,12 @@ ANSIBLE_CONFIG="do.cfg" \
## podcharlesreid1.yml: charlesreid1 docker pod play
**host: krash**
**host: redbeard**
The charlesreid1 docker pod runs the following:
- nginx
- letsencrypt/certs
- mediawiki
- gitea
- files/etc
**Example:** Deploy the charlesreid1 docker pod play
on a Vagrant machine.
@@ -107,8 +114,17 @@ ANSIBLE_CONFIG="vagrant.cfg" \
podcharlesreid1.yml
```
**Example:** Deploy the charlesreid1 docker pod play
to a Digital Ocean droplet.
**Linode Example:**
```plain
ANSIBLE_CONFIG="linode.cfg" \
ansible-playbook \
--vault-password-file=.vault_secret \
--extra-vars "machine_name=yoyo" \
podcharlesreid1.yml
```
**Digital Ocean Example:**
```plain
ANSIBLE_CONFIG="do.cfg" \
@@ -119,30 +135,32 @@ ANSIBLE_CONFIG="do.cfg" \
```
## charlesreid1hooks.yml: webhooks server docker pod play
## bots.yml: charlesreid1 bots play
**host: bluebear**
The bot playbook installs the following bot flocks:
**host: bluebeard**
- [apollo bot flock](https://github.com/charlesreid1-bots/apollo-space-junk)
- [ginsberg bot flock](https://github.com/charlesreid1-bots/ginsberg-bot-flock)
- [milton bot flock](https://github.com/charlesreid1-bots/milton-bot-flock)
The webhooks server docker pod runs the following:
It also installs the Twitter keys these bots require using the Ansible
vault, which stores an encrypted zip file with keys for each bot flock.
- captain hook webhook server
- hooks.charlesreid1.com domain
- static site hosting for pages.charlesreid1.com
- pages.charlesreid1.com domain
```plain
ANSIBLE_CONFIG="vagrant.cfg" \
ansible-playbook \
--vault-password-file=.vault_secret \
bots.yml
```
**Linode Example:**
## charlesreid1bots.yml: bots docker pod play
**host: bluebear**
The bots docker pod runs several Python
scripts to keep some Twitter bots going:
- Ginsberg bot flock
- Milton bot flock
- Apollo Space Junk bot flock
```plain
ANSIBLE_CONFIG="linode.cfg" \
ansible-playbook \
--vault-password-file=.vault_secret \
bots.yml
```
## List of Tags

View File

@@ -15,6 +15,7 @@ Table of Contents
* [How to edit the vault file?](#how-to-edit-the-vault-file)
* [How to use the vault file?](#how-to-use-the-vault-file)
* [Adding new secret variables](#adding-new-secret-variables)
* [Adding new encrypted files](#adding-new-encrypted-files)
## What is Ansible Vault?
@@ -166,3 +167,14 @@ This is where you put the real API key:
vault_api_key: "ABCXYZ123456"
```
## Adding new encrypted files
Encrypt a file using `ansible-vault encrypt` command.
Standard procedure is to add .enc extension to filename.
```
$ ansible-vault encrypt --vault-password-file .vault_secret hello.txt --output hello.txt.enc
```

View File

@@ -20,11 +20,24 @@ Table of Contents
Before you get started:
* Provision a compute node (vagrant or cloud provider)
* Provision a compute node (Vagrant or cloud provider)
* If using Vagrant, see the [Ansible Vagrant](ansible_vagrant.md) page for
instructions on how to provision virtual machines.
* If using a cloud provider, follow the instructions provided by your
cloud provider.
* Configure and enable SSH access
* Run Ansible with the `base.yml` playbook
* Run Ansible with the pod playbook of your choice
* Configure DNS to point to compute node IP address
* If using Vagrant, see the [Ansible Vagrant](ansible_vagrant.md) page for
instructions on how to get SSH key information from Vagrant virtual machines.
* If using a cloud provider, you should be provided with an SSH key or
SSH access instructions by your cloud provider.
* Run Ansible with the `base.yml` playbook - see [Ansible Playbooks](ansible_playbooks.md#baseyml-the-base-plays)
and `base.yml` for information and details about this playbook.
* Run Ansible with the pod-charlesreid1 playbook `pod-charlesreid1.yml`
* Configure DNS to point to the IP address of the compute node
## Docker Pods
@@ -36,6 +49,11 @@ are ready to run these docker pods.
| Pod | Link |
|------------------|--------------------------------------------------------|
| pod-charlesreid1 | <https://git.charlesreid1.com/docker/pod-charlesreid1> |
The following pods **HAVE BEEN DEACTIVATED:**
| Pod | Link |
|------------------|--------------------------------------------------------|
| pod-webhooks | <https://git.charlesreid1.com/docker/pod-webhooks> |
| pod-bots | <https://git.charlesreid1.com/docker/pod-bots> |
@@ -45,14 +63,11 @@ are ready to run these docker pods.
There is one playbook per docker pod, plus a base playbook
and a provision playbook.
| Playbook | Description |
|------------------------|----------------------------------------------------------------------------------------------------------------------|
| `provision.yml` | (Vagrant-only) Playbook to provision new Ubuntu machines with `/usr/bin/python`. |
| `base.yml` | Base playbook run by all of the pod playbooks above. |
| `podcharlesreid1.yml` | Playbook to install and run the charlesreid1.com docker pod (<https://git.charlesreid1.com/docker/pod-charlesreid1>) |
| `podwebhooks.yml` | (TBA) Playbook to install and run the webhooks pod (<https://git.charlesreid1.com/docker/pod-webhooks>) |
| `podbots.yml` | (TBA) Playbook to install and run the bot pod (<https://git.charlesreid1.com/docker/pod-bots>) |
| Playbook | Description | Link |
|------------------------|----------------------------------------------------------------------------------------------------------------------|----------------|
| `provision.yml` | (Vagrant-only) Playbook to provision new Ubuntu machines with `/usr/bin/python`. | [link](ansible_playbooks.md#provisionyml-provision-your-remote-node) |
| `base.yml` | Base playbook run by all of the pod playbooks above. | [link](ansible_playbooks.md#baseyml-the-base-plays) |
| `podcharlesreid1.yml` | Playbook to install and run the charlesreid1.com docker pod | [link](https://git.charlesreid1.com/docker/pod-charlesreid1) |
## Roles
@@ -82,8 +97,6 @@ respective docker pod.
| Role Name | Description |
|-----------------------|--------------------------------------------------------------|
| pod-charlesreid1 | Role specific to the charlesreid1.com docker pod |
| pod-webhooks | Role specific to \{hooks,pages\}.charlesreid1.com docker pod |
| pod-bots | Role specific to bots docker pod |
## Getting Started with Playbooks
@@ -92,6 +105,7 @@ respective docker pod.
|-----------------------------------------------|-----------------------------------------------------------------|
| [docs/index.md](index.md) | Documentation index |
| [docs/quickstart.md](quickstart.md) | Quick start for the impatient (uses Vagrant) |
| [docs/ansible_linode.md](ansible_linode.md) | Guide for running charlesreid1.com playbooks on Linode |
| [docs/ansible_do.md](ansible_do.md) | Guide for running charlesreid1.com playbooks on Digital Ocean |
| [docs/ansible_vagrant.md](ansible_vagrant.md) | Guide for running charlesreid1.com playbooks on Vagrant |
@@ -182,6 +196,12 @@ on how to set up a Vagrant virtual machine to run the
Ansible playbook against, for testing purposes.
## Linode Deployment
See [Ansible Linode](ansible_linode.md) for instructions on how to set up a Linode node
to run the Ansible playbook against.
## Digital Ocean Deployment
See [Ansible Digital Ocean](ansible_do.md) for instructions on how to set up an Digital Ocean

View File

@@ -13,7 +13,6 @@ Table of Contents
* [Provision Vagrant Machines](#provision-vagrant-machines)
* [Configure Ansible-Vagrant SSH Info](#configure-ansible-vagrant-ssh-info)
* [Cloud Node Setup](#cloud-node-setup)
* [Installing SSH Keys](#installing-ssh-keys)
* [Run Ansible](#run-ansible)
* [Set Up Vault Secret](#set-up-vault-secret)
* [Run the Base Playbook](#run-the-base-playbook)

25
firewall.yml Normal file
View File

@@ -0,0 +1,25 @@
---
# Playbook for firewall role
- name: Set up SSH keys
hosts: "{{ machine_name }}"
roles:
- role: sshkeys
tags: sshkeys
- name: Install firewall
hosts: "{{ machine_name }}"
vars:
- firewall_allowed_tcp_ports:
- "{{ ssh_port }}"
- "80"
- "443"
- "8080" # cadvisor
- "9100" # nodeexporter
- "3100" # loki
- "9113" # nginxexporter
roles:
- role: firewall
become: yes

View File

@@ -22,43 +22,66 @@ ssh_key_email: "charlesreid1@gmail.com"
charlesreid1_admin_email: "charles@charlesreid1.com"
# nginx variables
charlesreid1_port_default: "80"
charlesreid1_port_gitea: "80"
charlesreid1_port_files: "80"
charlesreid1_port_pages: "80"
charlesreid1_port_hooks: "80"
charlesreid1_port_bots: "80"
charlesreid1_port_ssl_default: "443"
charlesreid1_port_ssl_gitea: "443"
charlesreid1_port_ssl_files: "443"
charlesreid1_port_ssl_pages: "443"
charlesreid1_port_ssl_hooks: "443"
charlesreid1_port_ssl_bots: "443"
# pyenv variables
pyenv_root: "/home/{{ username }}/.pyenv"
pyenv_versions:
- 3.7.9
# - miniconda3-4.3.30
# - 3.7.5
# - 3.8.0
pyenv_global_version: 3.7.9
pyenv_python: "{{ pyenv_root }}/versions/{{ pyenv_global_version }}/bin/python"
pyenv_pip: "{{ pyenv_root }}/versions/{{ pyenv_global_version }}/bin/pip"
# ports
ssh_port: 5778
########################
# vault variables
####################
# dockprom
charlesreid1_dockprom_admin_user: "{{ vault_dockprom_admin_user }}"
charlesreid1_dockprom_admin_pass: "{{ vault_dockprom_admin_pass }}"
####################
# base vault vars
# unix system password
charlesreid1_system_password: "{{ vault_system_password }}"
base_system_password: "{{ vault_system_password }}"
####################
# uptime vault vars
# uptime details
charlesreid1_uptime_user: "{{ username }}"
charlesreid1_uptime_gmail_email: "{{ vault_uptime_gmail_email }}"
charlesreid1_uptime_gmail_password: "{{ vault_uptime_gmail_password }}"
charlesreid1_uptime_recipient_name: "{{ vault_uptime_recipient_name }}"
charlesreid1_uptime_recipient_email: "{{ vault_uptime_recipient_email }}"
charlesreid1_uptime_slack_apikey: "{{ vault_uptime_slack_apikey }}"
charlesreid1_uptime_slack_channel: "{{ vault_uptime_slack_channel }}"
####################
# charlesreid1 vault vars
# AWS credentials:
charlesreid1_aws_secret_access_key: "{{ vault_aws_secret_access_key }}"
charlesreid1_aws_access_key_id: "{{ vault_aws_access_key_id }}"
# set the IP address of our two servers
#
# pod-charlesreid
charlesreid1_nginx_charlesreid1_ip: "{{ vault_nginx_charlesreid1_ip }}"
#
# pod-webhooks
charlesreid1_nginx_subdomains_ip: "{{ vault_nginx_subdomains_ip }}"
# Secrets and stuff
# pod-charlesreid1 requires a mysql password
@@ -70,8 +93,3 @@ charlesreid1_gitea_internal_token: "{{ vault_gitea_internal_token }}"
# mediawiki secret key for web sessions
charlesreid1_mediawiki_secretkey: "{{ vault_mediawiki_secretkey }}"
# pod-webhooks requires a secret to be sent
# with the webhook.
charlesreid1_captain_hook_secret: "{{ vault_captain_hook_secret }}"

View File

@@ -1,46 +1,59 @@
$ANSIBLE_VAULT;1.1;AES256
66323230386436663663343339376232383737666330323161643535323434363962366336663061
6264653564626462323934633730326535643935393233370a323031333835616137666634343934
32613038323132376566353837373337393931633239373332653063333838333633373831653938
6139316565646531650a663864383930383038373763363236653634373832613931643139313432
38356133356635356637646132313631623834326331333362623932363363313030633533633361
33666162613931636238646136366466633933336233326236636139613266613735633832383463
64366435343138366136663433363332663633643963626633646361343566623461393437396632
33353366336330383164646438656237363532366563333066323838633539663331613866346161
39346166393064333230623235323432333235396637356432656132343833323336623633616235
38393430613039333232353339356665373132366235333438383562393066313537646338323236
34366431636435366438333236623330613565343033663839316562616330303365383233396334
66333831643162653631643062616535353965393837336264613938383636633664653935383064
66396536663065303438663932616435313061643930326533316530383234393230666332616233
66333366316634313861396330303561616136316562613530303364643639353366363937356537
38376266376237646662656232333532303532373832633630663530326361623131626631363035
36313264626334373136323461623339363730653636393939373562356130656564613964373334
31326131323465663861333063613132363838396630616236386166336164353262326335356138
64636264633232623864383431616364303465316662353337333632346238633136633933643234
37383731333133363139643835376563653462346138646663636537346331363864306536623139
63626239643561383363313764363435376666666533323664386565376435383361306634633239
34653637303338646634643965363235633635633736386435373334326461663464616637646236
62303139623738646432356337626464346136396536356538643736303137626438633362356332
33306564333131636530666534316136386534623962633438663639663235316164346136383262
30363234396564663438373463653164656434656661356530616639663533386231396462656234
32663133366130376135353664653036376161633063353237323033376662643265396235623339
36633434613732346437303837393164626538316336616437303566353936623863313561666238
30663263386663376263313139653737666135306237353737323862636565666566343034643133
36653066316633343837623432393561336663646432363965393463613330313935663934623030
66653066396664393765363536376336353962373466373163366331353835316561313533333536
63346331633337343766393335313836366336656533653233636162636534363261356366336432
66323035386430653230646539623765333134383136306634646136333235636635336337356465
61626265613565346437656233306438333939323132396437643034636531303763373932393935
63633731643436376630373662613461383333623162323534626131336438313961333431656636
63383733633166646230663762373466303732616437636265646266626238636463313732316661
39653734626463313065313064383631356565613463323465393836613962653438336666313066
65316333623061396436366136643365653563376337653463613565336231623534633831656639
65616462363738323935393334653162613562633239326634643533303565643764646236353936
37353930383566373364376132383861393835613738386562393365316439373665393535356435
39396564646138323865333063336563636163356637363065316433393733613262643232663564
64366539653262386334363766346162373037623033626439316464616636376461643034633464
62616130383630613864353432313432326464353866623132383963323438396266656566303331
64613732393662386633656263333861663738313530303361313464316337346261336563363334
65663336333735316134636262643037373134373461633466613233343135336130626430396334
33366330326662313163636362633532333437616537363335393133363065626235373830356131
64303639656534326130333234643338346436356533636465356437653366663764
62626164613766613162653233616565393064366366303463653761626435663236366237663931
3938316262353261666435313766306438656330653561330a303036313536363263633635636435
33363266643233363838616239333061316432346362383063326630623532363862666331373137
6161643632353730350a656335663536366264633634323263386461646161386233646639393862
32663162613130343463646363653663363237303436623138633366316163323164623366616538
31306336383434656536383339383535646461326539653934363436333363633963313239383938
30666333373537653338316633643436313732346261656330643162343230636163343136353464
33646237663338636134613832623338316463366338623662363665633561316565306664663533
62336636626136613465346533316237626335656632373535383137353264306337633637653762
34366561386462306464373263363537303465306533303935383130393161343030323337343932
33323839326665643734643064353838643436626363643733363232386665323761303165383236
33393533333361383566616335343336303730656432306632326134653239306334306438646437
65323339303038656239333230323037343466393134353731643033643065333431623333663264
61306132636637353734373064343965386233663031313836306639313533303130306663316666
30626564373066333561633363383733313063346564336338653737346130313432653231353732
64636661346434616536636638623265396330343639613139623965373131336363376333626162
65386562613362613266336565303065663132336263636535623639383035343131336532393466
66666563623863653566336464363738366566343462366263653434303364623237633763333864
34313362643665613834303533653533326531396132613539363434363463303263643433363866
35353331633436346238616231656166343030613935343332363132363135353063386563366438
61653739373534313164373262326233613032353835616334396332643262616665326130386462
37363734633964363937633336326361313561373066643766356462333562373565643138333065
66613165393539663239396561393235653236646537656637356430323731643761613061393665
32636262343861386264326666613230373966316561653637336465653831343531363439323433
65343430303361663437666230383236656538326466636366373366326637633063383538643461
64643431656535623961313164623764376130633839306632376237633734343635393164356363
39636261666639626261313962386434626533313538393463623365643065633432386630386434
34313164313366353862653838356431323764633133303962346663303836333361613333666463
34633032393861386332383236366432396337353539616132336537326663303263613464346235
37373163383164306233653265356136393364316637626361353432333436306634643462333530
63623330666237636138633131646232663531326462303837393236656662666233316532373162
62353366326238313131366234646532626565666563393139376536643936313736626166313466
61383461383538386566356333396464373636626266373239623266356263323532646366343966
39666566623964303834326330303437626431356261396663373031306164636131383338313661
38373034663266663763656436666137336235646635326664326633616662383039386139616266
61306630373838333234613566386431633534653961633234653364326437356233343965666465
65326266656665633331356665363435343438613134343339393762373762643530376363343930
35333735386331343530343239393864323838633364363338373734323434393736333837373363
38383464303434316436343764373934643162616237333930383239353862366532316263303461
66333031323563626461363134656636393734323531343163373736353965323865613963646332
32653363336366643261323063323662326239346135316664393366623532333865343461666532
34343761636135363035313338353934653533366165633361653738333836336630383538336264
61633538623663313136363636393332616335626137326332613131363934373235306662356163
65643334376634626665316136393236313437376233333963316134613861623035666132386136
63363062653235663136383665356661306538373566313136336564356563326138656635353466
31646333373334623931353037663863636366386530383435623139336630353261633339323961
65646332623336616536343063643666646634326462366131613930653538613433373230326633
38353733656561353938306235303231623438396366356235666131323366633061313361656533
38646331336636303138623962646464363062313462366664653466326335393437333336366133
37383462303635316661343935353762666633366334343430326562663434313239373235356235
32303962653437366363363739646263663264376665353362383033383466336435303736313731
35646361306535373532393038383030336634353737343534663461393830346464386138623139
61623664626164386630623633363237643161656434343465633530653836373439376339313831
39343739336461333535663264626230393737306137653864323734626639313133626132626436
66616465386333626332663064396137666561663162383337333634303037366234633632623538
36356464323333613861383432356263636438316133333531393331323262316438343633643333
39363130376562373163663633363363306133643161313063303165643934633266613330616130
6633313739623562656533376639346132333338373030303561

View File

@@ -1,6 +1,6 @@
[defaults]
inventory = linodehosts
remote_user = root
remote_user = charles
private_key_file = ~/.ssh/id_rsa
host_key_checking = False
vault_password_file = .vault_secret

View File

@@ -1,5 +1,13 @@
[servers:children]
linodeservers
bear
dorky
dracaena
[linodeservers]
linode ansible_host=50.116.7.163 ansible_port=22 ansible_python_interpreter=/usr/bin/python3
[bear]
linode_bear ansible_host=300.300.300.300 ansible_port=22 ansible_python_interpreter=/usr/bin/python3
[dorky]
linode_dorky ansible_host=400.400.400.400 ansible_port=22 ansible_python_interpreter=/usr/bin/python3
[dracaena]
linode_dracaena ansible_host=500.500.500.500 ansible_port=22 ansible_python_interpreter=/usr/bin/python3

8
local.cfg Normal file
View File

@@ -0,0 +1,8 @@
[defaults]
inventory = localhosts
remote_user = charles
private_key_file = ~/.ssh/id_rsa
host_key_checking = False
vault_password_file = .vault_secret
log_path = ansible_linode.log
# Fixed: command_warnings is a boolean setting; "raise" is not a valid
# value and causes Ansible's config parser to reject the file.
# True keeps command-module warnings visible.
command_warnings = True

5
localhosts Normal file
View File

@@ -0,0 +1,5 @@
[servers:children]
bespin
[bespin]
localhost_bespin ansible_host=192.168.0.0 ansible_port=22 ansible_python_interpreter=/usr/bin/python3

7
memo Normal file
View File

@@ -0,0 +1,7 @@
changes:
- base uses machine name
- postfix is separate playbook
- uptime is separate playbook
- bots playbook
- pod charlesreid1 playbook

View File

@@ -25,6 +25,7 @@ nav:
- 'Index': 'index.md'
- 'Quickstart': 'quickstart.md'
- 'Ansible on Vagrant': 'ansible_vagrant.md'
- 'Ansible on Linode': 'ansible_linode.md'
- 'Ansible on DigitalOcean': 'ansible_do.md'
- 'Ansible Playbooks': 'ansible_playbooks.md'
- 'Ansible Vault': 'ansible_vault.md'

View File

@@ -4,27 +4,24 @@
- name: Install SSL certificates for charlesreid1 docker pod
hosts: servers
hosts: bear
become: yes
roles:
- role: letsencrypt
tags: letsencrypt
site_email: "charles@charlesreid1.com"
domains:
- "charlesreid1.red"
- "www.charlesreid1.red"
- "git.charlesreid1.red"
- "pages.charlesreid1.red"
- "bots.charlesreid1.red"
- "hooks.charlesreid1.red"
- "charlesreid1.com"
- "www.charlesreid1.com"
- "git.charlesreid1.com"
- name: Install charlesreid1 docker pod
hosts: servers
hosts: bear
become: yes
roles:
- role: pod-charlesreid1
tags: pod-charlesreid1
charlesreid1_server_name_default: "charlesreid1.red"
charlesreid1_server_name_default: "charlesreid1.com"

View File

@@ -1,14 +0,0 @@
---
# main playbook for webhooks docker pod
# SSL certs are all handled by the pod-charlesreid1 compute node
- name: Install webhooks docker pod (pages.* and hooks.* and bots.* subdomains)
hosts: servers
become: yes
roles:
- role: pod-webhooks
tags: pod-webhooks
charlesreid1_server_name_default: "charlesreid1.red"

15
postfix.yml Normal file
View File

@@ -0,0 +1,15 @@
---
# Playbook for postfix server
- name: Install postfix
hosts: dorky
roles:
- postfix
vars:
postfix_raw_options:
- |
append_dot_mydomain = yes
myorigin = /etc/mailname
postfix_hostname: "charlesreid1.party"
postfix_mailname: "charlesreid1.party"

View File

@@ -10,6 +10,8 @@
gather_facts: no
remote_user: root
pre_tasks:
- name: "Update aptitude"
raw: sudo apt-get -y update
- name: "Install python2"
raw: sudo apt-get -y install python
- name: Add the non-root user

View File

@@ -0,0 +1,12 @@
---
# Default variables for the bots role.
#
# Each bot flock runs as the non-root user and lives in that
# user's home directory.
apollo_user: "{{ nonroot_user }}"
apollo_path: "/home/{{ apollo_user }}/apollo"
ginsberg_user: "{{ nonroot_user }}"
# Fixed copy-paste bug: this path was previously built from
# apollo_user; it must follow ginsberg's own user so the two
# stay correct if they ever diverge.
ginsberg_path: "/home/{{ ginsberg_user }}/ginsberg"
milton_user: "{{ nonroot_user }}"
milton_path: "/home/{{ milton_user }}/milton"

183
roles/bots/tasks/apollo.yml Normal file
View File

@@ -0,0 +1,183 @@
---
# apollo bot flock
#
# Process:
# - clone repo
# - install api keys
# - install bot keys
# - install startup service
# - enable startup service
# - start startup service

# #####################################
# CLONE APOLLO SPACE JUNK

# Record whether the repo is already on disk so we can choose
# between a fresh clone and a plain `git pull` below.
- name: Check if apollo repo is already cloned
  stat:
    path: "{{ apollo_path }}"
  register: apollo_clone_check
  tags:
    - bots
    - apollo
    - git

- name: Clone apollo repo
  become: yes
  become_user: "{{ username }}"
  git:
    repo: "https://github.com/charlesreid1-bots/apollo-space-junk.git"
    dest: "{{ apollo_path }}"
    recursive: yes
  when:
    - "not apollo_clone_check.stat.exists"
  tags:
    - bots
    - apollo
    - git

- name: Pull apollo
  become: yes
  become_user: "{{ username }}"
  command: "git pull"
  args:
    chdir: "{{ apollo_path }}"
  when:
    - "apollo_clone_check.stat.exists"
  tags:
    - bots
    - apollo
    - git

# #####################################
# SET UP BOT KEYS

# The .enc sources are ansible-vault encrypted; `decrypt: yes`
# decrypts them in transit so plaintext keys land on the host only.
- name: "Install API keys"
  become: yes
  become_user: "{{ username }}"
  copy:
    src: "../../../secrets/apikeys.json.enc"
    dest: "{{ apollo_path }}/bot/apikeys.json"
    decrypt: yes
  tags:
    - bots
    - apollo
    - keys

- name: "Install zipped Apollo Twitter keys"
  become: yes
  become_user: "{{ username }}"
  copy:
    src: "../../../secrets/apollo_keys.zip.enc"
    dest: "{{ apollo_path }}/bot/apollo_keys.zip"
    decrypt: yes
  tags:
    - bots
    - apollo
    - keys

- name: "Unzip Apollo Twitter keys"
  become: yes
  become_user: "{{ username }}"
  command: "unzip -o {{ apollo_path }}/bot/apollo_keys.zip -d {{ apollo_path }}/bot"
  tags:
    - bots
    - apollo
    - keys

# #####################################
# SET UP VIRTUALENV
#
# Procedure:
# - install virtualenv package
# - create the virtual env dir
# - pip install requirements.txt

- name: Pip install virtualenv
  become: yes
  become_user: "{{ username }}"
  command: "/home/{{ username }}/.pyenv/shims/pip install virtualenv"
  tags:
    - bots
    - apollo
    - bot-virtualenv

- name: Create apollo virtual environment
  become: yes
  become_user: "{{ username }}"
  command: "{{ pyenv_python }} -m virtualenv -p python3.6 {{ apollo_path }}/vp"
  tags:
    - bots
    - apollo
    - bot-virtualenv

# Fixed copy-paste in the task name: this installs into the apollo
# virtualenv, not the uptime one.
- name: Pip install apollo requirements.txt into the apollo virtual environment
  become: yes
  become_user: "{{ username }}"
  command: "{{ apollo_path }}/vp/bin/pip install -r {{ apollo_path }}/requirements.txt"
  tags:
    - bots
    - apollo
    - bot-virtualenv

# #####################################
# INSTALL STARTUP SERVICES
#
# Check if bot startup service is installed.
# If not, install it.

- name: Check if apollo startup service is installed
  stat:
    path: /etc/systemd/system/apollo.service
  register: apollo_service_check
  tags:
    - bots
    - apollo
    - bot-services

# NOTE(review): fetch copies the template FROM the remote host TO the
# controller so the template task below can render and upload it.
- name: Fetch the apollo startup service jinja template
  fetch:
    src: "{{ apollo_path }}/service/apollo.service.j2"
    dest: "/tmp/apollo.service.j2"
    flat: yes
    fail_on_missing: yes
  tags:
    - bots
    - apollo
    - bot-services

- name: Install the apollo startup service file
  become: yes
  template:
    src: "/tmp/apollo.service.j2"
    dest: "/etc/systemd/system/apollo.service"
    # Quoted: an unquoted 0774 is parsed by YAML as an octal integer,
    # which Ansible misinterprets; the string form is unambiguous.
    mode: "0774"
  when:
    - "not apollo_service_check.stat.exists"
  tags:
    - bots
    - apollo
    - bot-services

# Bug fix: this task was previously gated on
# apollo_service_check.stat.exists, a fact registered BEFORE the unit
# file was installed — so a first run installed the service but never
# enabled it. `systemctl enable` is idempotent, and by this point the
# unit file is guaranteed to exist, so run unconditionally.
- name: Enable apollo startup service
  become: yes
  command: "systemctl enable apollo"
  tags:
    - bots
    - apollo
    - bot-services

View File

@@ -0,0 +1,177 @@
---
# ginsberg bot flock
#
# Process:
# - clone repo
# - install api keys
# - install bot keys
# - install startup service
# - enable startup service
# - start startup service
# #####################################
# CLONE GINSBERG BOT FLOCK
- name: Check if ginsberg repo is already cloned
stat:
path: "{{ ginsberg_path }}"
register: ginsberg_clone_check
tags:
- bots
- ginsberg
- git
- name: Clone ginsberg repo
become: yes
become_user: "{{ username }}"
git:
repo: "https://github.com/charlesreid1-bots/ginsberg-bot-flock.git"
dest: "{{ ginsberg_path }}"
recursive: yes
when:
- "not ginsberg_clone_check.stat.exists"
tags:
- bots
- ginsberg
- git
- name: Pull ginsberg
become: yes
become_user: "{{ username }}"
command: "git pull"
args:
chdir: "{{ ginsberg_path }}"
when:
- "ginsberg_clone_check.stat.exists"
tags:
- bots
- ginsberg
- git
# #####################################
# SET UP BOT KEYS
- name: "Install API keys"
copy:
src: "../../../secrets/apikeys.json.enc"
dest: "{{ ginsberg_path }}/bot/apikeys.json"
decrypt: yes
tags:
- bots
- ginsberg
- keys
- name: "Install zipped ginsberg Twitter keys"
copy:
src: "../../../secrets/ginsberg_keys.zip.enc"
dest: "{{ ginsberg_path }}/bot/ginsberg_keys.zip"
decrypt: yes
tags:
- bots
- ginsberg
- keys
- name: "Unzip ginsberg Twitter keys"
command: "unzip -o {{ ginsberg_path }}/bot/ginsberg_keys.zip -d {{ ginsberg_path }}/bot"
tags:
- bots
- ginsberg
- keys
# #####################################
# SET UP VIRTUALENV
#
# Procedure:
# - install virtualenv package
# - create the virtual env dir
# - source the activate script
# - pip install requirements.txt
- name: Pip install virtualenv
become: yes
become_user: "{{ username }}"
command: "/home/{{ username }}/.pyenv/shims/pip install virtualenv"
tags:
- bots
- ginsberg
- bot-virtualenv
- name: Create ginsberg virtual environment
become: yes
become_user: "{{ username }}"
command: "{{ pyenv_python }} -m virtualenv -p python3.6 {{ ginsberg_path }}/vp"
tags:
- bots
- ginsberg
- bot-virtualenv
- name: Pip install ginsberg requirements.txt into the uptime virtual environment
become: yes
become_user: "{{ username }}"
command: "{{ ginsberg_path }}/vp/bin/pip install -r {{ ginsberg_path }}/requirements.txt"
tags:
- bots
- ginsberg
- bot-virtualenv
# #####################################
# INSTALL STARTUP SERVICES
#
# Check if bot startup service is installed.
# If not, install it.
- name: Check if ginsberg startup service is installed
stat:
path: /etc/systemd/system/ginsberg.service
register: ginsberg_service_check
tags:
- bots
- ginsberg
- bot-services
- name: Fetch the ginsberg startup service jinja template
fetch:
src: "{{ ginsberg_path }}/service/ginsberg.service.j2"
dest: "/tmp/ginsberg.service.j2"
flat: yes
fail_on_missing: yes
tags:
- bots
- ginsberg
- bot-services
- name: Install the ginsberg startup service file
become: yes
template:
src: "/tmp/ginsberg.service.j2"
dest: "/etc/systemd/system/ginsberg.service"
mode: 0774
when:
- "not ginsberg_service_check.stat.exists"
tags:
- bots
- ginsberg
- bot-services
- name: Enable ginsberg startup service
become: yes
command: "systemctl enable ginsberg"
when:
- "ginsberg_service_check.stat.exists"
tags:
- bots
- ginsberg
- bot-services

View File

@@ -0,0 +1,7 @@
---
# tasks file for installing bots
- include: apollo.yml
- include: ginsberg.yml
- include: milton.yml

178
roles/bots/tasks/milton.yml Normal file
View File

@@ -0,0 +1,178 @@
---
# milton bot flock
#
# Process:
# - clone repo
# - install api keys
# - install bot keys
# - install startup service
# - enable startup service
# - start startup service
# #####################################
# CLONE MILTON BOT FLOCK
- name: Check if milton repo is already cloned
stat:
path: "{{ milton_path }}"
register: milton_clone_check
tags:
- bots
- milton
- git
- name: Clone milton repo
become: yes
become_user: "{{ username }}"
git:
repo: "https://github.com/charlesreid1-bots/milton-bot-flock.git"
dest: "{{ milton_path }}"
recursive: yes
when:
- "not milton_clone_check.stat.exists"
tags:
- bots
- milton
- git
- name: Pull milton
become: yes
become_user: "{{ username }}"
command: "git pull"
args:
chdir: "{{ milton_path }}"
when:
- "milton_clone_check.stat.exists"
tags:
- bots
- milton
- git
# #####################################
# SET UP BOT KEYS
- name: "Install API keys"
copy:
src: "../../../secrets/apikeys.json.enc"
dest: "{{ milton_path }}/bot/apikeys.json"
decrypt: yes
tags:
- bots
- milton
- keys
- name: "Install zipped milton Twitter keys"
copy:
src: "../../../secrets/milton_keys.zip.enc"
dest: "{{ milton_path }}/bot/milton_keys.zip"
decrypt: yes
tags:
- bots
- milton
- keys
- name: "Unzip milton Twitter keys"
command: "unzip -o {{ milton_path }}/bot/milton_keys.zip -d {{ milton_path }}/bot"
tags:
- bots
- milton
- keys
# #####################################
# SET UP VIRTUALENV
#
# Procedure:
# - install virtualenv package
# - create the virtual env dir
# - source the activate script
# - pip install requirements.txt
- name: Pip install virtualenv
become: yes
become_user: "{{ username }}"
command: "/home/{{ username }}/.pyenv/shims/pip install virtualenv"
tags:
- bots
- milton
- bot-virtualenv
- name: Create milton virtual environment
become: yes
become_user: "{{ username }}"
command: "{{ pyenv_python }} -m virtualenv -p python3.6 {{ milton_path }}/vp"
tags:
- bots
- milton
- bot-virtualenv
- name: Pip install milton requirements.txt into the uptime virtual environment
become: yes
become_user: "{{ username }}"
command: "{{ milton_path }}/vp/bin/pip install -r {{ milton_path }}/requirements.txt"
tags:
- bots
- milton
- bot-virtualenv
# #####################################
# INSTALL STARTUP SERVICES
#
# Check if bot startup service is installed.
# If not, install it.
- name: Check if milton startup service is installed
stat:
path: /etc/systemd/system/milton.service
register: milton_service_check
tags:
- bots
- milton
- bot-services
- name: Fetch the milton startup service jinja template
fetch:
src: "{{ milton_path }}/service/milton.service.j2"
dest: "/tmp/milton.service.j2"
flat: yes
fail_on_missing: yes
tags:
- bots
- milton
- bot-services
- name: Install the milton startup service file
become: yes
template:
src: "/tmp/milton.service.j2"
dest: "/etc/systemd/system/milton.service"
mode: 0774
when:
- "not milton_service_check.stat.exists"
tags:
- bots
- milton
- bot-services
- name: Enable milton startup service
become: yes
command: "systemctl enable milton"
when:
- "milton_service_check.stat.exists"
tags:
- bots
- milton
- bot-services
#

View File

@@ -17,10 +17,9 @@ sudo true
wget -qO- https://get.docker.com/ | sh
# Install docker-compose
COMPOSE_VERSION=`git ls-remote https://github.com/docker/compose | grep refs/tags | grep -oP "[0-9]+\.[0-9][0-9]+\.[0-9]+$" | tail -n 1`
COMPOSE_VERSION=`git ls-remote https://github.com/docker/compose | grep refs/tags | grep -oP "[0-9]+\.[0-9][0-9]+\.[0-9]+$" | sort | tail -n 1`
sudo sh -c "curl -L https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose"
sudo chmod +x /usr/local/bin/docker-compose
sudo sh -c "curl -L https://raw.githubusercontent.com/docker/compose/${COMPOSE_VERSION}/contrib/completion/bash/docker-compose > /etc/bash_completion.d/docker-compose"
# Install docker-cleanup command
cd /tmp

3
roles/firewall/.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
*.retry
*/__pycache__
*.pyc

20
roles/firewall/LICENSE Normal file
View File

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2017 Jeff Geerling
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

4
roles/firewall/README.md Normal file
View File

@@ -0,0 +1,4 @@
# Firewall Ansible Role
Source: <https://github.com/geerlingguy/ansible-role-firewall>

View File

@@ -0,0 +1,22 @@
---
firewall_state: started
firewall_enabled_at_boot: true
firewall_flush_rules_and_chains: true
firewall_allowed_tcp_ports:
- "22"
- "80"
- "443"
firewall_allowed_udp_ports: []
firewall_forwarded_tcp_ports: []
firewall_forwarded_udp_ports: []
firewall_additional_rules: []
firewall_enable_ipv6: true
firewall_ip6_additional_rules: []
firewall_log_dropped_packets: true
# Set to true to ensure other firewall management software is disabled.
firewall_disable_firewalld: true
firewall_disable_ufw: true

View File

@@ -0,0 +1,3 @@
---
# Handlers for the firewall role.

# Restarts the firewall service (which re-runs /etc/firewall.bash)
# whenever the firewall script or unit file changes.
# Rewritten from `key=value` inline args to block YAML, matching
# Ansible best practice and the style of the rest of this role.
- name: restart firewall
  service:
    name: firewall
    state: restarted

View File

@@ -0,0 +1,52 @@
---
- name: Ensure iptables is present.
package: name=iptables state=present
- name: Flush iptables the first time playbook runs.
command: >
iptables -F
creates=/etc/firewall.bash
- name: Copy firewall script into place.
template:
src: firewall.bash.j2
dest: /etc/firewall.bash
owner: root
group: root
mode: 0744
notify: restart firewall
- name: Copy firewall init script into place.
template:
src: firewall.init.j2
dest: /etc/init.d/firewall
owner: root
group: root
mode: 0755
when: "ansible_service_mgr != 'systemd'"
- name: Copy firewall systemd unit file into place (for systemd systems).
template:
src: firewall.unit.j2
dest: /etc/systemd/system/firewall.service
owner: root
group: root
mode: 0644
when: "ansible_service_mgr == 'systemd'"
- name: Configure the firewall service.
service:
name: firewall
state: "restarted"
#state: "{{ firewall_state }}"
enabled: "{{ firewall_enabled_at_boot }}"
- name: Stop the docker service.
service:
name: docker
state: stopped
- name: Start the docker service.
service:
name: docker
state: started

View File

@@ -0,0 +1,138 @@
#!/bin/bash
# iptables firewall.
#
# This file should be located at /etc/firewall.bash, and is meant to work with
# the `geerlingguy.firewall` Ansible role.
#
# Common port reference:
# 22: SSH
# 25: SMTP
# 80: HTTP
# 123: NTP
# 443: HTTPS
# 2222: SSH alternate
# 8080: HTTP alternate
#
# @author Jeff Geerling
# No spoofing.
if [ -e /proc/sys/net/ipv4/conf/all/rp_filter ]
then
for filter in /proc/sys/net/ipv4/conf/*/rp_filter
do
echo 1 > $filter
done
fi
# Set the default rules.
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
{% if firewall_flush_rules_and_chains %}
# Remove all rules and chains.
iptables -t nat -F
iptables -t mangle -F
iptables -F
iptables -X
{% endif %}
# Accept traffic from loopback interface (localhost).
iptables -A INPUT -i lo -j ACCEPT
# Forwarded ports.
{# Add a rule for each forwarded port #}
{% for forwarded_port in firewall_forwarded_tcp_ports %}
iptables -t nat -I PREROUTING -p tcp --dport {{ forwarded_port.src }} -j REDIRECT --to-port {{ forwarded_port.dest }}
iptables -t nat -I OUTPUT -p tcp -o lo --dport {{ forwarded_port.src }} -j REDIRECT --to-port {{ forwarded_port.dest }}
{% endfor %}
{% for forwarded_port in firewall_forwarded_udp_ports %}
iptables -t nat -I PREROUTING -p udp --dport {{ forwarded_port.src }} -j REDIRECT --to-port {{ forwarded_port.dest }}
iptables -t nat -I OUTPUT -p udp -o lo --dport {{ forwarded_port.src }} -j REDIRECT --to-port {{ forwarded_port.dest }}
{% endfor %}
# Open ports.
{# Add a rule for each open port #}
{% for port in firewall_allowed_tcp_ports %}
iptables -A INPUT -p tcp -m tcp --dport {{ port }} -j ACCEPT
{% endfor %}
{% for port in firewall_allowed_udp_ports %}
iptables -A INPUT -p udp -m udp --dport {{ port }} -j ACCEPT
{% endfor %}
# Accept icmp ping requests.
iptables -A INPUT -p icmp -j ACCEPT
# Allow NTP traffic for time synchronization.
iptables -A OUTPUT -p udp --dport 123 -j ACCEPT
iptables -A INPUT -p udp --sport 123 -j ACCEPT
# Additional custom rules.
{% for rule in firewall_additional_rules %}
{{ rule }}
{% endfor %}
# Allow established connections:
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# Log EVERYTHING (ONLY for Debug).
# iptables -A INPUT -j LOG
{% if firewall_log_dropped_packets %}
# Log other incoming requests (all of which are dropped) at 15/minute max.
iptables -A INPUT -m limit --limit 15/minute -j LOG --log-level 7 --log-prefix "Dropped by firewall: "
{% endif %}
# Drop all other traffic.
iptables -A INPUT -j DROP
{% if firewall_enable_ipv6 %}
# Configure IPv6 if ip6tables is present.
if [ -x "$(which ip6tables 2>/dev/null)" ]; then
{% if firewall_flush_rules_and_chains %}
# Remove all rules and chains.
ip6tables -F
ip6tables -X
{% endif %}
# Accept traffic from loopback interface (localhost).
ip6tables -A INPUT -i lo -j ACCEPT
# Open ports.
{# Add a rule for each open port #}
{% for port in firewall_allowed_tcp_ports %}
ip6tables -A INPUT -p tcp -m tcp --dport {{ port }} -j ACCEPT
{% endfor %}
{% for port in firewall_allowed_udp_ports %}
ip6tables -A INPUT -p udp -m udp --dport {{ port }} -j ACCEPT
{% endfor %}
# Accept icmp ping requests.
ip6tables -A INPUT -p icmpv6 -j ACCEPT
# Allow NTP traffic for time synchronization.
ip6tables -A OUTPUT -p udp --dport 123 -j ACCEPT
ip6tables -A INPUT -p udp --sport 123 -j ACCEPT
# Additional custom rules.
{% for rule in firewall_ip6_additional_rules %}
{{ rule }}
{% endfor %}
# Allow established connections:
ip6tables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# Log EVERYTHING (ONLY for Debug).
# ip6tables -A INPUT -j LOG
{% if firewall_log_dropped_packets %}
# Log other incoming requests (all of which are dropped) at 15/minute max.
ip6tables -A INPUT -m limit --limit 15/minute -j LOG --log-level 7 --log-prefix "Dropped by firewall: "
{% endif %}
# Drop all other traffic.
ip6tables -A INPUT -j DROP
fi
{% endif %}

View File

@@ -0,0 +1,52 @@
#! /bin/sh
# /etc/init.d/firewall
#
# Firewall init script, to be used with /etc/firewall.bash by Jeff Geerling.
#
# @author Jeff Geerling
### BEGIN INIT INFO
# Provides: firewall
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start firewall at boot time.
# Description: Enable the firewall.
### END INIT INFO
# Carry out specific functions when asked to by the system
case "$1" in
start)
echo "Starting firewall."
/etc/firewall.bash
;;
stop)
echo "Stopping firewall."
iptables -F
if [ -x "$(which ip6tables 2>/dev/null)" ]; then
ip6tables -F
fi
;;
restart)
echo "Restarting firewall."
/etc/firewall.bash
;;
status)
echo -e "`iptables -L -n`"
EXIT=4 # program or service status is unknown
NUMBER_OF_RULES=$(iptables-save | grep '^\-' | wc -l)
if [ 0 -eq $NUMBER_OF_RULES ]; then
EXIT=3 # program is not running
else
EXIT=0 # program is running or service is OK
fi
exit $EXIT
;;
*)
echo "Usage: /etc/init.d/firewall {start|stop|status|restart}"
exit 1
;;
esac
exit 0

View File

@@ -0,0 +1,12 @@
[Unit]
Description=Firewall
After=syslog.target network.target
[Service]
Type=oneshot
ExecStart=/etc/firewall.bash
ExecStop=/sbin/iptables -F
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target

View File

@@ -4,6 +4,9 @@
# Install any system packages required by goenv
- include: packages.yml
# Configure vim-go
- include: vim.yml
# Install profile environment variables
- include: envvars.yml

12
roles/goenv/tasks/vim.yml Normal file
View File

@@ -0,0 +1,12 @@
#############################
# install vim-go
- name: Clone vim-go to ~/.vim/bundle
become: yes
become_user: "{{ username }}"
git:
repo: "https://github.com/fatih/vim-go.git"
dest: "/home/{{ username }}/.vim/bundle/vim-go"

View File

@@ -2,3 +2,4 @@
# variables for init-nonroot
username: "{{ nonroot_user }}"
system_password: "{{ base_system_password }}"

View File

@@ -1,13 +1,30 @@
---
# tasks file for init-charles
# tasks file for init-nonroot
###################
# Make the user
- name: "Add the user {{ username }}"
become: yes
user:
name: "{{ username }}"
password: "{{ system_password | password_hash('sha512') }}"
shell: /bin/bash
groups: wheel
append: yes
state: present
createhome: yes
###################
# Make temp dir
- name: Make ~/tmp dir
become: yes
file:
path: "/home/{{ username }}/temp"
state: directory
owner: "{{ username }}"
group: "{{ username }}"
mode: 0700

View File

@@ -8,11 +8,20 @@
- name: Set machine name with hostname command
become: yes
command: "hostname {{ machine_name }}"
tags:
- machine-name
- name: Create new /etc/hostname
become: yes
command: "echo {{ machine_name }} > /etc/hostname"
copy:
dest: /etc/hostname
content: "{{ machine_name }}"
owner: root
group: root
mode: u=rw,g=r,o=r
tags:
- machine-name
- name: Set hostname entry to 127.0.0.1 in /etc/hosts
@@ -22,6 +31,8 @@
regexp: "^127.0.0.1"
line: "127.0.0.1 {{ machine_name }}"
create: yes
tags:
- machine-name
############
@@ -52,8 +63,6 @@
state: present
############
# System
@@ -71,4 +80,3 @@
path: /temp
state: directory
mode: 0777

View File

@@ -17,6 +17,7 @@
- build-essential
- curl
- wget
- unzip
- graphviz
- openssh-server
- ncdu

View File

@@ -65,7 +65,7 @@
- name: "Install /etc/letsencrypt/options-nginx-ssl.conf"
become: yes
get_url:
url: "https://raw.githubusercontent.com/certbot/certbot/master/certbot-nginx/certbot_nginx/options-ssl-nginx.conf"
url: "https://raw.githubusercontent.com/certbot/certbot/master/certbot-nginx/certbot_nginx/_internal/tls_configs/options-ssl-nginx.conf"
dest: /etc/letsencrypt/options-ssl-nginx.conf
when:
- not ssl_options_installed.stat.exists
@@ -79,7 +79,7 @@
- name: "Install /etc/letsencrypt/ssl-dhparams.conf"
become: yes
get_url:
url: "https://raw.githubusercontent.com/certbot/certbot/master/certbot/ssl-dhparams.pem"
url: "https://raw.githubusercontent.com/certbot/certbot/master/certbot/certbot/ssl-dhparams.pem"
dest: /etc/letsencrypt/ssl-dhparams.pem
when:
- not dhparams_installed.stat.exists

View File

@@ -1,2 +0,0 @@
---
# defaults file for pod-bots

View File

@@ -1,6 +0,0 @@
---
# handlers file for pod-charlesreid1
#
- name: restart pod-charlesreid1
service: name=pod-charlesreid1 state=restarted

View File

@@ -1,60 +0,0 @@
galaxy_info:
author: your name
description: your description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Some suggested licenses:
# - BSD (default)
# - MIT
# - GPLv2
# - GPLv3
# - Apache
# - CC-BY
license: license (GPLv2, CC-BY, etc)
min_ansible_version: 2.4
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
# Optionally specify the branch Galaxy will use when accessing the GitHub
# repo for this role. During role install, if no tags are available,
# Galaxy will use this branch. During import Galaxy will access files on
# this branch. If Travis integration is configured, only notifications for this
# branch will be accepted. Otherwise, in all cases, the repo's default branch
# (usually master) will be used.
#github_branch:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@@ -1,2 +0,0 @@
---
# tasks file for pod-bots

View File

@@ -1,2 +0,0 @@
localhost

View File

@@ -1,5 +0,0 @@
---
- hosts: localhost
remote_user: root
roles:
- pod-bots

View File

@@ -1,2 +0,0 @@
---
# vars file for pod-bots

View File

@@ -1,17 +1,72 @@
Role Name
=========
pod-charlesreid1 ansible role
=============================
A brief description of the role goes here.
This ansible role installs pod-charlesreid1, a docker pod that runs charlesreid1.com.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Docker and docker-compose must be available on the target host (installed by the docker role in the base playbook), and LetsEncrypt certificates for the top-level and gitea server names should be installed (see the letsencrypt role).
Tasks
-----
phase 1:
- clone pod contents
phase 2:
- /www setup
- server_name_default top level domain clone
- docker and docker compose checks
- mediawiki prep
- gitea prep
phase 3:
- construct the pod (docker-compose build)
- install service
- (port mapping in Dockerfile)
- (letsencrypt cert check)
- enable service
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
List of role variables (set in `defaults/main.yml`):
- `username`
- `pod_install_dir`
- `admin_email`
- `server_name_default`
- `nginx_subdomains_ip`
- `port_default`
- `port_gitea`
- `port_ssl_default`
- `port_ssl_gitea`
- `gitea_app_name`
- `gitea_domain`
- `gitea_secret_key`
- `gitea_internal_token`
- `mysql_password`
- `mediawiki_secretkey`
Most of these have default values set from top-level Ansible variables
prefixed with `charlesreid1`:
- `nonroot_user` (used to set `username`)
- `charlesreid1_admin_email`
- `charlesreid1_server_name_default`
- `charlesreid1_nginx_subdomains_ip`
- `charlesreid1_port_default`
- `charlesreid1_port_gitea`
- `charlesreid1_port_ssl_default`
- `charlesreid1_port_ssl_gitea`
- `charlesreid1_gitea_secret_key`
- `charlesreid1_gitea_internal_token`
- `charlesreid1_mysql_password`
- `charlesreid1_mediawiki_secretkey`
Dependencies
------------

View File

@@ -18,17 +18,9 @@ nginx_subdomains_ip: "{{ charlesreid1_nginx_subdomains_ip }}"
port_default: "{{ charlesreid1_port_default }}"
port_gitea: "{{ charlesreid1_port_gitea }}"
port_files: "{{ charlesreid1_port_files }}"
port_pages: "{{ charlesreid1_port_pages }}"
port_hooks: "{{ charlesreid1_port_hooks }}"
port_bots: "{{ charlesreid1_port_bots }}"
port_ssl_default: "{{ charlesreid1_port_ssl_default }}"
port_ssl_gitea: "{{ charlesreid1_port_ssl_gitea }}"
port_ssl_files: "{{ charlesreid1_port_ssl_files }}"
port_ssl_pages: "{{ charlesreid1_port_ssl_pages }}"
port_ssl_hooks: "{{ charlesreid1_port_ssl_hooks }}"
port_ssl_bots: "{{ charlesreid1_port_ssl_bots }}"
# end nginx configuration variables
# ----------------

View File

@@ -0,0 +1,45 @@
---
# #####################################
# CHECK SSL CERTIFICATES
#
# LetsEncrypt role will install certs as needed,
# but should probably check certs anyway.
- name: Check if LetsEncrypt cert for default server name is present
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-certs
    - letsencrypt
  stat:
    path: "/etc/letsencrypt/live/{{ server_name_default }}"
  register: register_letsencrypt_livecert_default

- name: Check if LetsEncrypt cert for gitea server name is present
  tags:
    - letsencrypt
    - pod-charlesreid1
    - pod-charlesreid1-certs
  stat:
    path: "/etc/letsencrypt/live/git.{{ server_name_default }}"
  register: register_letsencrypt_livecert_gitea

# If top level and subdomain certs are present, start/restart the
# pod-charlesreid1 service.
- name: Enable pod-charlesreid1 service
  become: yes
  service:
    name: pod-charlesreid1
    enabled: yes
    state: restarted
  when:
    # stat only reports the "executable" attribute when the file exists;
    # default(false) makes the conditional skip cleanly instead of
    # erroring out when docker-compose is missing.
    # (pod_register_docker_compose is registered in docker_checks.yml,
    # which must be included before this file.)
    - "pod_register_docker_compose.stat.executable | default(false)"
    - "register_letsencrypt_livecert_default.stat.exists"
    - "register_letsencrypt_livecert_gitea.stat.exists"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-certs
    - pod-charlesreid1-services

View File

@@ -0,0 +1,30 @@
---
# #####################################
# DOCKER/DOCKER COMPOSE
# The docker role, in the base playbook,
# will install docker-compose, but we want
# to double check that the executable exists
- name: Check that docker compose executable is available
  stat:
    path: "/usr/local/bin/docker-compose"
  register: pod_register_docker_compose
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-docker

# Also make sure the docker daemon is running
- name: Enable docker service
  become: yes
  service:
    name: docker
    enabled: yes
    # "started", not "restarted": the intent stated above is to make sure
    # the daemon is running; "restarted" would bounce the daemon (and every
    # running container) on each playbook run.
    state: started
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-docker
    - pod-charlesreid1-services

View File

@@ -0,0 +1,44 @@
---
# #####################################
# GITEA PREP
#
# We have to create an app.ini file,
# use a jinja template in the pod-charlesreid1
# repository
- name: Fetch the app.ini jinja template
  fetch:
    src: "{{ pod_install_dir }}/d-gitea/custom/conf/app.ini.j2"
    dest: "/tmp/app.ini.j2"
    flat: yes
    fail_on_missing: yes
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-gitea

- name: Install the app.ini file
  become: yes
  become_user: "{{ username }}"
  template:
    src: "/tmp/app.ini.j2"
    dest: "{{ pod_install_dir }}/d-gitea/custom/conf/app.ini"
    owner: "{{ username }}"
    group: "{{ username }}"
    # Quoted so YAML does not reinterpret the leading-zero octal literal.
    mode: "0640"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-gitea

- name: Fix ownership of all custom dirs for gitea
  become: yes
  # Use the file module (recurse) rather than shelling out to chown -R:
  # idempotent, reports changed-state correctly, and avoids the original
  # "{{pod_install_dir }}" spacing typo in the command string.
  file:
    path: "{{ pod_install_dir }}/d-gitea/custom"
    state: directory
    recurse: yes
    owner: "{{ username }}"
    group: "{{ username }}"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-gitea

View File

@@ -1,9 +1,5 @@
---
###########################
# Set up charlesreid1.com docker pod
#
# git.charlesreid1.com/docker/pod-charlesreid1
# git.charlesreid1.com/docker/d-nginx-charlesreid1
# tasks file for pod-charlesreid1
#
# Tasks:
# ------
@@ -11,7 +7,7 @@
# clone pod contents
#
# /www setup
# server-name_default top level domain clone
# server_name_default top level domain clone
# docker and docker compose checks
# mediawiki prep
# gitea prep
@@ -21,645 +17,36 @@
# (port mapping in Dockerfile)
# (letsencrypt cert check)
# enable service
#
###########################
# clone pod contents
- include: pod_clone.yml
# #####################################
# CLONE POD-CHARLESREID1
# /www setup
- include: www_setup.yml
# docker and docker compose checks
- include: docker_checks.yml
# Check if we already cloned it
- name: Check if pod-charlesreid1 repo is already cloned
stat:
path: "{{ pod_install_dir }}"
register: pod_charlesreid1_clone_check
tags:
- pod-charlesreid1
# nginx
- include: nginx_prep.yml
# mediawiki
- include: mw_prep.yml
# Clone it
- name: Clone pod-charlesreid1
become: yes
become_user: "{{ username }}"
git:
repo: 'https://github.com/charlesreid1-docker/pod-charlesreid1.git'
dest: "{{ pod_install_dir }}"
recursive: yes
when:
- "not pod_charlesreid1_clone_check.stat.exists"
tags:
- pod-charlesreid1
# gitea
- include: gitea_prep.yml
# construct pod
- include: pod_construct.yml
# Pull it
- name: Pull pod-charlesreid1
become: yes
become_user: "{{ username }}"
command: "git pull"
args:
chdir: "{{ pod_install_dir }}"
when:
- "pod_charlesreid1_clone_check.stat.exists"
tags:
- pod-charlesreid1
# install pod service
- include: pod_services.yml
# certs
- include: certs.yml
# Pull submodules
- name: Pull pod-charlesreid1 submodules
become: yes
become_user: "{{ username }}"
command: "git submodule update --remote"
args:
chdir: "{{ pod_install_dir }}"
when:
- "pod_charlesreid1_clone_check.stat.exists"
tags:
- pod-charlesreid1
# #####################################
# BUILD DOCKER-COMPOSE FILE FROM TEMPLATE
#
# Note: Don't use sed to replace the MySQL password placeholder.
# Use the fetch module to copy the template from the remote machine
# (i.e., the one in the git repo) to the local directory.
# Then use the template module to use the template.
- name: Fetch the docker-compose template from the remote machine
run_once: true
fetch:
src: "{{ pod_install_dir }}/docker-compose.yml.j2"
dest: "/tmp/pod-charlesreid1-docker-compose.yml.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-docker
- name: Install the docker-compose file
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/pod-charlesreid1-docker-compose.yml.j2"
dest: "{{ pod_install_dir }}/docker-compose.yml"
mode: 0640
force: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-docker
# #####################################
# SET UP /WWW DIRECTORY
#
#
# Create /www directory
# for main domain content
- name: Create the /www directory
become: yes
file:
path: "/www"
state: directory
recurse: yes
owner: "{{ username }}"
group: "{{ username }}"
tags:
- pod-charlesreid1
- pod-charlesreid1-content
# Template scripts to populate /www
# with content is done in the
# rules below...
# #####################################
# MAIN DOMAIN PAGE SETUP (ALL)
#
# /www/<domain>/
# git/ <-- .git dir for charlesreid1.com repo gh-pages branch
# git.data/ <-- .git dir for charlesreid1-data
# htdocs/ <-- clone of charlesreid1.com repo gh-pages branch
# data/ <-- clone of charlesreid1-data
# -------------
# Install and run the clone www script
- name: "Fetch the charlesreid1.com clone www script template"
fetch:
src: "{{ pod_install_dir }}/scripts/git_clone_www.py.j2"
dest: "/tmp/git_clone_www.py.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-content
- name: "Install the charlesreid1.com clone www script"
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/git_clone_www.py.j2"
dest: "{{ pod_install_dir }}/scripts/git_clone_www.py"
mode: 0755
force: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-content
- name: "Run the charlesreid1.com clone www script to set up {{ server_name_default }}"
command: "python {{ pod_install_dir }}/scripts/git_clone_www.py"
become: yes
become_user: "{{ username }}"
tags:
- pod-charlesreid1
- pod-charlesreid1-content
# ------------------
# Install and run the pull www script
- name: "Fetch the charlesreid1.com pull www script template"
fetch:
src: "{{ pod_install_dir }}/scripts/git_pull_www.py.j2"
dest: "/tmp/git_pull_www.py.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-content
- name: "Install the charlesreid1.com pull www script"
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/git_pull_www.py.j2"
dest: "{{ pod_install_dir }}/scripts/git_pull_www.py"
mode: 0755
force: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-content
- name: "Run the charlesreid1.com pull www script to update {{ server_name_default }}"
command: "python {{ pod_install_dir }}/scripts/git_pull_www.py"
become: yes
become_user: "{{ username }}"
tags:
- pod-charlesreid1
- pod-charlesreid1-content
# #####################################
# DOCKER/DOCKER COMPOSE
# The docker role, in the base playbook,
# will install docker-compose, but we want
# to double check that the executable exists
- name: Check that docker compose executable is available
stat:
path: "/usr/local/bin/docker-compose"
register: pod_register_docker_compose
tags:
- pod-charlesreid1
- pod-charlesreid1-docker
# Also make sure the docker daemon is running
- name: Enable docker service
become: yes
service:
name: docker
enabled: yes
state: restarted
tags:
- pod-charlesreid1
- pod-charlesreid1-docker
- pod-charlesreid1-services
# #####################################
# NGINX CONFIG PREP
#
# prepare the config files for the
# charlesreid1.com nginx server:
# - copy templates from remote machine
# - clean conf.d directory
# - copy rendered templates to remote machine
- name: Clean d-nginx-charlesreid1 conf.d directory
become: yes
become_user: "{{ username }}"
command: "python {{ pod_install_dir }}/d-nginx-charlesreid1/scripts/clean_config.py"
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
# Install the d-nginx-charlesreid1 configuration templates
#
# -------------
# HTTP
- name: Fetch d-nginx-charlesreid1 http configuration templates from remote machine
run_once: true
fetch:
src: "{{ pod_install_dir }}/d-nginx-charlesreid1/conf.d_templates/http.DOMAIN.conf.j2"
dest: "/tmp/http.DOMAIN.conf.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
- name: Install the d-nginx-charlesreid1 http configuration templates
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/http.DOMAIN.conf.j2"
dest: "{{ pod_install_dir }}/d-nginx-charlesreid1/conf.d/http.{{ server_name_default }}.conf"
force: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
# -------------
# HTTPS
- name: Fetch d-nginx-charlesreid1 https configuration templates from remote machine
run_once: true
fetch:
src: "{{ pod_install_dir }}/d-nginx-charlesreid1/conf.d_templates/https.DOMAIN.conf.j2"
dest: "/tmp/https.DOMAIN.conf.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
- name: Install the d-nginx-charlesreid1 https configuration templates
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/https.DOMAIN.conf.j2"
dest: "{{ pod_install_dir }}/d-nginx-charlesreid1/conf.d/https.{{ server_name_default }}.conf"
force: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
# -------------
# HTTPS subdomains
- name: Fetch d-nginx-charlesreid1 https subdomains configuration templates from remote machine
run_once: true
fetch:
src: "{{ pod_install_dir }}/d-nginx-charlesreid1/conf.d_templates/https.DOMAIN.subdomains.conf.j2"
dest: "/tmp/https.DOMAIN.subdomains.conf.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
- name: Install the d-nginx-charlesreid1 https subdomains configuration templates
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/https.DOMAIN.subdomains.conf.j2"
dest: "{{ pod_install_dir }}/d-nginx-charlesreid1/conf.d/https.{{ server_name_default }}.subdomains.conf"
force: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
# #####################################
# MEDIAWIKI PREP
#
# We have to build the extensions dir for the MediaWiki container
# /pod-charlesreid1/d-mediawiki/charlesreid1-config/mediawiki/build_extensions_dir.sh
#
# Then we have to use the LocalSettings.php and
# Apache config file templates to configure
# the mediawiki container to run correctly.
- name: Check if extensions dir already exists
stat:
path: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/mediawiki/extensions"
register: extensions_dir_exists
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
- name: Make mediawiki extensions dir build script executable
become: yes
become_user: "{{ username }}"
file:
path: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/mediawiki/build_extensions_dir.sh"
mode: "u+x"
when:
- "not extensions_dir_exists.stat.exists"
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
- name: Build the mediawiki extensions dir
become: yes
become_user: "{{ username }}"
command: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/mediawiki/build_extensions_dir.sh"
args:
chdir: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/mediawiki"
when:
- "not extensions_dir_exists.stat.exists"
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
# Deal with MediaWiki configuration templates:
# - LocalSettings.php
# - Apache config file
- name: Fetch the LocalSettings.php jinja template
fetch:
src: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/mediawiki/LocalSettings.php.j2"
dest: "/tmp/LocalSettings.php.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
- name: Install the LocalSettings.php file
become: yes
template:
src: "/tmp/LocalSettings.php.j2"
dest: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/mediawiki/LocalSettings.php"
owner: "{{ username }}"
group: "{{ username }}"
mode: 0640
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
- name: Fetch the Apache config jinja template
fetch:
src: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/apache/charlesreid1.wiki.conf.j2"
dest: "/tmp/charlesreid1.wiki.conf.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
- name: Install the Apache config file
become: yes
template:
src: "/tmp/charlesreid1.wiki.conf.j2"
dest: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/apache/charlesreid1.wiki.conf"
owner: "{{ username }}"
group: "{{ username }}"
mode: 0640
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
# #####################################
# GITEA PREP
#
# We have to create an app.ini file,
# use a jinja template in the pod-charlesreid1
# repository
- name: Fetch the app.ini jinja template
fetch:
src: "{{ pod_install_dir }}/d-gitea/custom/conf/app.ini.j2"
dest: "/tmp/app.ini.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-gitea
- name: Install the app.ini file
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/app.ini.j2"
dest: "{{ pod_install_dir }}/d-gitea/custom/conf/app.ini"
owner: "{{ username }}"
group: "{{ username }}"
mode: 0640
tags:
- pod-charlesreid1
- pod-charlesreid1-gitea
- name: Fix ownership of all custom dirs for gitea
become: yes
command: "chown -R {{ username }}:{{ username }} {{pod_install_dir }}/d-gitea/custom"
tags:
- pod-charlesreid1
- pod-charlesreid1-gitea
# #####################################
# CONSTRUCT THE POD
#
# This task is very time-consuming.
- name: Build pod-charlesreid1 from scratch
become: yes
become_user: "{{ username }}"
command: "/usr/local/bin/docker-compose build --no-cache"
args:
chdir: "{{ pod_install_dir }}"
when:
- "pod_register_docker_compose.stat.exists"
tags:
- pod-charlesreid1
- pod-charlesreid1-docker
# #####################################
# INSTALL STARTUP SERVICE
#
# Check if the charlesreid1 docker pod service
# is installed. If not, install it.
- name: Check if pod-charlesreid1 service is installed
stat:
path: /etc/systemd/system/pod-charlesreid1.service
register: pod_charlesreid1_service_check
tags:
- pod-charlesreid1
- pod-charlesreid1-services
- name: Install pod-charlesreid1 service
become: yes
template:
src: pod-charlesreid1.service.j2
dest: /etc/systemd/system/pod-charlesreid1.service
mode: 0774
when:
- "not pod_charlesreid1_service_check.stat.exists"
tags:
- pod-charlesreid1
- pod-charlesreid1-services
# #####################################
# CHECK SSL CERTIFICATES
#
# LetsEncrypt role will install certs as needed,
# but should probably check certs anyway.
- name: Check if LetsEncrypt cert for default server name is present
tags:
- pod-charlesreid1
- pod-charlesreid1-certs
- letsencrypt
stat:
path: "/etc/letsencrypt/live/{{ server_name_default }}"
register: register_letsencrypt_livecert_default
- name: Check if LetsEncrypt cert for gitea server name is present
tags:
- letsencrypt
- pod-charlesreid1
- pod-charlesreid1-certs
stat:
path: "/etc/letsencrypt/live/git.{{ server_name_default }}"
register: register_letsencrypt_livecert_gitea
#- name: Check if LetsEncrypt cert for files server name is present
# tags:
# - letsencrypt
# - pod-charlesreid1
# - pod-charlesreid1-certs
# stat:
# path: "/etc/letsencrypt/live/files.{{ server_name_default }}"
# register: register_letsencrypt_livecert_files
- name: Check if LetsEncrypt cert for pages server name is present
tags:
- letsencrypt
- pod-charlesreid1
- pod-charlesreid1-certs
stat:
path: "/etc/letsencrypt/live/pages.{{ server_name_default }}"
register: register_letsencrypt_livecert_pages
- name: Check if LetsEncrypt cert for hooks server name is present
tags:
- letsencrypt
- pod-charlesreid1
- pod-charlesreid1-certs
stat:
path: "/etc/letsencrypt/live/hooks.{{ server_name_default }}"
register: register_letsencrypt_livecert_hooks
- name: Check if LetsEncrypt cert for bots server name is present
tags:
- letsencrypt
- pod-charlesreid1
- pod-charlesreid1-certs
stat:
path: "/etc/letsencrypt/live/bots.{{ server_name_default }}"
register: register_letsencrypt_livecert_bots
# If top level and subdomain certs are present, start/restart the
# pod-charlesreid1 service.
- name: Enable pod-charlesreid1 service
become: yes
service:
name: pod-charlesreid1
enabled: yes
state: restarted
when:
- "pod_register_docker_compose.stat.executable"
- "register_letsencrypt_livecert_default.stat.exists"
- "register_letsencrypt_livecert_gitea.stat.exists"
tags:
- pod-charlesreid1
- pod-charlesreid1-certs
- pod-charlesreid1-services
# See the pod-charlesreid1 documentation
# pages for what to do from here.
#
# Specifically, restore:
# - mediawiki database backups
# - mediawiki files backups
# - gitea dump zip file
# - gitea avatars zip file
#
# Restore scripts are located in the
# pod-charlesreid1 repository:
# - database restore script: utils-mysql
# - mediawiki image restore script: utils-mw
# - gitea database and avatar: utils-gitea
- name: Wait for 10 seconds
command: "sleep 15"
- name: Run fix_skins.sh script
become: yes
become_user: "{{ username }}"
command: "bash {{ pod_install_dir }}/d-mediawiki/fix_skins.sh"
args:
chdir: "{{ pod_install_dir }}/d-mediawiki"
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
- name: Run fix_LocalSettings.sh script
become: yes
become_user: "{{ username }}"
command: "bash {{ pod_install_dir }}/d-mediawiki/fix_LocalSettings.sh"
args:
chdir: "{{ pod_install_dir }}/d-mediawiki"
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
# mediawiki fixes
- include: mw_fixes.yml
# enable pod service
- include: pod_enable.yml

View File

@@ -0,0 +1,29 @@
---
# #####################################
# MEDIAWIKI FIXES
#
# Install and set up the Mediawiki skin,
# LocalSettings.php file, etc.
# Give the freshly (re)started containers a moment to come up before
# running the fix scripts.
- name: Wait for 15 seconds
  # wait_for with only a timeout is a plain delay; it avoids spawning a
  # shell just to run "sleep". (The original task name said 10 seconds
  # but slept 15 — the name now matches the actual delay.)
  wait_for:
    timeout: 15
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-mw

- name: Run fix_skins.sh script
  become: yes
  become_user: "{{ username }}"
  command: "bash {{ pod_install_dir }}/d-mediawiki/fix_skins.sh"
  args:
    chdir: "{{ pod_install_dir }}/d-mediawiki"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-mw

- name: Run fix_LocalSettings.sh script
  become: yes
  become_user: "{{ username }}"
  command: "bash {{ pod_install_dir }}/d-mediawiki/fix_LocalSettings.sh"
  args:
    chdir: "{{ pod_install_dir }}/d-mediawiki"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-mw

View File

@@ -0,0 +1,99 @@
---
# #####################################
# MEDIAWIKI PREP
#
# We have to build the extensions dir for the MediaWiki container
# /pod-charlesreid1/d-mediawiki/charlesreid1-config/mediawiki/build_extensions_dir.sh
#
# Then we have to use the LocalSettings.php and
# Apache config file templates to configure
# the mediawiki container to run correctly.
- name: Check if extensions dir already exists
stat:
path: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/mediawiki/extensions"
register: extensions_dir_exists
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
- name: Make mediawiki extensions dir build script executable
become: yes
become_user: "{{ username }}"
file:
path: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/mediawiki/build_extensions_dir.sh"
mode: "u+x"
when:
- "not extensions_dir_exists.stat.exists"
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
- name: Build the mediawiki extensions dir
become: yes
become_user: "{{ username }}"
command: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/mediawiki/build_extensions_dir.sh"
args:
chdir: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/mediawiki"
when:
- "not extensions_dir_exists.stat.exists"
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
# Deal with MediaWiki configuration templates:
# - LocalSettings.php
# - Apache config file
- name: Fetch the LocalSettings.php jinja template
fetch:
src: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/mediawiki/LocalSettings.php.j2"
dest: "/tmp/LocalSettings.php.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
- name: Install the LocalSettings.php file
become: yes
template:
src: "/tmp/LocalSettings.php.j2"
dest: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/mediawiki/LocalSettings.php"
owner: "{{ username }}"
group: "{{ username }}"
mode: 0640
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
- name: Fetch the Apache config jinja template
fetch:
src: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/apache/charlesreid1.wiki.conf.j2"
dest: "/tmp/charlesreid1.wiki.conf.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-mw
- name: Install the Apache config file
become: yes
template:
src: "/tmp/charlesreid1.wiki.conf.j2"
dest: "{{ pod_install_dir }}/d-mediawiki/charlesreid1-config/apache/charlesreid1.wiki.conf"
owner: "{{ username }}"
group: "{{ username }}"
mode: 0640
tags:
- pod-charlesreid1
- pod-charlesreid1-mw

View File

@@ -0,0 +1,100 @@
---
# #####################################
# NGINX CONFIG PREP
#
# prepare the config files for the
# charlesreid1.com nginx server:
# - copy templates from remote machine
# - clean conf.d directory
# - copy rendered templates to remote machine
- name: Clean d-nginx-charlesreid1 conf.d directory
become: yes
become_user: "{{ username }}"
command: "python {{ pod_install_dir }}/d-nginx-charlesreid1/scripts/clean_config.py"
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
# Install the d-nginx-charlesreid1 configuration templates
#
# -------------
# HTTP
- name: Fetch d-nginx-charlesreid1 http configuration templates from remote machine
fetch:
src: "{{ pod_install_dir }}/d-nginx-charlesreid1/conf.d_templates/http.DOMAIN.conf.j2"
dest: "/tmp/http.DOMAIN.conf.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
- name: Install the d-nginx-charlesreid1 http configuration templates
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/http.DOMAIN.conf.j2"
dest: "{{ pod_install_dir }}/d-nginx-charlesreid1/conf.d/http.{{ server_name_default }}.conf"
force: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
# -------------
# HTTPS
- name: Fetch d-nginx-charlesreid1 https configuration templates from remote machine
fetch:
src: "{{ pod_install_dir }}/d-nginx-charlesreid1/conf.d_templates/https.DOMAIN.conf.j2"
dest: "/tmp/https.DOMAIN.conf.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
- name: Install the d-nginx-charlesreid1 https configuration templates
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/https.DOMAIN.conf.j2"
dest: "{{ pod_install_dir }}/d-nginx-charlesreid1/conf.d/https.{{ server_name_default }}.conf"
force: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
# -------------
# HTTPS subdomains
- name: Fetch d-nginx-charlesreid1 https subdomains configuration templates from remote machine
fetch:
src: "{{ pod_install_dir }}/d-nginx-charlesreid1/conf.d_templates/https.DOMAIN.subdomains.conf.j2"
dest: "/tmp/https.DOMAIN.subdomains.conf.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx
- name: Install the d-nginx-charlesreid1 https subdomains configuration templates
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/https.DOMAIN.subdomains.conf.j2"
dest: "{{ pod_install_dir }}/d-nginx-charlesreid1/conf.d/https.{{ server_name_default }}.subdomains.conf"
force: yes
tags:
- pod-charlesreid1
- pod-charlesreid1-nginx

View File

@@ -0,0 +1,105 @@
---
# #####################################
# CLONE POD-CHARLESREID1
#
# Check for an existing clone of the pod-charlesreid1 repo, clone it
# if absent, otherwise pull it and update its submodules, then render
# the docker-compose template into the checkout.
#
# Check if we already cloned it
- name: Check if pod-charlesreid1 repo is already cloned
  stat:
    path: "{{ pod_install_dir }}"
  register: pod_charlesreid1_clone_check
  tags:
    - git
    - pod-charlesreid1

# Clone it (first run only: the stat check above found no checkout)
- name: Clone pod-charlesreid1
  become: yes
  become_user: "{{ username }}"
  git:
    repo: 'https://github.com/charlesreid1-docker/pod-charlesreid1.git'
    dest: "{{ pod_install_dir }}"
    recursive: yes
  when:
    - "not pod_charlesreid1_clone_check.stat.exists"
  tags:
    - git
    - pod-charlesreid1

# Pull it (subsequent runs: checkout already exists)
# NOTE(review): a raw `git pull` always reports "changed" and fails on
# diverged history — presumably acceptable for this repo; verify.
- name: Pull pod-charlesreid1
  become: yes
  become_user: "{{ username }}"
  command: "git pull"
  args:
    chdir: "{{ pod_install_dir }}"
  when:
    - "pod_charlesreid1_clone_check.stat.exists"
  tags:
    - git
    - pod-charlesreid1

# Init submodules (existing checkouts only; a fresh clone already
# initialized them via `recursive: yes` above)
- name: Initialize pod-charlesreid1 submodules
  become: yes
  become_user: "{{ username }}"
  command: "git submodule update --init"
  args:
    chdir: "{{ pod_install_dir }}"
  when:
    - "pod_charlesreid1_clone_check.stat.exists"
  tags:
    - git
    - pod-charlesreid1

# Pull submodules (update each submodule to its remote branch tip)
- name: Pull pod-charlesreid1 submodules
  become: yes
  become_user: "{{ username }}"
  command: "git submodule update --remote"
  args:
    chdir: "{{ pod_install_dir }}"
  when:
    - "pod_charlesreid1_clone_check.stat.exists"
  tags:
    - git
    - pod-charlesreid1

# #####################################
# BUILD DOCKER-COMPOSE FILE FROM TEMPLATE
#
# Note: Don't use sed to replace the MySQL password placeholder.
# Use the fetch module to copy the template from the remote machine
# (i.e., the one in the git repo) to the local directory.
# Then use the template module to use the template.
# (fetch copies remote -> controller; template renders controller -> remote)
- name: Fetch the docker-compose template from the remote machine
  fetch:
    src: "{{ pod_install_dir }}/docker-compose.yml.j2"
    dest: "/tmp/pod-charlesreid1-docker-compose.yml.j2"
    flat: yes
    fail_on_missing: yes
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-docker

# Render the fetched template back onto the remote machine, applying
# this play's variables (including the MySQL password).
- name: Install the docker-compose file
  become: yes
  become_user: "{{ username }}"
  template:
    src: "/tmp/pod-charlesreid1-docker-compose.yml.j2"
    dest: "{{ pod_install_dir }}/docker-compose.yml"
    mode: 0640
    force: yes
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-docker

View File

@@ -0,0 +1,18 @@
---
# #####################################
# CONSTRUCT THE POD
#
# This task is very time-consuming.
# (--no-cache forces every image layer to be rebuilt from scratch.)
#
# NOTE(review): `pod_register_docker_compose` is not registered in this
# file — presumably a sibling task file stats /usr/local/bin/docker-compose
# and registers it (the webhooks/dockprom roles follow that pattern).
# Verify the include order, otherwise this `when:` fails with an
# undefined variable.
- name: Build pod-charlesreid1 from scratch
  become: yes
  become_user: "{{ username }}"
  command: "/usr/local/bin/docker-compose build --no-cache"
  args:
    chdir: "{{ pod_install_dir }}"
  when:
    - "pod_register_docker_compose.stat.exists"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-docker

View File

@@ -0,0 +1,59 @@
---
# Services related to pod-charlesreid1
#######################################
# DISABLE STARTUP SERVICES
#
# Disable annoying services like nginx.
# NOTE(review): these shell out to systemctl via `command`, so they are
# never idempotent (always "changed") and will fail on hosts where
# nginx is not installed — consider the service/systemd modules.
- name: Disable nginx service (annoying)
  become: yes
  command: "systemctl disable nginx"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-services

- name: Stop nginx service
  become: yes
  command: "systemctl stop nginx"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-services

# #####################################
# INSTALL STARTUP SERVICES
#
# Check if the charlesreid1 docker pod service
# is installed. If not, install it.
- name: Check if pod-charlesreid1 service is installed
  stat:
    path: /etc/systemd/system/pod-charlesreid1.service
  register: pod_charlesreid1_service_check
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-services

# Render the unit file from the role template. First install only:
# guarded by the stat check above, so later changes to the template
# are NOT re-applied to hosts that already have the unit file.
- name: Install pod-charlesreid1 service
  become: yes
  template:
    src: pod-charlesreid1.service.j2
    dest: /etc/systemd/system/pod-charlesreid1.service
    mode: 0774
  when:
    - "not pod_charlesreid1_service_check.stat.exists"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-services

# NOTE(review): no `systemctl daemon-reload` is run between installing
# the unit file and enabling it — systemd may not see a fresh unit.
- name: Enable pod-charlesreid1 service
  become: yes
  command: "systemctl enable pod-charlesreid1"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-services

View File

@@ -0,0 +1,101 @@
---
# #####################################
# SET UP /WWW DIRECTORY
#
#
# Create /www directory
# for main domain content
# NOTE(review): `recurse: yes` re-applies owner/group to the entire
# tree on every run, so this task reports "changed" once content
# exists under /www.
- name: Create the /www directory
  become: yes
  file:
    path: "/www"
    state: directory
    recurse: yes
    owner: "{{ username }}"
    group: "{{ username }}"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-content

# Template scripts to populate /www
# with content is done in the
# rules below...
# #####################################
# MAIN DOMAIN PAGE SETUP (ALL)
#
# /www/<domain>/
#     git/     <-- .git dir for charlesreid1.com repo gh-pages branch
#     htdocs/  <-- clone of charlesreid1.com repo gh-pages branch
# -------------
# Install and run the clone www script
# (fetch copies the jinja template remote -> controller; template then
# renders it controller -> remote with this play's variables)
- name: "Fetch the charlesreid1.com clone www script template"
  fetch:
    src: "{{ pod_install_dir }}/scripts/git_clone_www.py.j2"
    dest: "/tmp/git_clone_www.py.j2"
    flat: yes
    fail_on_missing: yes
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-content

- name: "Install the charlesreid1.com clone www script"
  become: yes
  become_user: "{{ username }}"
  template:
    src: "/tmp/git_clone_www.py.j2"
    dest: "{{ pod_install_dir }}/scripts/git_clone_www.py"
    mode: 0755
    force: yes
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-content

# NOTE(review): runs on every play (no creates/when guard) — the
# script itself is presumably a no-op when /www is already populated;
# verify. `pyenv_python` is defined outside this file (pyenv role).
- name: "Run the charlesreid1.com clone www script to set up {{ server_name_default }}"
  command: "{{ pyenv_python }} {{ pod_install_dir }}/scripts/git_clone_www.py"
  become: yes
  become_user: "{{ username }}"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-content

# ------------------
# Install and run the pull www script (same fetch/render/run pattern
# as the clone script above, but updates an existing /www checkout)
- name: "Fetch the charlesreid1.com pull www script template"
  fetch:
    src: "{{ pod_install_dir }}/scripts/git_pull_www.py.j2"
    dest: "/tmp/git_pull_www.py.j2"
    flat: yes
    fail_on_missing: yes
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-content

- name: "Install the charlesreid1.com pull www script"
  become: yes
  become_user: "{{ username }}"
  template:
    src: "/tmp/git_pull_www.py.j2"
    dest: "{{ pod_install_dir }}/scripts/git_pull_www.py"
    mode: 0755
    force: yes
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-content

- name: "Run the charlesreid1.com pull www script to update {{ server_name_default }}"
  command: "{{ pyenv_python }} {{ pod_install_dir }}/scripts/git_pull_www.py"
  become: yes
  become_user: "{{ username }}"
  tags:
    - pod-charlesreid1
    - pod-charlesreid1-content

View File

@@ -5,6 +5,8 @@ After=docker.service
[Service]
Restart=always
StandardError=null
StandardOutput=null
ExecStart=/usr/local/bin/docker-compose -f {{ pod_install_dir }}/docker-compose.yml up
ExecStop=/usr/local/bin/docker-compose -f {{ pod_install_dir }}/docker-compose.yml stop

View File

@@ -0,0 +1,5 @@
pod-dockprom ansible role
=============================
This Ansible role installs pod-dockprom, a Docker pod for monitoring instances via a dashboard.

View File

@@ -0,0 +1,20 @@
---
# defaults file for pod-dockprom
# (NOTE(review): header previously said "pod-golly" — copy-paste from
# the golly role; corrected to name this role.)
# Nonroot user that owns the checkout and runs docker-compose.
username: "{{ nonroot_user }}"
# Where the pod-dockprom repo is cloned on the remote machine.
dockprom_install_dir: "/home/{{ username }}/pod-dockprom"
# Branch of the pod-dockprom repo to check out.
dockprom_branch_name: "main"
# Name of the pod's environment file.
env_file: "environment"
# Boolean to determine whether the
# pod-dockprom service installed
# runs the master server pod,
# or the (monitored) client pod.
# Deliberately a *string* ("true"/"false"), not a YAML boolean: the
# task files compare it with `install_client_service | lower == "true"`.
install_client_service: "true"
# IP address the dockprom services bind to (loopback by default).
dockprom_bind_ip: "127.0.0.1"
# Admin credentials. The default_dockprom_* vars are not defined in
# this role — presumably supplied by group_vars (vault); verify.
dockprom_admin_user: "{{ default_dockprom_admin_user }}"
dockprom_admin_pass: "{{ default_dockprom_admin_pass }}"

View File

@@ -0,0 +1,76 @@
---
# #####################################
# CLONE POD-DOCKPROM
# (NOTE(review): header previously said "CLONE GOLLY-API" — copy-paste
# from the golly role.)
#
# Check if we already cloned it
- name: Check if pod-dockprom is already cloned
  stat:
    path: "{{ dockprom_install_dir }}"
  register: dockprom_clone_check
  tags:
    - git
    - pod-dockprom

# Clone it (first run only)
- name: Clone pod-dockprom
  become: yes
  become_user: "{{ username }}"
  git:
    repo: "https://git.charlesreid1.com/docker/pod-dockprom.git"
    dest: "{{ dockprom_install_dir }}"
    version: "{{ dockprom_branch_name }}"
  when:
    - "not dockprom_clone_check.stat.exists"
  tags:
    - git
    - pod-dockprom

# Fetch all remotes on an existing checkout
- name: Fetch from pod-dockprom remote
  become: yes
  become_user: "{{ username }}"
  command: "git fetch --all"
  args:
    chdir: "{{ dockprom_install_dir }}"
  when:
    - "dockprom_clone_check.stat.exists"
  tags:
    - git
    - pod-dockprom

# Git reset — discard any local modifications.
# NOTE(review): unlike the fetch above, the reset/clean/checkout tasks
# below carry no `when:` guard, so on a first run they execute right
# after the clone (harmless, but always reported as changed).
- name: Reset hard to head
  become: yes
  become_user: "{{ username }}"
  command: "git reset --hard HEAD"
  args:
    chdir: "{{ dockprom_install_dir }}"
  tags:
    - git
    - pod-dockprom

# Clean it — remove untracked files (-f) and directories (-d)
- name: Clean the repo
  become: yes
  become_user: "{{ username }}"
  command: "git clean -f -d"
  args:
    chdir: "{{ dockprom_install_dir }}"
  tags:
    - git
    - pod-dockprom

# Git check out — force the desired branch (-B), tracking its remote
- name: Check out the correct pod-dockprom branch
  become: yes
  become_user: "{{ username }}"
  command: "git checkout -B {{ dockprom_branch_name }} --track origin/{{ dockprom_branch_name }}"
  args:
    chdir: "{{ dockprom_install_dir }}"
  tags:
    - git
    - pod-dockprom

View File

@@ -0,0 +1,24 @@
---
# #####################################
# DOCKER/DOCKER COMPOSE
# The docker role, in the base playbook,
# will install docker-compose, but we want
# to double check that the executable exists.
# (The registered result gates the build/enable tasks in the other
# task files of this role.)
- name: Check that docker compose executable is available
  stat:
    path: "/usr/local/bin/docker-compose"
  register: dockprom_register_docker_compose
  tags:
    - pod-dockprom

# Also make sure the docker daemon is running.
# NOTE(review): `state: restarted` bounces the docker daemon on every
# play run, which briefly takes down all running containers — use
# `state: started` if that is not intended.
- name: Enable docker service
  become: yes
  service:
    name: docker
    enabled: yes
    state: restarted
  tags:
    - pod-dockprom

View File

@@ -0,0 +1,92 @@
---
# pod-dockprom construct the pod
#
# steps:
#   - fetch docker-compose template from remote to local
#   - install the docker-compose file from local to remote, applying variables
#   - build the images (client/exporter pod only; master-pod tasks are
#     commented out below until tested)
# ---------------------
# client pod
- name: Fetch pod-dockprom client compose template from remote machine
  fetch:
    src: "{{ dockprom_install_dir }}/docker-compose.exporters.yml.j2"
    dest: "/tmp/pod-dockprom-docker-compose.exporters.yml.j2"
    flat: yes
    fail_on_missing: yes
  when:
    - "dockprom_register_docker_compose.stat.exists"
    - install_client_service | lower == "true"
  tags:
    - pod-dockprom

- name: Install the pod-dockprom client compose file onto the remote machine
  become: yes
  become_user: "{{ username }}"
  template:
    src: "/tmp/pod-dockprom-docker-compose.exporters.yml.j2"
    dest: "{{ dockprom_install_dir }}/docker-compose.exporters.yml"
    mode: 0640
    force: yes
  when:
    - "dockprom_register_docker_compose.stat.exists"
    - install_client_service | lower == "true"
  tags:
    - pod-dockprom

# Full no-cache rebuild: slow, but guarantees fresh image layers.
- name: Build pod-dockprom client pod from scratch
  become: yes
  become_user: "{{ username }}"
  command: "/usr/local/bin/docker-compose -f {{ dockprom_install_dir }}/docker-compose.exporters.yml build --no-cache"
  args:
    chdir: "{{ dockprom_install_dir }}"
  when:
    - "dockprom_register_docker_compose.stat.exists"
    - install_client_service | lower == "true"
  tags:
    - pod-dockprom

# None of this is tested... we're not worrying about it right now.
# NOTE(review): before reviving the master block, fix its Install task —
# the src/dest still point at the *exporters* compose file (copy-paste
# from the client block above); they should use docker-compose.yml like
# the master fetch and build tasks. Also `install_master_service` has
# no default in this role's defaults file.
### # ------------------
### # master pod
###
### - name: Fetch pod-dockprom master compose template from remote machine
###   fetch:
###     src: "{{ dockprom_install_dir }}/docker-compose.yml.j2"
###     dest: "/tmp/pod-dockprom-docker-compose.yml.j2"
###     flat: yes
###     fail_on_missing: yes
###   when:
###     - "dockprom_register_docker_compose.stat.exists"
###     - install_master_service | lower == "true"
###   tags:
###     - pod-dockprom
###
### - name: Install the pod-dockprom master compose file onto the remote machine
###   become: yes
###   become_user: "{{ username }}"
###   template:
###     src: "/tmp/pod-dockprom-docker-compose.exporters.yml.j2"
###     dest: "{{ dockprom_install_dir }}/docker-compose.exporters.yml"
###     mode: 0640
###     force: yes
###   when:
###     - "dockprom_register_docker_compose.stat.exists"
###     - install_master_service | lower == "true"
###   tags:
###     - pod-dockprom
###
### - name: Build pod-dockprom master pod from scratch
###   become: yes
###   become_user: "{{ username }}"
###   command: "/usr/local/bin/docker-compose -f {{ dockprom_install_dir }}/docker-compose.yml build --no-cache"
###   args:
###     chdir: "{{ dockprom_install_dir }}"
###   when:
###     - "dockprom_register_docker_compose.stat.exists"
###     - install_master_service | lower == "true"
###   tags:
###     - pod-dockprom

View File

@@ -0,0 +1,27 @@
---
# Enable (and restart) the pod-dockprom startup service.
# Only the client service is active; the master-service tasks are kept
# below, commented out, until they are tested.
#
# Fixes applied in review:
#  - `stat.executable` -> `stat.exists`: a stat result only contains
#    `executable` when the file exists, so the old condition raised an
#    undefined-attribute error whenever docker-compose was missing —
#    exactly the case the guard is meant to handle.
#  - tag `golly-api` -> `pod-dockprom` (copy-paste from the golly role,
#    which also explains the old header comment about SSL certs).
#  - commented master task now names pod-dockprom-master, not -client.
- name: Enable pod-dockprom client service
  become: yes
  service:
    name: pod-dockprom-client
    enabled: yes
    state: restarted
  when:
    - "dockprom_register_docker_compose.stat.exists"
    - install_client_service | lower == "true"
  tags:
    - pod-dockprom

### - name: Enable pod-dockprom master service
###   become: yes
###   service:
###     name: pod-dockprom-master
###     enabled: yes
###     state: restarted
###   when:
###     - "dockprom_register_docker_compose.stat.exists"
###     - install_master_service | lower == "true"
###   tags:
###     - pod-dockprom

View File

@@ -0,0 +1,26 @@
---
# pod-dockprom startup services
- name: Install pod-dockprom client service
become: yes
template:
src: pod-dockprom-client.service.j2
dest: /etc/systemd/system/pod-dockprom-client.service
mode: 0774
when:
- install_client_service | lower == "true"
tags:
- pod-dockprom
- name: Install pod-dockprom master service
become: yes
template:
src: pod-dockprom-master.service.j2
dest: /etc/systemd/system/pod-dockprom-master.service
mode: 0774
when:
- install_client_service | lower != "true"
tags:
- pod-dockprom

View File

@@ -0,0 +1,24 @@
---
# tasks file for pod-dockprom
#
# Pipeline:
#   clone pod contents
#   render the docker-compose jinja template
#   check docker and docker-compose are present
#   construct the pod (docker-compose build)
#   install service
#   enable service
#
# NOTE(review): bare `include` is deprecated in newer Ansible releases;
# switch to include_tasks/import_tasks when upgrading.
#
# clone/clean checked-out pod contents
- include: clone_pod.yml
# docker and docker compose checks
# (registers dockprom_register_docker_compose, used by later files)
- include: docker_checks.yml
# render docker-compose templates and construct pod
- include: dockprom_construct.yml
# install pod service
- include: dockprom_services.yml
# enable pod service
- include: dockprom_enable.yml

View File

@@ -0,0 +1,15 @@
# systemd unit template for the pod-dockprom client (exporter) pod.
# Rendered by the pod-dockprom role; {{ dockprom_install_dir }} is
# filled in from the role defaults.
# Fix: Description previously read "Golly API docker pod service"
# (copy-paste from the golly role), which mislabeled this unit in
# `systemctl status` output.
[Unit]
Description=pod-dockprom client docker pod service
Requires=docker.service
After=docker.service

[Service]
Restart=always
StandardError=null
StandardOutput=null
ExecStartPre=/usr/local/bin/docker-compose -f {{ dockprom_install_dir }}/docker-compose.exporters.yml build
ExecStart=/usr/local/bin/docker-compose -f {{ dockprom_install_dir }}/docker-compose.exporters.yml up
ExecStop=/usr/local/bin/docker-compose -f {{ dockprom_install_dir }}/docker-compose.exporters.yml stop

[Install]
WantedBy=default.target

View File

@@ -0,0 +1,15 @@
[Unit]
Description=Golly API docker pod service
Requires=docker.service
After=docker.service
[Service]
Restart=always
StandardError=null
StandardOutput=null
ExecStartPre=/usr/local/bin/docker-compose -f {{ dockprom_install_dir }}/docker-compose.yml build
ExecStart=/usr/local/bin/docker-compose -f {{ dockprom_install_dir }}/docker-compose.yml up
ExecStop=/usr/local/bin/docker-compose -f {{ dockprom_install_dir }}/docker-compose.yml stop
[Install]
WantedBy=default.target

View File

@@ -1,20 +0,0 @@
---
# defaults file for pod-webhooks
username: "{{ nonroot_user }}"
# where pod-webhooks is installed
webhooks_install_dir: "/home/{{ username }}/pod-webhooks"
# shared secret
# # (must be entered every time you create a webhook)
captain_hook_secret: "{{ charlesreid1_captain_hook_secret }}"
# ----------------
# subpages nginx
# configuration variables
server_name_default: "{{ charlesreid1_server_name_default }}"
# end nginx configuration variables
# ----------------
#

View File

@@ -1,6 +0,0 @@
---
# handlers file for pod-charlesreid1
#
- name: restart pod-charlesreid1
service: name=pod-charlesreid1 state=restarted

View File

@@ -1,524 +0,0 @@
---
###########################
# Set up webhooks pod
#
# git.charlesreid1.com/docker/pod-webhooks
# git.charlesreid1.com/docker/d-nginx-subdomains
#
# Tasks:
# ------
#
# clone pod contents
#
# /www setup
# pages subdomain clone
# hooks subdomain clone
# bots subdomain clone
# docker and docker compose checks
# pages subdomain prep
# captain hook setup
# captain hook canary setup
#
# construct the pod (docker-compose build)
# install service
# (port mapping in Dockerfile)
# (letsencrypt cert check)
# enable service
#
# NOTE: This is almost identical to
# pod-charlesreid1, except for a few
# different sections. We could have
# made everything shared, but f--k it
# this has dragged on long enough.
#
###########################
# #####################################
# CLONE POD-WEBHOOKS
# Check if we already cloned it
- name: Check if pod-webhooks repo is cloned
stat:
path: "{{ webhooks_install_dir }}"
register: pod_webhooks_clone_check
tags:
- pod-webhooks
# Clone it
- name: Clone pod-webhooks
become: yes
become_user: "{{ username }}"
git:
repo: 'https://github.com/charlesreid1-docker/pod-webhooks.git'
dest: "{{ webhooks_install_dir }}"
recursive: yes
when:
- "not pod_webhooks_clone_check.stat.exists"
tags:
- pod-webhooks
# Pull it
- name: Pull pod-webhooks
become: yes
become_user: "{{ username }}"
command: "git pull"
args:
chdir: "{{ webhooks_install_dir }}"
when:
- "pod_webhooks_clone_check.stat.exists"
tags:
- pod-webhooks
# Pull submodules
- name: Pull pod-webhooks submodules
become: yes
become_user: "{{ username }}"
command: "git submodule update --remote"
args:
chdir: "{{ webhooks_install_dir }}"
when:
- "pod_webhooks_clone_check.stat.exists"
tags:
- pod-webhooks
# #####################################
# BUILD DOCKER-COMPOSE FILE FROM TEMPLATE
#
- name: Fetch the docker-compose template from the remote machine
run_once: true
fetch:
src: "{{ webhooks_install_dir }}/docker-compose.yml.j2"
dest: "/tmp/pod-webhooks-docker-compose.yml.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-webhooks
- pod-webhooks-docker
- name: Install the docker-compose file
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/pod-webhooks-docker-compose.yml.j2"
dest: "{{ webhooks_install_dir }}/docker-compose.yml"
mode: 0640
force: yes
tags:
- pod-webhooks
- pod-webhooks-docker
# #####################################
# SET UP /WWW DIRECTORY
#
# Create /www directory
# for subdomains content
- name: Create the /www directory
become: yes
file:
path: "/www"
state: directory
recurse: yes
owner: "{{ username }}"
group: "{{ username }}"
tags:
- pod-webhooks
- pod-webhooks-content
# Template scripts to populate /www
# with subdomain pages is done in the
# rules below...
# #####################################
# SUBDOMAIN PAGES SETUP (ALL)
#
# Initializes the /www folder structure for
# /www/pages.*
# /www/hooks.*
# /www/bots.*
#
# This is done with template python scripts
#
# /www/<subdomain>.charlesreid1.com/
# <subdomain>.charlesreid1.com-src/
# git/
# htdocs/
- name: "Fetch the initial subdomain clone commands script template"
fetch:
src: "{{ webhooks_install_dir }}/scripts/subdomains_init_setup.py.j2"
dest: "/tmp/subdomains_init_setup.py.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-webhooks
- pod-webhooks-content
- name: "Install the initial subdomain clone commands script"
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/subdomains_init_setup.py.j2"
dest: "{{ webhooks_install_dir }}/scripts/subdomains_init_setup.py"
mode: 0755
force: yes
tags:
- pod-webhooks
- pod-webhooks-content
- name: Run initial clone commands to set up bots/pages/hooks subdomains at /www/
command: "python {{ webhooks_install_dir }}/scripts/subdomains_init_setup.py"
tags:
- pod-webhooks
- pod-webhooks-content
# #####################################
# PAGES SETUP
#
# Initializes the contents of /www/pages.*/*
- name: Fetch the initial pages script
fetch:
src: "{{ webhooks_install_dir }}/scripts/pages_init_setup.py.j2"
dest: "/tmp/pages_init_setup.py.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-webhooks
- pod-webhooks-content
- name: Install the pages init setup script
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/pages_init_setup.py.j2"
dest: "{{ webhooks_install_dir }}/scripts/pages_init_setup.py"
mode: 0755
force: yes
tags:
- pod-webhooks
- pod-webhooks-content
- name: Run initial clone commands to set up pages at /www/pages.charlesreid1.com
command: "python {{ webhooks_install_dir }}/scripts/pages_init_setup.py"
tags:
- pod-webhooks
- pod-webhooks-content
# #####################################
# DOCKER/DOCKER COMPOSE
# The docker role, in the base playbook,
# will install docker-compose, but we want
# to double check that the executable exists
- name: Check that docker compose executable is available
stat:
path: "/usr/local/bin/docker-compose"
register: webhooks_register_docker_compose
tags:
- pod-webhooks
- pod-webhooks-docker
# Also make sure the docker daemon is running
- name: Enable docker service
become: yes
service:
name: docker
enabled: yes
state: restarted
tags:
- pod-webhooks
- pod-webhooks-docker
- pod-webhooks-services
# #####################################
# NGIX CONFIG PREP
#
# prepare the config files for the
# subdomains nginx server:
# - copy templates from remote machine
# - clean conf.d directory
# - copy rendered templates to remote machine
- name: Clean d-nginx-subdomains conf.d directory
become: yes
become_user: "{{ username }}"
command: "python {{ webhooks_install_dir }}/d-nginx-subdomains/scripts/clean_config.py"
tags:
- pod-webhooks
# Install the d-nginx-subdomains configuration templates
#
- name: Fetch d-nginx-subdomains configuration templates from remote machine
run_once: true
fetch:
src: "{{ webhooks_install_dir }}/d-nginx-subdomains/conf.d_templates/http.subdomains.conf.j2"
dest: "/tmp/http.subdomains.conf.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-webhooks
- name: Install the d-nginx-subdomains configuration templates
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/http.subdomains.conf.j2"
dest: "{{ webhooks_install_dir }}/d-nginx-subdomains/conf.d/http.subdomains.conf"
force: yes
tags:
- pod-webhooks
# #####################################
# CAPTAIN HOOK SETUP
- name: Fetch the captain hook config file template
fetch:
src: "{{ webhooks_install_dir }}/b-captain-hook/config.json.j2"
dest: "/tmp/captain_hook_config.json.j2"
flat: yes
fail_on_missing: yes
tags:
- captain-hook
- name: Install the captain hook config file
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/captain_hook_config.json.j2"
dest: "{{ webhooks_install_dir }}/b-captain-hook/config.json"
mode: 0755
force: yes
tags:
- captain-hook
# #####################################
# CAPTAIN HOOK CANARY SCRIPT SETUP
#
# Start with the canary script first.
#
# The whole pod has to be built and the
# pod startup service installed
# before the canary service can be
# installed.
# Script 1 - canary script itself
# Use the template provided to make it
#
- name: Fetch the captain hook canary script template from the remote machine
run_once: true
fetch:
src: "{{ webhooks_install_dir }}/scripts/captain_hook_canary.sh.j2"
dest: "/tmp/captain_hook_canary.sh.j2"
flat: yes
fail_on_missing: yes
tags:
- captain-hook
# Install the captain hook canary script
#
- name: Install the captain hook canary script
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/captain_hook_canary.sh.j2"
dest: "{{ webhooks_install_dir }}/scripts/captain_hook_canary.sh"
mode: 0755
force: yes
tags:
- captain-hook
# Script 2 - pull host script
# Do it all again for the pull host script
# Use the template provided to make it
#
- name: Fetch the captain hook pull host script template from the remote machine
run_once: true
fetch:
src: "{{ webhooks_install_dir }}/scripts/captain_hook_pull_host.py.j2"
dest: "/tmp/captain_hook_pull_host.py.j2"
flat: yes
fail_on_missing: yes
tags:
- captain-hook
# Install the captain hook pull host script
- name: Install the captain hook pull host script
become: yes
become_user: "{{ username }}"
template:
src: "/tmp/captain_hook_pull_host.py.j2"
dest: "{{ webhooks_install_dir }}/scripts/captain_hook_pull_host.py"
mode: 0755
force: yes
tags:
- captain-hook
# #####################################
# CONSTRUCT THE POD
#
# This task is very time-consuming.
- name: Build pod-webhooks from scratch
become: yes
become_user: "{{ username }}"
command: "/usr/local/bin/docker-compose build --no-cache"
args:
chdir: "{{ webhooks_install_dir }}"
when:
- "webhooks_register_docker_compose.stat.exists"
# #####################################
# INSTALL STARTUP SERVICE
#
# Check if the webhooks docker pod service
# is installed. If not, install it.
### # Just kidding - don't bother.
### # Always reinstall the startup service.
### #
### - name: Check if pod-webhooks service is installed
### stat:
### path: "/etc/systemd/system/pod-webhooks.service"
### register: pod_webhooks_service_check
### tags:
### - pod-webhooks-services
# Fetch the pod-webhooks service template
#
- name: Fetch the pod-webhooks template from remote host machine
run_once: true
fetch:
src: "{{ webhooks_install_dir }}/scripts/pod-webhooks.service.j2"
dest: "/tmp/pod-webhooks.service.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-webhooks-services
# Apply the template and install it for goodness sake
#
- name: Install pod-webhooks service
become: yes
template:
src: "/tmp/pod-webhooks.service.j2"
dest: "/etc/systemd/system/pod-webhooks.service"
mode: 0774
tags:
- pod-webhooks-services
# Now enable the pod-webhooks service.
# Don't worry about SSL cert checks, not our problem.
- name: Enable pod-webhooks service
become: yes
service:
name: pod-webhooks
enabled: yes
state: restarted
when:
- "webhooks_register_docker_compose.stat.executable"
tags:
- pod-webhooks-services
# #####################################
# CAPTAIN HOOK CANARY SERVICE SETUP
### # Begin by checking to see if installed
### # Just kidding - always reinstall the canary service from the repo template
### #
### - name: Check if the captain hook canary service is installed
### stat:
### path: "/etc/systemd/system/captain-hook-canary.service"
### register: canary_service_check
### tags:
### - pod-webhooks-services
### - captain-hook
# Fetch the captain hook canary startup service template onto local computer
# #
- name: Fetch the captain hook canary service template file from the remote machine
run_once: true
fetch:
src: "{{ webhooks_install_dir }}/scripts/captain-hook-canary.service.j2"
dest: "/tmp/captain-hook-canary.service.j2"
flat: yes
fail_on_missing: yes
tags:
- pod-webhooks-services
- captain-hook
# Apply the captain hook canary startup service template
#
- name: Install the captain hook canary startup service
become: yes
template:
src: "/tmp/captain-hook-canary.service.j2"
dest: "/etc/systemd/system/captain-hook-canary.service"
mode: 0774
force: yes
tags:
- pod-webhooks-services
- captain-hook
# Now enable the captain hook canary startup service.
#
- name: Enable the captain hook canary startup service
become: yes
service:
name: captain-hook-canary
enabled: yes
state: restarted
tags:
- pod-webhooks-services
- captain-hook

View File

@@ -1,27 +0,0 @@
# Service script for starting up the
# captain hook canary service.
#
# The main purpose of this service is to
# allow the captain hook webhook container
# to send a signal to the host machine
# (by touching a file in a shared directory).
#
# Each repository has its own webhooks,
# and each repository can create their own
# canary files and have custom actions to
# deal with them.
[Unit]
Description=captain hook canary script
Requires=pod-webhooks.service
After=pod-webhooks.service
[Service]
Restart=always
ExecStart=/home/charles/blackbeard_scripts/captain_hook_canary.sh
ExecStop=/usr/bin/pgrep -f captain_hook_canary | /usr/bin/xargs /bin/kill
[Install]
WantedBy=default.target

View File

@@ -1,16 +0,0 @@
# Service script for starting up the webhooks docker pod
# # (hooks subdomain, pages subdomain)
[Unit]
Description=webhooks and subdomains docker pod
Requires=docker.service
After=docker.service
[Service]
Restart=always
ExecStart=/usr/local/bin/docker-compose -f /home/charles/codes/docker/pod-webhooks/docker-compose.yml up
ExecStop=/usr/local/bin/docker-compose -f /home/charles/codes/docker/pod-webhooks/docker-compose.yml down
[Install]
WantedBy=default.target

1
roles/postfix Submodule

Submodule roles/postfix added at d0529a70c1

View File

@@ -1,7 +1,8 @@
---
# defaults file for pyenv
pyenv_root: "/home/{{ nonroot_user }}/.pyenv"
username: "{{ nonroot_user }}"
pyenv_root: "/home/{{ username }}/.pyenv"
pyenv_versions:
- miniconda3-4.3.30

View File

@@ -3,14 +3,14 @@
- name: Install pyenv
become: yes
become_user: "{{ nonroot_user }}"
become_user: "{{ username }}"
git:
repo: https://github.com/pyenv/pyenv.git
dest: "{{ pyenv_root }}"
- name: Install pyenv plugins
become: yes
become_user: "{{ nonroot_user }}"
become_user: "{{ username }}"
become_flags: logon_type=interactive logon_flags=with_profile
git:
repo: "https://github.com/pyenv/{{ item }}.git"

View File

@@ -4,7 +4,7 @@
- name: Install python versions
become: yes
become_user: "{{ nonroot_user }}"
become_user: "{{ username }}"
command: "{{ pyenv_root }}/bin/pyenv install -s {{ item }}"
args:
creates: "{{ pyenv_root }}/versions/{{ item }}"
@@ -17,7 +17,7 @@
- name: Set global python version
become: yes
become_user: "{{ nonroot_user }}"
become_user: "{{ username }}"
lineinfile:
path: "{{ pyenv_root }}/version"
regexp: "^{{ pyenv_global_version }}"

View File

@@ -7,3 +7,4 @@ username: "{{ nonroot_user }}"
# link it with this email.
ssh_key_email: ""
ssh_port: 22

View File

@@ -0,0 +1,2 @@
# Per-user ssh client config (copied to ~/.ssh/config by the ssh role).
# accept-new: auto-accept host keys of previously-unseen hosts, but
# still reject connections when a known host's key has changed.
# NOTE(review): accept-new requires OpenSSH 7.6+ — confirm targets.
Host *
    StrictHostKeyChecking accept-new

View File

@@ -0,0 +1,6 @@
---
# handlers file for the ssh role
# Restart the SSH daemon; notified by tasks that edit /etc/ssh/sshd_config.
# NOTE(review): no `become: yes` here — restarting ssh needs root, so
# presumably privilege escalation is set at the play level; verify.
- name: restart ssh
  service:
    name: "ssh"
    state: "restarted"

View File

@@ -12,6 +12,7 @@
path: /root/.ssh
state: directory
tags:
- ssh
- root-ssh
- name: Check if a root user SSH key already exists
@@ -20,6 +21,7 @@
path: "/root/.ssh/id_rsa"
register: root_key_check
tags:
- ssh
- root-ssh
- name: "Generate SSH keys for root user ({{ ssh_key_email }})"
@@ -28,6 +30,7 @@
when:
- "not root_key_check.stat.exists"
tags:
- ssh
- root-ssh
- name: Set permissions on root .ssh directory
@@ -40,22 +43,7 @@
when:
- "not root_key_check.stat.exists"
tags:
- root-ssh
###############################
# root: copy authorized keys
- name: Copy (overwrite) the authorized keys file into the root .ssh directory
become: yes
copy:
src: authorized_keys
dest: /root/.ssh/authorized_keys
mode: 0600
force: yes
tags:
- ssh
- root-ssh
@@ -72,6 +60,7 @@
owner: "{{ username }}"
group: "{{ username }}"
tags:
- ssh
- nonroot-ssh
- name: Check if a nonroot user SSH key already exists
@@ -81,6 +70,7 @@
path: "/home/{{ username }}/.ssh/id_rsa"
register: nonroot_key_check
tags:
- ssh
- nonroot-ssh
- name: "Generate SSH keys for nonroot user {{ username }} ({{ ssh_key_email }})"
@@ -90,6 +80,7 @@
when:
- "not nonroot_key_check.stat.exists"
tags:
- ssh
- nonroot-ssh
- name: Set permissions on nonroot .ssh directory
@@ -103,6 +94,7 @@
owner: "{{ username }}"
group: "{{ username }}"
tags:
- ssh
- nonroot-ssh
@@ -121,6 +113,74 @@
owner: "{{ username }}"
group: "{{ username }}"
tags:
- ssh
- nonroot-ssh
##################################
# nonroot: copy ssh configuration
- name: Copy the ssh configuration to the nonroot user .ssh directory
become: yes
become_user: "{{ username }}"
copy:
src: config
dest: "/home/{{ username }}/.ssh/config"
mode: 0600
force: yes
owner: "{{ username }}"
group: "{{ username }}"
tags:
- ssh
- nonroot-ssh
##################################
# system ssh configuration
# NOTE(review): neither task below sets `become: yes`, yet they touch
# the ssh service and /etc/ssh/sshd_config, which require root —
# presumably become is set at the play level; verify.
- name: Ensure SSH daemon is running.
  service:
    name: "ssh"
    state: "started"

# Harden sshd_config. `validate: 'sshd -T -f %s'` makes lineinfile
# syntax-check the edited copy before moving it into place, so a bad
# edit cannot lock us out of the host.
- name: Update SSH configuration to be more secure.
  lineinfile:
    dest: "/etc/ssh/sshd_config"
    regexp: "{{ item.regexp }}"
    line: "{{ item.line }}"
    state: present
    validate: 'sshd -T -f %s'
    mode: 0644
  with_items:
    - regexp: "^PasswordAuthentication"
      line: "PasswordAuthentication no"
    - regexp: "^PermitRootLogin"
      line: "PermitRootLogin no"
    # ssh_port comes from the role defaults (default 22)
    - regexp: "^Port"
      line: "Port {{ ssh_port }}"
    - regexp: "^UseDNS"
      line: "UseDNS no"
    - regexp: "^PermitEmptyPasswords"
      line: "PermitEmptyPasswords no"
    # NOTE(review): ChallengeResponseAuthentication was renamed
    # KbdInteractiveAuthentication in OpenSSH 8.7 — confirm the target
    # distro still accepts the old spelling.
    - regexp: "^ChallengeResponseAuthentication"
      line: "ChallengeResponseAuthentication no"
    - regexp: "^GSSAPIAuthentication"
      line: "GSSAPIAuthentication no"
    - regexp: "^X11Forwarding"
      line: "X11Forwarding no"
  notify: restart ssh
  tags:
    - ssh
    - root-ssh
##
## - name: Automatically accept new SSH keys
## become: yes
## become_user: "{{ username }}"
## command: "echo 'StrictHostKeyChecking=accept-new' > ~/.ssh/config"
## tags:
## - nonroot-ssh

View File

@@ -1,7 +1,7 @@
Role Name
=========
A brief description of the role goes here.
**uptime** - installs <https://github.com/charlesreid1-bots/uptime>
Requirements
------------

View File

@@ -0,0 +1,15 @@
---
# defaults file for uptime
#
# Fix applied in review: the username value was written as
# "{{ nonroot_user }" — missing the closing brace — which is not a
# valid Jinja2 expression and would set username to the literal
# broken string instead of the nonroot user's name.
username: "{{ nonroot_user }}"

# uptime details
# The charlesreid1_uptime_* vars are not defined in this role —
# presumably supplied via group_vars (vault-encrypted for the
# credentials); verify before running.
uptime_user: "{{ charlesreid1_uptime_user }}"
uptime_path: "/home/{{ username }}/uptime"
uptime_gmail_email: "{{ charlesreid1_uptime_gmail_email }}"
uptime_gmail_password: "{{ charlesreid1_uptime_gmail_password }}"
uptime_recipient_name: "{{ charlesreid1_uptime_recipient_name }}"
uptime_recipient_email: "{{ charlesreid1_uptime_recipient_email }}"
uptime_slack_apikey: "{{ charlesreid1_uptime_slack_apikey }}"
uptime_slack_channel: "{{ charlesreid1_uptime_slack_channel }}"

View File

@@ -0,0 +1,11 @@
---
# tasks file for installing uptime and associated service
# Order matters: clone the repo first, then write its config, then
# build the virtualenv, then install/enable the startup service.
# NOTE(review): bare `include` is deprecated in newer Ansible; switch
# to include_tasks/import_tasks when upgrading.
- include: uptime_clone.yml
- include: uptime_config.yml
- include: uptime_virtualenv.yml
- include: uptime_service.yml

View File

@@ -0,0 +1,41 @@
---
# Clone the uptime repo/script/package
# Check if we already cloned the repo
- name: Check if charlesreid1-bots/uptime repo is already cloned
  stat:
    path: "{{ uptime_path }}"
  register: uptime_clone_check
  tags:
    - uptime
    - uptime-clone

# Clone it (first run only: the stat check above found no checkout)
- name: Clone charlesreid1-bots/uptime
  become: yes
  become_user: "{{ username }}"
  git:
    repo: 'https://github.com/charlesreid1-bots/uptime'
    dest: "{{ uptime_path }}"
    recursive: yes
  when:
    - "not uptime_clone_check.stat.exists"
  tags:
    - uptime
    - uptime-clone

# Pull it (subsequent runs)
# NOTE(review): a raw `git pull` always reports "changed" and fails on
# diverged history — presumably acceptable for this repo.
- name: Pull charlesreid1-bots/uptime
  become: yes
  become_user: "{{ username }}"
  command: "git pull"
  args:
    chdir: "{{ uptime_path }}"
  when:
    - "uptime_clone_check.stat.exists"
  tags:
    - uptime
    - uptime-clone

Some files were not shown because too many files have changed in this diff Show More