Compare commits
134 Commits
service-up
...
claude-pla
| Author | SHA1 | Date | |
|---|---|---|---|
| a907947e78 | |||
| b44d535a93 | |||
| 98d9368d65 | |||
| b475f2142e | |||
| 647e24013e | |||
| dba56b4d43 | |||
| 134450d922 | |||
| 2772e3447c | |||
| c6f0183d4b | |||
| 1b3d1776b4 | |||
| 1f47029098 | |||
| 0a89fe68c8 | |||
| d0b8e83ffc | |||
| 89d5849708 | |||
| 13a3a1cb5e | |||
| 892eddcbbb | |||
| c95fcfaaf2 | |||
| 0e09187b3e | |||
| 8afcd3073b | |||
| efb3fa0140 | |||
| 3048c35647 | |||
| 86997b5a55 | |||
| fadee2ea91 | |||
| 87582b77b2 | |||
| aaa226d82a | |||
| 5f26ebac25 | |||
| 940e21f507 | |||
| 99ab12a2ba | |||
| 6c49dd3171 | |||
| 99616d5de5 | |||
| 83f898192a | |||
| 5eb9ee5c3c | |||
| df23627e9a | |||
| 76dc820b2d | |||
| 6b2b21b668 | |||
| bcb04257fa | |||
| 0cad1e0398 | |||
| 14d70a919d | |||
| 3aba9729e6 | |||
| eb840384d1 | |||
| 5bf613cd56 | |||
| ccfed3f3fc | |||
| 194e619537 | |||
| a0f9548fcf | |||
| 418315150a | |||
| ebb304d374 | |||
| 8580c2c1f0 | |||
| a3f460113a | |||
| e94f911d99 | |||
| f7446c5a2d | |||
| 6d1fa940a7 | |||
| cfac7c69dc | |||
| 3287d57554 | |||
| d347024939 | |||
| 8e4f86c8c6 | |||
| 5b855a575a | |||
| 4248f86c64 | |||
| f36011d4cc | |||
| 4953dfb8f3 | |||
| d003935769 | |||
| 58e795bd98 | |||
| 0709e883ea | |||
| 8965515215 | |||
| 69523ba027 | |||
| 2a4ed33024 | |||
| f880c44b79 | |||
| 5cac0fa869 | |||
| 303ebf8ea3 | |||
| 4d638c456e | |||
| 72fc465d1d | |||
| 2f579f4cfa | |||
| 1bc4bb4902 | |||
| d91b7dc735 | |||
| acb2f57176 | |||
| 3482004df0 | |||
| 4ed1b479ef | |||
| 5a931c2e38 | |||
| 17da345041 | |||
| 5e9be9e6c8 | |||
| 0148fe3e55 | |||
| a144d6070b | |||
| 989036ac21 | |||
| 523ed50647 | |||
| 03f81f4a25 | |||
| 002ad20d7d | |||
| 2cb6a39990 | |||
| 920ff3839e | |||
| d3dae75d38 | |||
| 4004ba6ccb | |||
| cf982ee2c6 | |||
| efd9487953 | |||
| b2552b6345 | |||
| 1a8f699ab4 | |||
| 5e3ab1768c | |||
| 291ff2d28a | |||
| 229975883c | |||
| af7ef822f0 | |||
| cc3688a982 | |||
| e080cda745 | |||
| 45c0f1390f | |||
| dacef1ac09 | |||
| 03a8456a2a | |||
| d1d749d8e4 | |||
| 74adabc43a | |||
| 3566305577 | |||
| 7442b2ee87 | |||
| 9aa49166a6 | |||
| f06ac24ecb | |||
| b796cc9756 | |||
| 25063ed251 | |||
| 72a47d71f2 | |||
| dba09976fb | |||
| 7a3c76b9f9 | |||
| 18fd6038df | |||
| 18814b6a1d | |||
| fc35d94b3c | |||
| 3604bc1378 | |||
| f0f65db9e3 | |||
| e5686d4d9a | |||
| 30c4a24b8d | |||
| 904122db17 | |||
| 8760edf0c3 | |||
| b4650771bc | |||
| b8182774a4 | |||
| bb3b6c027a | |||
| 1d18b5e71c | |||
| 858cb6c3c8 | |||
| 0a5f9f99ac | |||
| 2ac521e1c9 | |||
| ffc4f1d0c0 | |||
| 7246b0845c | |||
| 67acb4a32b | |||
| 15d4bcecc7 | |||
| 9c92f3fd75 |
2
.gitmodules
vendored
2
.gitmodules
vendored
@@ -1,3 +1,3 @@
|
|||||||
[submodule "mkdocs-material"]
|
[submodule "mkdocs-material"]
|
||||||
path = mkdocs-material
|
path = mkdocs-material
|
||||||
url = git@github.com:charlesreid1-docker/mkdocs-material.git
|
url = https://github.com/charlesreid1/mkdocs-material
|
||||||
|
|||||||
53
Makefile
53
Makefile
@@ -63,13 +63,14 @@ help:
|
|||||||
templates:
|
templates:
|
||||||
@find * -name "*.service.j2" | xargs -I '{}' chmod 644 {}
|
@find * -name "*.service.j2" | xargs -I '{}' chmod 644 {}
|
||||||
@find * -name "*.timer.j2" | xargs -I '{}' chmod 644 {}
|
@find * -name "*.timer.j2" | xargs -I '{}' chmod 644 {}
|
||||||
python3 $(POD_CHARLESREID1_DIR)/scripts/apply_templates.py
|
/home/charles/.pyenv/shims/python3 $(POD_CHARLESREID1_DIR)/scripts/apply_templates.py
|
||||||
|
|
||||||
list-templates:
|
list-templates:
|
||||||
@find * -name "*.j2"
|
@find * -name "*.j2"
|
||||||
|
|
||||||
clean-templates:
|
clean-templates:
|
||||||
python3 $(POD_CHARLESREID1_DIR)/scripts/clean_templates.py
|
# sudo is required because bind-mounted gitea files end up owned by root. stupid docker.
|
||||||
|
sudo -E /home/charles/.pyenv/shims/python3 $(POD_CHARLESREID1_DIR)/scripts/clean_templates.py
|
||||||
|
|
||||||
# Backups
|
# Backups
|
||||||
|
|
||||||
@@ -97,31 +98,42 @@ mw-fix-skins:
|
|||||||
# /www Dir
|
# /www Dir
|
||||||
|
|
||||||
clone-www:
|
clone-www:
|
||||||
python3 $(POD_CHARLESREID1_DIR)/scripts/git_clone_www.py
|
/home/charles/.pyenv/shims/python3 $(POD_CHARLESREID1_DIR)/scripts/git_clone_www.py
|
||||||
|
|
||||||
pull-www:
|
pull-www:
|
||||||
python3 $(POD_CHARLESREID1_DIR)/scripts/git_pull_www.py
|
/home/charles/.pyenv/shims/python3 $(POD_CHARLESREID1_DIR)/scripts/git_pull_www.py
|
||||||
|
|
||||||
install:
|
install:
|
||||||
ifeq ($(shell which systemctl),)
|
ifeq ($(shell which systemctl),)
|
||||||
$(error Please run this make command on a system with systemctl installed)
|
$(error Please run this make command on a system with systemctl installed)
|
||||||
endif
|
endif
|
||||||
|
@/home/charles/.pyenv/shims/python3 -c 'import botocore' || (echo "Please install the botocore library using python3 or pip3 binary"; exit 1)
|
||||||
|
@/home/charles/.pyenv/shims/python3 -c 'import boto3' || (echo "Please install the boto3 library using python3 or pip3 binary"; exit 1)
|
||||||
|
|
||||||
sudo cp $(POD_CHARLESREID1_DIR)/scripts/pod-charlesreid1.service /etc/systemd/system/pod-charlesreid1.service
|
sudo cp $(POD_CHARLESREID1_DIR)/scripts/pod-charlesreid1.service /etc/systemd/system/pod-charlesreid1.service
|
||||||
|
|
||||||
|
sudo cp $(POD_CHARLESREID1_DIR)/scripts/backups/pod-charlesreid1-backups-aws.{service,timer} /etc/systemd/system/.
|
||||||
|
sudo cp $(POD_CHARLESREID1_DIR)/scripts/backups/pod-charlesreid1-backups-cleanolderthan.{service,timer} /etc/systemd/system/.
|
||||||
|
sudo cp $(POD_CHARLESREID1_DIR)/scripts/backups/pod-charlesreid1-backups-gitea.{service,timer} /etc/systemd/system/.
|
||||||
sudo cp $(POD_CHARLESREID1_DIR)/scripts/backups/pod-charlesreid1-backups-wikidb.{service,timer} /etc/systemd/system/.
|
sudo cp $(POD_CHARLESREID1_DIR)/scripts/backups/pod-charlesreid1-backups-wikidb.{service,timer} /etc/systemd/system/.
|
||||||
sudo cp $(POD_CHARLESREID1_DIR)/scripts/backups/pod-charlesreid1-backups-wikifiles.{service,timer} /etc/systemd/system/.
|
sudo cp $(POD_CHARLESREID1_DIR)/scripts/backups/pod-charlesreid1-backups-wikifiles.{service,timer} /etc/systemd/system/.
|
||||||
sudo cp $(POD_CHARLESREID1_DIR)/scripts/backups/pod-charlesreid1-backups-gitea.{service,timer} /etc/systemd/system/.
|
|
||||||
sudo cp $(POD_CHARLESREID1_DIR)/scripts/backups/pod-charlesreid1-backups-aws.{service,timer} /etc/systemd/system/.
|
|
||||||
sudo cp $(POD_CHARLESREID1_DIR)/scripts/backups/canary/pod-charlesreid1-canary.{service,timer} /etc/systemd/system/.
|
sudo cp $(POD_CHARLESREID1_DIR)/scripts/backups/canary/pod-charlesreid1-canary.{service,timer} /etc/systemd/system/.
|
||||||
sudo cp $(POD_CHARLESREID1_DIR)/scripts/certbot/pod-charlesreid1-certbot.{service,timer} /etc/systemd/system/.
|
sudo cp $(POD_CHARLESREID1_DIR)/scripts/certbot/pod-charlesreid1-certbot.{service,timer} /etc/systemd/system/.
|
||||||
|
|
||||||
|
sudo cp $(POD_CHARLESREID1_DIR)/scripts/backups/10-pod-charlesreid1-rsyslog.conf /etc/rsyslog.d/.
|
||||||
|
|
||||||
sudo chmod 664 /etc/systemd/system/pod-charlesreid1*
|
sudo chmod 664 /etc/systemd/system/pod-charlesreid1*
|
||||||
sudo systemctl daemon-reload
|
sudo systemctl daemon-reload
|
||||||
|
|
||||||
|
sudo systemctl restart rsyslog
|
||||||
|
|
||||||
sudo systemctl enable pod-charlesreid1
|
sudo systemctl enable pod-charlesreid1
|
||||||
sudo systemctl enable pod-charlesreid1-backups-wikidb.timer
|
sudo systemctl enable pod-charlesreid1-backups-wikidb.timer
|
||||||
sudo systemctl enable pod-charlesreid1-backups-wikifiles.timer
|
sudo systemctl enable pod-charlesreid1-backups-wikifiles.timer
|
||||||
sudo systemctl enable pod-charlesreid1-backups-gitea.timer
|
sudo systemctl enable pod-charlesreid1-backups-gitea.timer
|
||||||
sudo systemctl enable pod-charlesreid1-backups-aws.timer
|
sudo systemctl enable pod-charlesreid1-backups-aws.timer
|
||||||
|
sudo systemctl enable pod-charlesreid1-backups-cleanolderthan.timer
|
||||||
sudo systemctl enable pod-charlesreid1-canary.timer
|
sudo systemctl enable pod-charlesreid1-canary.timer
|
||||||
sudo systemctl enable pod-charlesreid1-certbot.timer
|
sudo systemctl enable pod-charlesreid1-certbot.timer
|
||||||
|
|
||||||
@@ -129,37 +141,54 @@ endif
|
|||||||
sudo systemctl start pod-charlesreid1-backups-wikifiles.timer
|
sudo systemctl start pod-charlesreid1-backups-wikifiles.timer
|
||||||
sudo systemctl start pod-charlesreid1-backups-gitea.timer
|
sudo systemctl start pod-charlesreid1-backups-gitea.timer
|
||||||
sudo systemctl start pod-charlesreid1-backups-aws.timer
|
sudo systemctl start pod-charlesreid1-backups-aws.timer
|
||||||
|
sudo systemctl start pod-charlesreid1-backups-cleanolderthan.timer
|
||||||
sudo systemctl start pod-charlesreid1-canary.timer
|
sudo systemctl start pod-charlesreid1-canary.timer
|
||||||
sudo systemctl start pod-charlesreid1-certbot.timer
|
sudo systemctl start pod-charlesreid1-certbot.timer
|
||||||
|
|
||||||
|
sudo chown syslog:syslog /var/log/pod-charlesreid1-backups-aws.service.log
|
||||||
|
sudo chown syslog:syslog /var/log/pod-charlesreid1-backups-cleanolderthan.service.log
|
||||||
|
sudo chown syslog:syslog /var/log/pod-charlesreid1-backups-gitea.service.log
|
||||||
|
sudo chown syslog:syslog /var/log/pod-charlesreid1-backups-wikidb.service.log
|
||||||
|
sudo chown syslog:syslog /var/log/pod-charlesreid1-backups-wikifiles.service.log
|
||||||
|
sudo chown syslog:syslog /var/log/pod-charlesreid1-canary.service.log
|
||||||
|
|
||||||
uninstall:
|
uninstall:
|
||||||
ifeq ($(shell which systemctl),)
|
ifeq ($(shell which systemctl),)
|
||||||
$(error Please run this make command on a system with systemctl installed)
|
$(error Please run this make command on a system with systemctl installed)
|
||||||
endif
|
endif
|
||||||
-sudo systemctl disable pod-charlesreid1
|
-sudo systemctl disable pod-charlesreid1
|
||||||
|
-sudo systemctl disable pod-charlesreid1-backups-aws.timer
|
||||||
|
-sudo systemctl disable pod-charlesreid1-backups-cleanolderthan.timer
|
||||||
|
-sudo systemctl disable pod-charlesreid1-backups-gitea.timer
|
||||||
-sudo systemctl disable pod-charlesreid1-backups-wikidb.timer
|
-sudo systemctl disable pod-charlesreid1-backups-wikidb.timer
|
||||||
-sudo systemctl disable pod-charlesreid1-backups-wikifiles.timer
|
-sudo systemctl disable pod-charlesreid1-backups-wikifiles.timer
|
||||||
-sudo systemctl disable pod-charlesreid1-backups-gitea.timer
|
|
||||||
-sudo systemctl disable pod-charlesreid1-backups-aws.timer
|
|
||||||
-sudo systemctl disable pod-charlesreid1-canary.timer
|
-sudo systemctl disable pod-charlesreid1-canary.timer
|
||||||
-sudo systemctl disable pod-charlesreid1-certbot.timer
|
-sudo systemctl disable pod-charlesreid1-certbot.timer
|
||||||
|
|
||||||
# Leave the pod running!
|
# Leave the pod running!
|
||||||
# -sudo systemctl stop pod-charlesreid1
|
# -sudo systemctl stop pod-charlesreid1
|
||||||
|
|
||||||
|
-sudo systemctl stop pod-charlesreid1-backups-aws.timer
|
||||||
|
-sudo systemctl stop pod-charlesreid1-backups-cleanolderthan.timer
|
||||||
|
-sudo systemctl stop pod-charlesreid1-backups-gitea.timer
|
||||||
-sudo systemctl stop pod-charlesreid1-backups-wikidb.timer
|
-sudo systemctl stop pod-charlesreid1-backups-wikidb.timer
|
||||||
-sudo systemctl stop pod-charlesreid1-backups-wikifiles.timer
|
-sudo systemctl stop pod-charlesreid1-backups-wikifiles.timer
|
||||||
-sudo systemctl stop pod-charlesreid1-backups-gitea.timer
|
|
||||||
-sudo systemctl stop pod-charlesreid1-backups-aws.timer
|
|
||||||
-sudo systemctl stop pod-charlesreid1-canary.timer
|
-sudo systemctl stop pod-charlesreid1-canary.timer
|
||||||
-sudo systemctl stop pod-charlesreid1-certbot.timer
|
-sudo systemctl stop pod-charlesreid1-certbot.timer
|
||||||
|
|
||||||
-sudo rm -f /etc/systemd/system/pod-charlesreid1.service
|
-sudo rm -f /etc/systemd/system/pod-charlesreid1.service
|
||||||
|
|
||||||
|
-sudo rm -f /etc/systemd/system/pod-charlesreid1-backups-aws.{service,timer}
|
||||||
|
-sudo rm -f /etc/systemd/system/pod-charlesreid1-backups-cleanolderthan.{service,timer}
|
||||||
|
-sudo rm -f /etc/systemd/system/pod-charlesreid1-backups-gitea.{service,timer}
|
||||||
-sudo rm -f /etc/systemd/system/pod-charlesreid1-backups-wikidb.{service,timer}
|
-sudo rm -f /etc/systemd/system/pod-charlesreid1-backups-wikidb.{service,timer}
|
||||||
-sudo rm -f /etc/systemd/system/pod-charlesreid1-backups-wikifiles.{service,timer}
|
-sudo rm -f /etc/systemd/system/pod-charlesreid1-backups-wikifiles.{service,timer}
|
||||||
-sudo rm -f /etc/systemd/system/pod-charlesreid1-backups-gitea.{service,timer}
|
|
||||||
-sudo rm -f /etc/systemd/system/pod-charlesreid1-backups-aws.{service,timer}
|
|
||||||
-sudo rm -f /etc/systemd/system/pod-charlesreid1-canary.{service,timer}
|
-sudo rm -f /etc/systemd/system/pod-charlesreid1-canary.{service,timer}
|
||||||
-sudo rm -f /etc/systemd/system/pod-charlesreid1-certbot.{service,timer}
|
-sudo rm -f /etc/systemd/system/pod-charlesreid1-certbot.{service,timer}
|
||||||
|
|
||||||
sudo systemctl daemon-reload
|
sudo systemctl daemon-reload
|
||||||
|
|
||||||
|
-sudo rm -f /etc/rsyslog.d/10-pod-charlesreid1-rsyslog.conf
|
||||||
|
-sudo systemctl restart rsyslog
|
||||||
|
|
||||||
.PHONY: help
|
.PHONY: help
|
||||||
|
|||||||
324
MediaWikiMySqlUpgradePlan.md
Normal file
324
MediaWikiMySqlUpgradePlan.md
Normal file
@@ -0,0 +1,324 @@
|
|||||||
|
# Upgrade Plan: MediaWiki 1.34 → 1.39+ and MySQL 5.7 → 8.0
|
||||||
|
|
||||||
|
## Context
|
||||||
|
|
||||||
|
MediaWiki 1.34 (EOL Nov 2020) and MySQL 5.7 (EOL Oct 2023) are both end-of-life and no longer receive security patches. The goal is to upgrade both with **minimal downtime** by running old and new versions side-by-side, testing the new stack, then switching over — with the ability to roll back instantly.
|
||||||
|
|
||||||
|
**Additional motivation:** The REST API v1 endpoint `/w/rest.php/v1/page/{title}/with_html` returns a 500 error ("Unable to fetch Parsoid HTML") because MW 1.34 does not bundle Parsoid. MW 1.39 bundles Parsoid in-process, which is required for this endpoint to work. This blocks tools (e.g., MediaWiki MCP) that rely on the REST API to fetch rendered HTML.
|
||||||
|
|
||||||
|
## Strategy: Blue-Green Deployment
|
||||||
|
|
||||||
|
Run the old stack ("blue") untouched while building and testing the new stack ("green") alongside it. Nginx acts as the switch — changing one `proxy_pass` line flips between old and new.
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─ stormy_mw (MW 1.34) ──── stormy_mysql (MySQL 5.7) ← BLUE (old)
|
||||||
|
nginx ── proxy_pass ──────┤
|
||||||
|
└─ stormy_mw_new (MW 1.39) ─ stormy_mysql_new (MySQL 8) ← GREEN (new)
|
||||||
|
```
|
||||||
|
|
||||||
|
Both stacks use **separate volumes** — the old data is never touched.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Decisions (Locked In)
|
||||||
|
|
||||||
|
- **Target:** MediaWiki 1.39 LTS (smallest jump from 1.34, can do 1.39→1.42 later)
|
||||||
|
- **Skin:** Patch Bootstrap2 to replace deprecated API calls for MW 1.39 compatibility
|
||||||
|
- **EmbedVideo:** Skip for now — don't include in green stack. Add back later if needed.
|
||||||
|
- **Extensions in green stack:** SyntaxHighlight_GeSHi, ParserFunctions, Math (all have REL1_39 branches)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 1: Preparation (no downtime)
|
||||||
|
|
||||||
|
All work happens on the VPS alongside the running production stack.
|
||||||
|
|
||||||
|
### 1.1 Full backup
|
||||||
|
```bash
|
||||||
|
# Database dump
|
||||||
|
make backups
|
||||||
|
# or manually:
|
||||||
|
./scripts/backups/wikidb_dump.sh
|
||||||
|
|
||||||
|
# Also back up the MW volume (uploaded images, cache)
|
||||||
|
docker run --rm -v stormy_mw_data:/data -v /tmp/mw_backup:/backup \
|
||||||
|
alpine tar czf /backup/mw_data_backup.tar.gz -C /data .
|
||||||
|
```
|
||||||
|
|
||||||
|
### 1.2 Create new Dockerfiles
|
||||||
|
|
||||||
|
**`d-mediawiki-new/Dockerfile`** — based on `mediawiki:1.39`
|
||||||
|
- Same structure as current Dockerfile
|
||||||
|
- Update extension COPY paths for new versions
|
||||||
|
- Update apt packages if needed (texlive, imagemagick still required)
|
||||||
|
- Apache config stays the same (port 8989)
|
||||||
|
|
||||||
|
**`d-mysql-new/Dockerfile`** — based on `mysql:8.0`
|
||||||
|
- Same structure as current
|
||||||
|
- Keep slow-log config (syntax compatible with 8.0)
|
||||||
|
|
||||||
|
### 1.3 Update extensions for target MW version
|
||||||
|
|
||||||
|
Create `scripts/mw/build_extensions_dir_139.sh` to clone REL1_39 branches:
|
||||||
|
|
||||||
|
| Extension | Current | New |
|
||||||
|
|-----------|---------|-----|
|
||||||
|
| SyntaxHighlight_GeSHi | REL1_34 | REL1_39 |
|
||||||
|
| ParserFunctions | REL1_34 | REL1_39 |
|
||||||
|
| Math | REL1_34 | REL1_39 |
|
||||||
|
| EmbedVideo | v2.7.3 | **Skipped** (add back later) |
|
||||||
|
|
||||||
|
### 1.4 Patch Bootstrap2 skin
|
||||||
|
|
||||||
|
Replace deprecated calls in `skins/Bootstrap2/`:
|
||||||
|
- `wfRunHooks('hook', ...)` → `Hooks::run('hook', ...)` (MW 1.35+)
|
||||||
|
- `wfMsg('key')` → `wfMessage('key')->text()`
|
||||||
|
- `wfEmptyMsg('key')` → `wfMessage('key')->isDisabled()`
|
||||||
|
|
||||||
|
### 1.5 Update LocalSettings.php.j2 (new copy for green stack)
|
||||||
|
|
||||||
|
Changes needed for MW 1.39:
|
||||||
|
- `require_once "$IP/extensions/Math/Math.php"` → `wfLoadExtension( 'Math' )`
|
||||||
|
- `$wgDBmysql5 = true;` — remove (deprecated in MW 1.31 and removed in 1.33; it has no effect on 1.39)
|
||||||
|
- Remove `wfLoadExtension( 'EmbedVideo' )` (skipped for now)
|
||||||
|
- Review other deprecated settings
|
||||||
|
- Add Parsoid configuration (bundled in MW 1.39, runs in-process — no separate container needed):
|
||||||
|
```php
|
||||||
|
# Parsoid (required for REST API with_html endpoint)
|
||||||
|
wfLoadExtension( 'Parsoid', "$IP/vendor/wikimedia/parsoid/extension.json" );
|
||||||
|
$wgParsoidSettings = [
|
||||||
|
'useSelser' => true,
|
||||||
|
];
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 2: Build Green Stack (no downtime)
|
||||||
|
|
||||||
|
### 2.1 Add new services to docker-compose.yml.j2
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
stormy_mysql_new:
|
||||||
|
restart: always
|
||||||
|
build: d-mysql-new
|
||||||
|
container_name: stormy_mysql_new
|
||||||
|
volumes:
|
||||||
|
- "stormy_mysql_new_data:/var/lib/mysql"
|
||||||
|
- "./d-mysql/conf.d:/etc/mysql/conf.d:ro"
|
||||||
|
environment:
|
||||||
|
- MYSQL_ROOT_PASSWORD={{ pod_charlesreid1_mysql_password }}
|
||||||
|
networks:
|
||||||
|
- backend_new
|
||||||
|
|
||||||
|
stormy_mw_new:
|
||||||
|
restart: always
|
||||||
|
build: d-mediawiki-new
|
||||||
|
container_name: stormy_mw_new
|
||||||
|
volumes:
|
||||||
|
- "stormy_mw_new_data:/var/www/html"
|
||||||
|
environment:
|
||||||
|
- MEDIAWIKI_SITE_SERVER=https://{{ pod_charlesreid1_server_name }}
|
||||||
|
- MEDIAWIKI_SECRETKEY={{ pod_charlesreid1_mediawiki_secretkey }}
|
||||||
|
- MEDIAWIKI_UPGRADEKEY={{ pod_charlesreid1_mediawiki_upgradekey }}
|
||||||
|
- MYSQL_HOST=stormy_mysql_new
|
||||||
|
- MYSQL_DATABASE=wikidb
|
||||||
|
- MYSQL_USER=root
|
||||||
|
- MYSQL_PASSWORD={{ pod_charlesreid1_mysql_password }}
|
||||||
|
depends_on:
|
||||||
|
- stormy_mysql_new
|
||||||
|
networks:
|
||||||
|
- frontend
|
||||||
|
- backend_new
|
||||||
|
```
|
||||||
|
|
||||||
|
Add `stormy_mysql_new_data`, `stormy_mw_new_data` to volumes, `backend_new` to networks.
|
||||||
|
|
||||||
|
### 2.2 Build and start green containers
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose build stormy_mysql_new stormy_mw_new
|
||||||
|
docker compose up -d stormy_mysql_new stormy_mw_new
|
||||||
|
```
|
||||||
|
|
||||||
|
Old containers keep running — no disruption.
|
||||||
|
|
||||||
|
### 2.3 Migrate database to new MySQL 8.0
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Dump from old MySQL 5.7
|
||||||
|
docker exec stormy_mysql sh -c \
|
||||||
|
'mysqldump wikidb --databases -uroot -p"$MYSQL_ROOT_PASSWORD" --default-character-set=binary' \
|
||||||
|
> /tmp/wikidb_for_upgrade.sql
|
||||||
|
|
||||||
|
# Load into new MySQL 8.0
|
||||||
|
docker exec -i stormy_mysql_new sh -c \
|
||||||
|
'mysql -uroot -p"$MYSQL_ROOT_PASSWORD"' \
|
||||||
|
< /tmp/wikidb_for_upgrade.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2.4 Migrate MW uploaded files
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Copy images/uploads from old volume to new volume
|
||||||
|
docker run --rm \
|
||||||
|
-v stormy_mw_data:/old:ro \
|
||||||
|
-v stormy_mw_new_data:/new \
|
||||||
|
alpine sh -c 'cp -a /old/images /new/images 2>/dev/null; echo done'
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2.5 Run MediaWiki database upgrade
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec stormy_mw_new php /var/www/html/maintenance/update.php --quick
|
||||||
|
```
|
||||||
|
|
||||||
|
This migrates the DB schema from MW 1.34 → 1.39 format.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 3: Test Green Stack (no downtime)
|
||||||
|
|
||||||
|
### 3.1 Direct browser test
|
||||||
|
|
||||||
|
Temporarily expose the new MW on a different port for testing:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
stormy_mw_new:
|
||||||
|
ports:
|
||||||
|
- "8990:8989" # temporary, for direct testing
|
||||||
|
```
|
||||||
|
|
||||||
|
Visit `http://<vps-ip>:8990` to verify MW loads, pages render, login works.
|
||||||
|
|
||||||
|
### 3.2 Test via nginx (brief switchover)
|
||||||
|
|
||||||
|
Edit nginx config to point `/wiki/` and `/w/` at `stormy_mw_new:8989`:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
proxy_pass http://stormy_mw_new:8989/wiki/;
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec stormy_nginx nginx -s reload
|
||||||
|
```
|
||||||
|
|
||||||
|
Test the live site. If broken, switch back:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
proxy_pass http://stormy_mw:8989/wiki/;
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec stormy_nginx nginx -s reload
|
||||||
|
```
|
||||||
|
|
||||||
|
**Switchover and rollback each take ~2 seconds** (nginx reload, no container restart).
|
||||||
|
|
||||||
|
### 3.3 Test checklist
|
||||||
|
|
||||||
|
- [ ] Wiki pages render correctly
|
||||||
|
- [ ] Bootstrap2 skin displays properly
|
||||||
|
- [ ] Login works
|
||||||
|
- [ ] Math equations render
|
||||||
|
- [ ] Syntax highlighting works
|
||||||
|
- [ ] Image uploads work
|
||||||
|
- [ ] File downloads work
|
||||||
|
- [ ] Edit pages (as sysop)
|
||||||
|
- [ ] Search works
|
||||||
|
- [ ] Special pages load
|
||||||
|
- [ ] REST API: `curl -s -o /dev/null -w '%{http_code}' https://wiki.golly.life/w/rest.php/v1/page/Main_Page/with_html` returns `200`
|
||||||
|
- [ ] REST API: response contains rendered HTML (not "Unable to fetch Parsoid HTML")
|
||||||
|
- [ ] MediaWiki MCP tool can fetch pages without 500 errors
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 4: Switchover (~2 seconds downtime)
|
||||||
|
|
||||||
|
Once testing passes:
|
||||||
|
|
||||||
|
### 4.1 Final data sync
|
||||||
|
|
||||||
|
Right before switchover, re-dump and re-load the database to capture any edits made since Phase 2:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Fresh dump
|
||||||
|
docker exec stormy_mysql sh -c \
|
||||||
|
'mysqldump wikidb --databases -uroot -p"$MYSQL_ROOT_PASSWORD" --default-character-set=binary' \
|
||||||
|
> /tmp/wikidb_final.sql
|
||||||
|
|
||||||
|
# Load into new
|
||||||
|
docker exec -i stormy_mysql_new sh -c \
|
||||||
|
'mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -e "DROP DATABASE wikidb; CREATE DATABASE wikidb;"'
|
||||||
|
docker exec -i stormy_mysql_new sh -c \
|
||||||
|
'mysql -uroot -p"$MYSQL_ROOT_PASSWORD"' < /tmp/wikidb_final.sql
|
||||||
|
|
||||||
|
# Re-run schema upgrade
|
||||||
|
docker exec stormy_mw_new php /var/www/html/maintenance/update.php --quick
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4.2 Switch nginx
|
||||||
|
|
||||||
|
Update proxy_pass in nginx config, reload. **This is the only moment of downtime.**
|
||||||
|
|
||||||
|
### 4.3 Stop old containers (optional, can defer)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose stop stormy_mysql stormy_mw
|
||||||
|
```
|
||||||
|
|
||||||
|
Keep volumes intact for rollback.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 5: Rollback (if needed)
|
||||||
|
|
||||||
|
At any point after switchover:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Point nginx back to old containers
|
||||||
|
# (edit proxy_pass back to stormy_mw:8989)
|
||||||
|
docker compose start stormy_mysql stormy_mw
|
||||||
|
docker exec stormy_nginx nginx -s reload
|
||||||
|
```
|
||||||
|
|
||||||
|
Old containers + old volumes are untouched. Rollback is instant.
|
||||||
|
|
||||||
|
**Keep old containers and volumes for at least 2 weeks** before removing.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Files to Create/Modify
|
||||||
|
|
||||||
|
| File | Action |
|
||||||
|
|------|--------|
|
||||||
|
| `d-mediawiki-new/Dockerfile` | Create — based on `mediawiki:1.39` |
|
||||||
|
| `d-mediawiki-new/charlesreid1-config/` | Create — copy from d-mediawiki, update extensions |
|
||||||
|
| `d-mysql-new/Dockerfile` | Create — based on `mysql:8.0` |
|
||||||
|
| `docker-compose.yml.j2` | Add green stack services, volumes, network |
|
||||||
|
| `d-nginx-charlesreid1/conf.d/https.DOMAIN.conf.j2` | Switchover: change proxy_pass targets |
|
||||||
|
| `scripts/mw/build_extensions_dir_139.sh` | Create — clone REL1_39 branches |
|
||||||
|
| `d-mediawiki-new/charlesreid1-config/mediawiki/LocalSettings.php.j2` | Update for MW 1.39 compat |
|
||||||
|
| `d-mediawiki-new/charlesreid1-config/mediawiki/skins/Bootstrap2/` | Patch deprecated API calls |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Risk Assessment
|
||||||
|
|
||||||
|
| Risk | Likelihood | Mitigation |
|
||||||
|
|------|-----------|------------|
|
||||||
|
| Bootstrap2 skin breaks on MW 1.39 | MEDIUM | Patching deprecated calls; have Vector as fallback |
|
||||||
|
| Math extension rendering changes | LOW | REL1_39 branch exists; test rendering |
|
||||||
|
| MySQL 8 query compatibility | LOW | MW 1.39 officially supports MySQL 8.0 |
|
||||||
|
| Uploaded images lost | NONE | Copied to new volume; old volume preserved |
|
||||||
|
| Database corruption on migration | LOW | Old DB untouched; dump/restore is safe |
|
||||||
|
| Pages using EmbedVideo break | LOW | Videos won't render but pages still load; add back later |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Implementation Order
|
||||||
|
|
||||||
|
1. **Prepare** new Dockerfiles and extension builds (Phase 1)
|
||||||
|
2. **Build** green stack alongside production (Phase 2)
|
||||||
|
3. **Test** thoroughly (Phase 3)
|
||||||
|
4. **Switch** when confident (Phase 4)
|
||||||
|
5. **Clean up** old containers after 2 weeks (Phase 5)
|
||||||
405
PlanFixBackups.md
Normal file
405
PlanFixBackups.md
Normal file
@@ -0,0 +1,405 @@
|
|||||||
|
# Plan: Fix the Broken wikidb Backup Script
|
||||||
|
|
||||||
|
## Status
|
||||||
|
|
||||||
|
**BLOCKING:** The MySQL no-root-password migration (`MySqlNoRootPasswordPlan.md`)
|
||||||
|
is on hold until backups are working. We will not touch the database until we
|
||||||
|
have a verified, complete, restorable dump in hand.
|
||||||
|
|
||||||
|
## What we observed
|
||||||
|
|
||||||
|
On 2026-04-13 at 18:02 PDT we ran `scripts/backups/wikidb_dump.sh` as a
|
||||||
|
pre-flight safety net. After ~14 seconds the output file stopped growing at
|
||||||
|
459,628,206 bytes (~439 MB) and the script hung. After 6+ minutes:
|
||||||
|
|
||||||
|
- The `mysqldump` process inside `stormy_mysql` was still alive but in `S`
|
||||||
|
(sleeping) state, using ~1% CPU.
|
||||||
|
- `SHOW PROCESSLIST` on MySQL showed **no** mysqldump connection — MySQL had
|
||||||
|
already dropped it.
|
||||||
|
- The dump file ended mid-`INSERT`, mid-row, with **no** `-- Dump completed on …`
|
||||||
|
trailer. The dump is unusable.
|
||||||
|
|
||||||
|
So: every "successful" run of this script may have been silently producing
|
||||||
|
truncated dumps. We do not know how long this has been broken or whether any
|
||||||
|
recent backup in `/home/charles/backups` or in S3 is restorable. **That is
|
||||||
|
question one.**
|
||||||
|
|
||||||
|
## Root cause hypothesis
|
||||||
|
|
||||||
|
`scripts/backups/wikidb_dump.sh` runs:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
DOCKERX="${DOCKER} exec -t"
|
||||||
|
${DOCKERX} ${CONTAINER_NAME} sh -c 'exec mysqldump wikidb --databases -uroot -p"$MYSQL_ROOT_PASSWORD" --default-character-set=binary' > "${BACKUP_TARGET}"
|
||||||
|
```
|
||||||
|
|
||||||
|
The `-t` flag allocates a pseudo-TTY inside the container. Two problems with
|
||||||
|
that:
|
||||||
|
|
||||||
|
1. **PTY corrupts binary output.** A PTY translates `LF` → `CRLF` on output.
|
||||||
|
`mysqldump --default-character-set=binary` writes raw `_binary` blobs that
|
||||||
|
contain `\n` bytes; these get rewritten in transit, silently corrupting the
|
||||||
|
dump even when it does complete.
|
||||||
|
2. **PTY buffers can deadlock on large streams.** PTYs have small kernel
|
||||||
|
buffers (typically 4 KB). When the redirect target (`> file`) drains slower
|
||||||
|
than mysqldump produces, or when MySQL hits `net_write_timeout` and closes
|
||||||
|
the connection, mysqldump can end up sleeping on a PTY write that will
|
||||||
|
never complete. That matches what we saw: MySQL connection gone, mysqldump
|
||||||
|
alive but sleeping, file frozen at ~439 MB.
|
||||||
|
|
||||||
|
The script also strips the first line with `tail -n +2` to drop mysqldump's
|
||||||
|
"Using a password on the command line interface can be insecure" warning. The
|
||||||
|
warning goes to **stderr**, not stdout, so this `tail` is at best a no-op and
|
||||||
|
at worst silently deletes the first line of real SQL.
|
||||||
|
|
||||||
|
## Affected files
|
||||||
|
|
||||||
|
| File | Change |
|
||||||
|
|------|--------|
|
||||||
|
| `scripts/backups/wikidb_dump.sh` | Remove `-t`; switch auth to `MYSQL_PWD` env; remove broken `tail -n +2`; add completion-trailer check; add `--single-transaction --quick --routines --triggers --events` |
|
||||||
|
| `scripts/backups/wikidb_restore_test.sh` | **NEW** — restore the latest dump into a throwaway MySQL container and run sanity queries |
|
||||||
|
| `scripts/backups/README.md` *(if present)* | Document the restore-test command and integrity check |
|
||||||
|
|
||||||
|
We will not touch `scripts/mysql/restore_database.sh` here — it is broken
|
||||||
|
independently (references the deleted `.mysql.rootpw.cnf`) and is tracked
|
||||||
|
separately.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 0: Triage (do this first, before any changes)
|
||||||
|
|
||||||
|
### Step 0.1: Kill the hung mysqldump
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec stormy_mysql sh -c 'pkill -9 mysqldump || true'
|
||||||
|
# also kill the host-side docker exec wrapper if it is still around
|
||||||
|
pgrep -af 'docker exec.*mysqldump' || true
|
||||||
|
```
|
||||||
|
|
||||||
|
After this, confirm nothing is running:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec stormy_mysql sh -c 'pgrep -a mysqldump || echo none'
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 0.2: Remove the truncated dump
|
||||||
|
|
||||||
|
```bash
|
||||||
|
rm -i /home/charles/backups/$(date +%Y%m%d)/wikidb_*.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 0.3: Audit existing backups — are *any* of them complete?
|
||||||
|
|
||||||
|
We need to know whether we have a known-good dump anywhere. For each candidate
|
||||||
|
file, the last bytes should contain `-- Dump completed on`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
for f in $(find /home/charles/backups -name 'wikidb_*.sql' -mtime -30 | sort); do
|
||||||
|
trailer=$(tail -c 200 "$f" | tr -d '\0' | grep -o 'Dump completed on[^"]*' || echo "MISSING")
|
||||||
|
size=$(stat -c %s "$f")
|
||||||
|
echo "$f size=$size trailer=$trailer"
|
||||||
|
done
|
||||||
|
```
|
||||||
|
|
||||||
|
Any file showing `MISSING` is truncated and **not a real backup**. Record the
|
||||||
|
results — we need to know whether the most recent good dump is from yesterday,
|
||||||
|
last week, or never.
|
||||||
|
|
||||||
|
### Step 0.4: Audit the S3 backups
|
||||||
|
|
||||||
|
```bash
|
||||||
|
source ./environment
|
||||||
|
aws s3 ls "s3://${POD_CHARLESREID1_BACKUP_S3BUCKET}/" --recursive | grep wikidb | tail -20
|
||||||
|
```
|
||||||
|
|
||||||
|
Pull the most recent one down to a scratch dir and trailer-check it the same
|
||||||
|
way as Step 0.3. **Do not assume it is good just because it exists.**
|
||||||
|
|
||||||
|
### Step 0.5: Decide whether to pause writes
|
||||||
|
|
||||||
|
If Step 0.3 + 0.4 show no recent good backup, consider whether to pause writes
|
||||||
|
to the wiki (read-only mode via `$wgReadOnly` in `LocalSettings.php`) until we
|
||||||
|
have one. This is a judgement call — if the most recent good backup is days old
|
||||||
|
but the wiki is low-traffic, the risk of leaving it writable while we fix the
|
||||||
|
script is low. Decide explicitly, do not just drift.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 1: Fix the script
|
||||||
|
|
||||||
|
### Step 1.1: Edit `scripts/backups/wikidb_dump.sh`
|
||||||
|
|
||||||
|
Replace the docker exec block with:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Pass the password via env to avoid:
|
||||||
|
# - the cmdline-password warning on stderr
|
||||||
|
# - the password showing up in `ps` inside the container
|
||||||
|
# No `-t`: PTY corrupts binary dumps and can deadlock on large output.
|
||||||
|
docker exec -i \
|
||||||
|
-e MYSQL_PWD \
|
||||||
|
"${CONTAINER_NAME}" \
|
||||||
|
sh -c 'exec mysqldump \
|
||||||
|
--user=root \
|
||||||
|
--single-transaction \
|
||||||
|
--quick \
|
||||||
|
--routines \
|
||||||
|
--triggers \
|
||||||
|
--events \
|
||||||
|
--default-character-set=binary \
|
||||||
|
--databases wikidb' \
|
||||||
|
> "${BACKUP_TARGET}"
|
||||||
|
```
|
||||||
|
|
||||||
|
Notes on each flag:
|
||||||
|
|
||||||
|
- `-i` — keep stdin open. More importantly, `-t` is gone, so no PTY is allocated — dropping the PTY is the single most important change.
|
||||||
|
- `-e MYSQL_PWD` — forwards the host's `MYSQL_PWD` env var into the container
|
||||||
|
for this one exec call. mysqldump reads `MYSQL_PWD` automatically. Set it on
|
||||||
|
the host before invoking the script:
|
||||||
|
```bash
|
||||||
|
export MYSQL_PWD="$(docker exec stormy_mysql printenv MYSQL_ROOT_PASSWORD)"
|
||||||
|
```
|
||||||
|
We pull it from the container so we don't have to duplicate the secret on
|
||||||
|
the host. The systemd unit / cron wrapper that runs this script will need
|
||||||
|
the same line.
|
||||||
|
- `--single-transaction` — InnoDB-only consistent snapshot without table
|
||||||
|
locks. wikidb is InnoDB. This is the standard recommendation for live MW
|
||||||
|
databases.
|
||||||
|
- `--quick` — stream rows one at a time instead of buffering whole tables in
|
||||||
|
RAM. Important for large `text` / `revision` tables.
|
||||||
|
- `--routines --triggers --events` — include stored programs. Cheap insurance.
|
||||||
|
- Removed `-uroot -p"$MYSQL_ROOT_PASSWORD"` from the inner sh -c, replaced
|
||||||
|
with `--user=root` + `MYSQL_PWD`.
|
||||||
|
|
||||||
|
### Step 1.2: Remove the broken `tail -n +2` block
|
||||||
|
|
||||||
|
The "warning" it was trying to strip went to stderr, never stdout. The
|
||||||
|
existing code:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tail -n +2 "${BACKUP_TARGET}" > "${BACKUP_TARGET}.tmp"
|
||||||
|
mv "${BACKUP_TARGET}.tmp" "${BACKUP_TARGET}"
|
||||||
|
```
|
||||||
|
|
||||||
|
is silently deleting the first line of real SQL (typically the
|
||||||
|
`-- MySQL dump …` header comment). Delete the block entirely.
|
||||||
|
|
||||||
|
### Step 1.3: Add an integrity check
|
||||||
|
|
||||||
|
After the dump, before declaring success:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# A complete mysqldump always ends with `-- Dump completed on …`.
|
||||||
|
if ! tail -c 200 "${BACKUP_TARGET}" | grep -q 'Dump completed on'; then
|
||||||
|
echo "ERROR: dump file ${BACKUP_TARGET} is missing the completion trailer." >&2
|
||||||
|
echo " mysqldump did not finish successfully." >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Sanity: file should be at least a few MB. Tune the floor as you like.
|
||||||
|
size=$(stat -c %s "${BACKUP_TARGET}")
|
||||||
|
if [ "${size}" -lt $((50 * 1024 * 1024)) ]; then
|
||||||
|
echo "ERROR: dump file ${BACKUP_TARGET} is only ${size} bytes; suspicious." >&2
|
||||||
|
exit 3
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Dump OK: ${BACKUP_TARGET} (${size} bytes)"
|
||||||
|
```
|
||||||
|
|
||||||
|
`set -eux` is already at the top of the script, so any failed step exits
|
||||||
|
non-zero. Good — make sure whatever runs the script (systemd, cron) actually
|
||||||
|
notices that exit code and alerts.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 2: Verify the new script works
|
||||||
|
|
||||||
|
### Step 2.1: Run it
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export MYSQL_PWD="$(docker exec stormy_mysql printenv MYSQL_ROOT_PASSWORD)"
|
||||||
|
source ./environment
|
||||||
|
bash ./scripts/backups/wikidb_dump.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
Time it. On a healthy `--quick` stream, 400 MB of wikidb should take well
|
||||||
|
under a minute on local disk.
|
||||||
|
|
||||||
|
### Step 2.2: Verify the trailer
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tail -c 200 /home/charles/backups/$(date +%Y%m%d)/wikidb_*.sql | tr -d '\0'
|
||||||
|
```
|
||||||
|
|
||||||
|
Must end with `-- Dump completed on YYYY-MM-DD HH:MM:SS`.
|
||||||
|
|
||||||
|
### Step 2.3: Verify the byte count is sane
|
||||||
|
|
||||||
|
It should be **larger** than the truncated 439 MB we saw earlier (because the
|
||||||
|
truncated file was missing the tail end of a table). Compare to the largest
|
||||||
|
recent S3 backup if you have one.
|
||||||
|
|
||||||
|
### Step 2.4: Spot-check the SQL
|
||||||
|
|
||||||
|
```bash
|
||||||
|
head -50 /home/charles/backups/$(date +%Y%m%d)/wikidb_*.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
Should start with `-- MySQL dump …` (NOT with `CREATE TABLE` — if it starts
|
||||||
|
with `CREATE TABLE` then the dead `tail -n +2` is still there, eating the
|
||||||
|
header).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 3: Prove the dump is restorable
|
||||||
|
|
||||||
|
A backup is only a backup if you have actually restored from it. Until then
|
||||||
|
it is a file of unknown provenance.
|
||||||
|
|
||||||
|
### Step 3.1: Spin up a throwaway MySQL container
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run -d --rm \
|
||||||
|
--name wikidb_restore_test \
|
||||||
|
-e MYSQL_ROOT_PASSWORD=temp_test_pw_$$ \
|
||||||
|
mysql:5.7 # or whatever version stormy_mysql is — check with: docker inspect stormy_mysql --format '{{.Config.Image}}'
|
||||||
|
```
|
||||||
|
|
||||||
|
Wait for it to be ready:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
until docker exec wikidb_restore_test sh -c 'mysqladmin -uroot -p"$MYSQL_ROOT_PASSWORD" ping' 2>/dev/null; do
|
||||||
|
sleep 2
|
||||||
|
done
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 3.2: Pipe the dump in
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec -i wikidb_restore_test sh -c 'exec mysql -uroot -p"$MYSQL_ROOT_PASSWORD"' \
|
||||||
|
< /home/charles/backups/$(date +%Y%m%d)/wikidb_*.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
Should complete with no errors.
|
||||||
|
|
||||||
|
### Step 3.3: Run sanity queries against the restored DB
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec wikidb_restore_test sh -c 'exec mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -e "
|
||||||
|
USE wikidb;
|
||||||
|
SELECT COUNT(*) AS pages FROM page;
|
||||||
|
SELECT COUNT(*) AS revisions FROM revision;
|
||||||
|
SELECT COUNT(*) AS texts FROM text;
|
||||||
|
SELECT MAX(rev_timestamp) AS most_recent_edit FROM revision;
|
||||||
|
"'
|
||||||
|
```
|
||||||
|
|
||||||
|
Compare those numbers to live `stormy_mysql`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker exec -i stormy_mysql sh -c 'exec mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -e "
|
||||||
|
USE wikidb;
|
||||||
|
SELECT COUNT(*) FROM page;
|
||||||
|
SELECT COUNT(*) FROM revision;
|
||||||
|
SELECT COUNT(*) FROM text;
|
||||||
|
SELECT MAX(rev_timestamp) FROM revision;
|
||||||
|
"'
|
||||||
|
```
|
||||||
|
|
||||||
|
They should match (allowing for any edits between the dump time and the live
|
||||||
|
query).
|
||||||
|
|
||||||
|
### Step 3.4: Tear down
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker stop wikidb_restore_test
|
||||||
|
```
|
||||||
|
|
||||||
|
`--rm` removes it on stop. No leftover state.
|
||||||
|
|
||||||
|
### Step 3.5: Bake this into a script
|
||||||
|
|
||||||
|
Save the Phase 3 commands as `scripts/backups/wikidb_restore_test.sh` so we
|
||||||
|
can re-run it on demand. It should take a backup file path as its single
|
||||||
|
argument and exit non-zero on any mismatch.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 4: Verify the scheduled-backup path
|
||||||
|
|
||||||
|
Whatever runs `wikidb_dump.sh` on a schedule needs to:
|
||||||
|
|
||||||
|
1. Set `MYSQL_PWD` (or otherwise provide the password) before invoking.
|
||||||
|
2. Actually notice and alert on a non-zero exit.
|
||||||
|
|
||||||
|
### Step 4.1: Find the scheduler
|
||||||
|
|
||||||
|
```bash
|
||||||
|
systemctl list-timers --all | grep -i backup
|
||||||
|
ls /etc/systemd/system/ | grep -i backup
|
||||||
|
crontab -l
|
||||||
|
sudo crontab -l
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step 4.2: Inspect whatever you find
|
||||||
|
|
||||||
|
Confirm it sources `./environment` (or otherwise gets `MYSQL_PWD`), runs the
|
||||||
|
script, and surfaces failures (slack canary webhook? email? exit-code check?
|
||||||
|
journalctl?). If the failure path is "we'd notice in the logs eventually,"
|
||||||
|
that is not a failure path.
|
||||||
|
|
||||||
|
### Step 4.3: Trigger the scheduled job manually and confirm a clean run
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo systemctl start <whatever-the-unit-is>.service
|
||||||
|
journalctl -u <whatever-the-unit-is>.service --since "5 min ago"
|
||||||
|
```
|
||||||
|
|
||||||
|
The journal should show the "Dump OK" line from Step 1.3.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Phase 5: Commit and unblock the MySQL work
|
||||||
|
|
||||||
|
### Step 5.1: Commit the script + new restore-test script
|
||||||
|
|
||||||
|
Branch, commit, push, PR. Reference this plan in the PR description.
|
||||||
|
|
||||||
|
### Step 5.2: Update `MySqlNoRootPasswordPlan.md` Step 4 (Take a fresh backup)
|
||||||
|
|
||||||
|
It should now point at the fixed script and the restore-test script — Phase 0
|
||||||
|
of the no-root-password plan should require **both** a successful dump AND a
|
||||||
|
successful restore-test before proceeding.
|
||||||
|
|
||||||
|
### Step 5.3: Resume the MySQL no-root-password migration
|
||||||
|
|
||||||
|
Only after Phase 3 above has passed at least once on a fresh dump.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Rollback
|
||||||
|
|
||||||
|
There is nothing to roll back in Phase 0–3 — we are only modifying a script
|
||||||
|
and creating throwaway containers. If the new script doesn't work, the old
|
||||||
|
script is in git history (`git checkout -- scripts/backups/wikidb_dump.sh`)
|
||||||
|
and we are no worse off than we are right now (which is: backups are
|
||||||
|
broken).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Notes / open questions
|
||||||
|
|
||||||
|
- **How long has this been broken?** Answer with Phase 0.3 + 0.4. If every
|
||||||
|
recent dump is truncated, this has been broken since whenever the wiki grew
|
||||||
|
past the first PTY-buffer-stall threshold. We should figure out an
|
||||||
|
approximate date so we know what window of "we thought we had backups" was
|
||||||
|
fictional.
|
||||||
|
- **Why no alert?** Phase 4 needs to answer this. A backup pipeline that can
|
||||||
|
silently produce 439 MB of garbage for an unknown number of days is the
|
||||||
|
real bug. The script fix is necessary but not sufficient.
|
||||||
|
- **Should we move off `mysqldump` entirely?** For a database this size,
|
||||||
|
`mysqldump` is fine. Not worth re-architecting. The fix is one flag and
|
||||||
|
one integrity check.
|
||||||
|
- **`docker exec -t` elsewhere in the repo?** Worth a grep — same bug pattern
|
||||||
|
could exist in any other backup or maintenance script.
|
||||||
19
Troubleshooting.md
Normal file
19
Troubleshooting.md
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
To get a shell in a container that has been created, before it is running in a pod, use `docker run`:
|
||||||
|
|
||||||
|
```
|
||||||
|
docker run --rm -it --entrypoint bash <image-name-or-id>
|
||||||
|
|
||||||
|
|
||||||
|
docker run --rm -it --entrypoint bash pod-charlesreid1_stormy_mediawiki
|
||||||
|
```
|
||||||
|
|
||||||
|
To get a shell in a container that is running in a pod, use `docker exec`:
|
||||||
|
|
||||||
|
```
|
||||||
|
docker exec -it <image-name> /bin/bash
|
||||||
|
|
||||||
|
docker exec -it stormy_mw /bin/bash
|
||||||
|
```
|
||||||
|
|
||||||
|
Also, if no changes are being picked up, and you've already tried rebuilding the container image, try editing the Dockerfile.
|
||||||
|
|
||||||
@@ -6,12 +6,14 @@
|
|||||||
;; https://github.com/go-gitea/gitea/blob/master/conf/app.ini
|
;; https://github.com/go-gitea/gitea/blob/master/conf/app.ini
|
||||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||||
|
|
||||||
APP_NAME = {{ gitea_app_name }}
|
APP_NAME = {{ pod_charlesreid1_gitea_app_name }}
|
||||||
RUN_USER = git
|
RUN_USER = git
|
||||||
RUN_MODE = prod
|
RUN_MODE = prod
|
||||||
|
WORK_PATH = /data/gitea
|
||||||
|
|
||||||
[ui]
|
[ui]
|
||||||
DEFAULT_THEME = arc-green
|
DEFAULT_THEME = gitea-dark
|
||||||
|
THEMES = gitea-dark
|
||||||
|
|
||||||
[database]
|
[database]
|
||||||
DB_TYPE = sqlite3
|
DB_TYPE = sqlite3
|
||||||
@@ -31,17 +33,17 @@ DISABLE_HTTP_GIT = false
|
|||||||
|
|
||||||
[server]
|
[server]
|
||||||
PROTOCOL = http
|
PROTOCOL = http
|
||||||
DOMAIN = git.{{ server_name_default }}
|
DOMAIN = git.{{ pod_charlesreid1_server_name }}
|
||||||
#CERT_FILE = /www/gitea/certs/cert.pem
|
#CERT_FILE = /www/gitea/certs/cert.pem
|
||||||
#KEY_FILE = /www/gitea/certs/key.pem
|
#KEY_FILE = /www/gitea/certs/key.pem
|
||||||
SSH_DOMAIN = git.{{ server_name_default }}
|
SSH_DOMAIN = git.{{ pod_charlesreid1_server_name }}
|
||||||
HTTP_PORT = 3000
|
HTTP_PORT = 3000
|
||||||
HTTP_ADDR = 0.0.0.0
|
HTTP_ADDR = 0.0.0.0
|
||||||
ROOT_URL = https://git.{{ server_name_default }}
|
ROOT_URL = https://git.{{ pod_charlesreid1_server_name }}
|
||||||
;ROOT_URL = %(PROTOCOL)s://%(DOMAIN)s:%(HTTP_PORT)s/
|
;ROOT_URL = %(PROTOCOL)s://%(DOMAIN)s:%(HTTP_PORT)s/
|
||||||
DISABLE_SSH = false
|
DISABLE_SSH = false
|
||||||
; port to display in clone url:
|
; port to display in clone url:
|
||||||
SSH_PORT = 222
|
;SSH_PORT = 222
|
||||||
; port for built-in ssh server to listen on:
|
; port for built-in ssh server to listen on:
|
||||||
SSH_LISTEN_PORT = 22
|
SSH_LISTEN_PORT = 22
|
||||||
OFFLINE_MODE = false
|
OFFLINE_MODE = false
|
||||||
@@ -92,9 +94,9 @@ ENABLED = false
|
|||||||
|
|
||||||
[security]
|
[security]
|
||||||
INSTALL_LOCK = true
|
INSTALL_LOCK = true
|
||||||
SECRET_KEY = {{ gitea_secret_key }}
|
SECRET_KEY = {{ pod_charlesreid1_gitea_secretkey }}
|
||||||
MIN_PASSWORD_LENGTH = 6
|
MIN_PASSWORD_LENGTH = 10
|
||||||
INTERNAL_TOKEN = {{ gitea_internal_token }}
|
INTERNAL_TOKEN = {{ pod_charlesreid1_gitea_internaltoken }}
|
||||||
|
|
||||||
[other]
|
[other]
|
||||||
SHOW_FOOTER_BRANDING = false
|
SHOW_FOOTER_BRANDING = false
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
FROM mediawiki
|
FROM mediawiki:1.34
|
||||||
|
|
||||||
EXPOSE 8989
|
EXPOSE 8989
|
||||||
|
|
||||||
@@ -41,17 +41,13 @@ RUN chown -R www-data:www-data /var/www/html/*
|
|||||||
# Skins
|
# Skins
|
||||||
COPY charlesreid1-config/mediawiki/skins /var/www/html/skins
|
COPY charlesreid1-config/mediawiki/skins /var/www/html/skins
|
||||||
RUN chown -R www-data:www-data /var/www/html/skins
|
RUN chown -R www-data:www-data /var/www/html/skins
|
||||||
|
RUN touch /var/www/html/skins
|
||||||
|
|
||||||
# Settings
|
# Settings
|
||||||
COPY charlesreid1-config/mediawiki/LocalSettings.php /var/www/html/LocalSettings.php
|
COPY charlesreid1-config/mediawiki/LocalSettings.php /var/www/html/LocalSettings.php
|
||||||
RUN chown -R www-data:www-data /var/www/html/LocalSettings*
|
RUN chown -R www-data:www-data /var/www/html/LocalSettings*
|
||||||
RUN chmod 600 /var/www/html/LocalSettings.php
|
RUN chmod 600 /var/www/html/LocalSettings.php
|
||||||
|
|
||||||
# MediaWiki Fail2ban log directory
|
|
||||||
RUN mkdir -p /var/log/mwf2b
|
|
||||||
RUN chown -R www-data:www-data /var/log/mwf2b
|
|
||||||
RUN chmod 700 /var/log/mwf2b
|
|
||||||
|
|
||||||
# Apache conf file
|
# Apache conf file
|
||||||
COPY charlesreid1-config/apache/*.conf /etc/apache2/sites-enabled/
|
COPY charlesreid1-config/apache/*.conf /etc/apache2/sites-enabled/
|
||||||
RUN a2enmod rewrite
|
RUN a2enmod rewrite
|
||||||
@@ -59,4 +55,10 @@ RUN service apache2 restart
|
|||||||
|
|
||||||
## make texvc
|
## make texvc
|
||||||
#CMD cd /var/www/html/extensions/Math && make && apache2-foreground
|
#CMD cd /var/www/html/extensions/Math && make && apache2-foreground
|
||||||
|
|
||||||
|
# PHP conf file
|
||||||
|
# https://hub.docker.com/_/php/
|
||||||
|
COPY php/php.ini /usr/local/etc/php/
|
||||||
|
|
||||||
|
# Start
|
||||||
CMD apache2-foreground
|
CMD apache2-foreground
|
||||||
|
|||||||
@@ -5,6 +5,10 @@ To update the MediaWiki skin:
|
|||||||
- Rebuild the MW container while the docker pod is still running (won't affect the docker pod)
|
- Rebuild the MW container while the docker pod is still running (won't affect the docker pod)
|
||||||
- When finished rebuilding the MW container, restart the docker pod.
|
- When finished rebuilding the MW container, restart the docker pod.
|
||||||
|
|
||||||
|
The skin currently in use is in `charlesreid1-config/mediawiki/skins/Bootstrap2`
|
||||||
|
|
||||||
|
To rebuild and then restart the pod:
|
||||||
|
|
||||||
```
|
```
|
||||||
# switch to main pod directory
|
# switch to main pod directory
|
||||||
cd ../
|
cd ../
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
ServerName {{ server_name_default }}
|
ServerName {{ pod_charlesreid1_server_name }}
|
||||||
|
|
||||||
Listen 8989
|
Listen 8989
|
||||||
|
|
||||||
@@ -7,10 +7,10 @@ Listen 8989
|
|||||||
# talks to apache via 127.0.0.1
|
# talks to apache via 127.0.0.1
|
||||||
# on port 8989
|
# on port 8989
|
||||||
|
|
||||||
ServerAlias www.{{ server_name_default }}
|
ServerAlias www.{{ pod_charlesreid1_server_name }}
|
||||||
|
|
||||||
LogLevel warn
|
LogLevel warn
|
||||||
ServerAdmin {{ admin_email }}
|
ServerAdmin {{ pod_charlesreid1_mediawiki_admin_email }}
|
||||||
DirectoryIndex index.html index.cgi index.php
|
DirectoryIndex index.html index.cgi index.php
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -13,8 +13,8 @@ if ( !defined( 'MEDIAWIKI' ) ) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
## The protocol and server name to use in fully-qualified URLs
|
## The protocol and server name to use in fully-qualified URLs
|
||||||
$wgServer = 'https://{{ server_name_default }}';
|
$wgServer = 'https://{{ pod_charlesreid1_server_name }}';
|
||||||
$wgCanonicalServer = 'https://{{ server_name_default }}';
|
$wgCanonicalServer = 'https://{{ pod_charlesreid1_server_name }}';
|
||||||
|
|
||||||
## The URL path to static resources (images, scripts, etc.)
|
## The URL path to static resources (images, scripts, etc.)
|
||||||
$wgStylePath = "$wgScriptPath/skins";
|
$wgStylePath = "$wgScriptPath/skins";
|
||||||
@@ -47,6 +47,7 @@ $wgDBmysql5 = true;
|
|||||||
|
|
||||||
# Shared memory settings
|
# Shared memory settings
|
||||||
$wgMainCacheType = CACHE_ACCEL;
|
$wgMainCacheType = CACHE_ACCEL;
|
||||||
|
$wgCacheDirectory = "$IP/cache";
|
||||||
$wgMemCachedServers = [];
|
$wgMemCachedServers = [];
|
||||||
|
|
||||||
# To enable image uploads, make sure the 'images' directory
|
# To enable image uploads, make sure the 'images' directory
|
||||||
@@ -104,7 +105,7 @@ $wgAuthenticationTokenVersion = "1";
|
|||||||
|
|
||||||
# Site upgrade key. Must be set to a string (default provided) to turn on the
|
# Site upgrade key. Must be set to a string (default provided) to turn on the
|
||||||
# web installer while LocalSettings.php is in place
|
# web installer while LocalSettings.php is in place
|
||||||
$wgUpgradeKey = "984c1d9858dabc27";
|
$wgUpgradeKey = getenv('MEDIAWIKI_UPGRADEKEY');
|
||||||
|
|
||||||
# No license info
|
# No license info
|
||||||
$wgRightsPage = "";
|
$wgRightsPage = "";
|
||||||
@@ -156,7 +157,7 @@ $wgPutIPinRC=true;
|
|||||||
# Getting some weird "Error creating thumbnail: Invalid thumbnail parameters" messages w/ thumbnail
|
# Getting some weird "Error creating thumbnail: Invalid thumbnail parameters" messages w/ thumbnail
|
||||||
# http://www.gossamer-threads.com/lists/wiki/mediawiki/169439
|
# http://www.gossamer-threads.com/lists/wiki/mediawiki/169439
|
||||||
$wgMaxImageArea=64000000;
|
$wgMaxImageArea=64000000;
|
||||||
$wgMaxShellMemory=0;
|
$wgMaxShellMemory=512000;
|
||||||
|
|
||||||
$wgFavicon="$wgScriptPath/favicon.ico";
|
$wgFavicon="$wgScriptPath/favicon.ico";
|
||||||
|
|
||||||
@@ -209,13 +210,6 @@ wfLoadExtension( 'EmbedVideo' );
|
|||||||
|
|
||||||
require_once "$IP/extensions/Math/Math.php";
|
require_once "$IP/extensions/Math/Math.php";
|
||||||
|
|
||||||
#############################################
|
|
||||||
# Fail2banlog extension
|
|
||||||
# https://www.mediawiki.org/wiki/Extension:Fail2banlog
|
|
||||||
|
|
||||||
require_once "$IP/extensions/Fail2banlog/Fail2banlog.php";
|
|
||||||
$wgFail2banlogfile = "/var/log/apache2/mwf2b.log";
|
|
||||||
|
|
||||||
#############################################
|
#############################################
|
||||||
# Fix cookies crap
|
# Fix cookies crap
|
||||||
|
|
||||||
@@ -224,7 +218,7 @@ session_save_path("/tmp");
|
|||||||
##############################################
|
##############################################
|
||||||
# Secure login
|
# Secure login
|
||||||
|
|
||||||
$wgServer = "https://{{ server_name_default }}";
|
$wgServer = "https://{{ pod_charlesreid1_server_name }}";
|
||||||
$wgSecureLogin = true;
|
$wgSecureLogin = true;
|
||||||
|
|
||||||
###################################
|
###################################
|
||||||
|
|||||||
@@ -1,93 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# clone or download each extension
|
|
||||||
# and build o
|
|
||||||
|
|
||||||
mkdir -p extensions
|
|
||||||
(
|
|
||||||
cd extensions
|
|
||||||
|
|
||||||
##############################
|
|
||||||
|
|
||||||
Extension="SyntaxHighlight_GeSHi"
|
|
||||||
if [ ! -d ${Extension} ]
|
|
||||||
then
|
|
||||||
## This requires mediawiki > 1.31
|
|
||||||
## (so does REL1_31)
|
|
||||||
#git clone https://github.com/wikimedia/mediawiki-extensions-SyntaxHighlight_GeSHi.git SyntaxHighlight_GeSHi
|
|
||||||
|
|
||||||
## This manually downloads REL1_30
|
|
||||||
#wget https://extdist.wmflabs.org/dist/extensions/SyntaxHighlight_GeSHi-REL1_30-87392f1.tar.gz -O SyntaxHighlight_GeSHi.tar.gz
|
|
||||||
#tar -xzf SyntaxHighlight_GeSHi.tar.gz -C ${PWD}
|
|
||||||
#rm -f SyntaxHighlight_GeSHi.tar.gz
|
|
||||||
|
|
||||||
# Best of both worlds
|
|
||||||
git clone https://github.com/wikimedia/mediawiki-extensions-SyntaxHighlight_GeSHi.git SyntaxHighlight_GeSHi
|
|
||||||
(
|
|
||||||
cd ${Extension}
|
|
||||||
git checkout --track remotes/origin/REL1_34
|
|
||||||
)
|
|
||||||
else
|
|
||||||
echo "Skipping ${Extension}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
##############################
|
|
||||||
|
|
||||||
Extension="ParserFunctions"
|
|
||||||
if [ ! -d ${Extension} ]
|
|
||||||
then
|
|
||||||
git clone https://github.com/wikimedia/mediawiki-extensions-ParserFunctions.git ${Extension}
|
|
||||||
(
|
|
||||||
cd ${Extension}
|
|
||||||
git checkout --track remotes/origin/REL1_34
|
|
||||||
)
|
|
||||||
else
|
|
||||||
echo "Skipping ${Extension}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
##############################
|
|
||||||
|
|
||||||
Extension="EmbedVideo"
|
|
||||||
if [ ! -d ${Extension} ]
|
|
||||||
then
|
|
||||||
git clone https://github.com/HydraWiki/mediawiki-embedvideo.git ${Extension}
|
|
||||||
(
|
|
||||||
cd ${Extension}
|
|
||||||
git checkout v2.7.3
|
|
||||||
)
|
|
||||||
else
|
|
||||||
echo "Skipping ${Extension}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
##############################
|
|
||||||
|
|
||||||
Extension="Math"
|
|
||||||
if [ ! -d ${Extension} ]
|
|
||||||
then
|
|
||||||
git clone https://github.com/wikimedia/mediawiki-extensions-Math.git ${Extension}
|
|
||||||
(
|
|
||||||
cd ${Extension}
|
|
||||||
git checkout REL1_34
|
|
||||||
)
|
|
||||||
else
|
|
||||||
echo "Skipping ${Extension}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
##############################
|
|
||||||
|
|
||||||
Extension="Fail2banlog"
|
|
||||||
if [ ! -d ${Extension} ]
|
|
||||||
then
|
|
||||||
git clone https://github.com/charlesreid1-docker/mw-fail2ban.git ${Extension}
|
|
||||||
(
|
|
||||||
cd ${Extension}
|
|
||||||
git checkout master
|
|
||||||
)
|
|
||||||
else
|
|
||||||
echo "Skipping ${Extension}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
##############################
|
|
||||||
|
|
||||||
# fin
|
|
||||||
)
|
|
||||||
@@ -106,7 +106,7 @@ include('/var/www/html/skins/Bootstrap2/navbar.php');
|
|||||||
<div class="container-fixed">
|
<div class="container-fixed">
|
||||||
<div class="navbar-header">
|
<div class="navbar-header">
|
||||||
<a href="/wiki/" class="navbar-brand">
|
<a href="/wiki/" class="navbar-brand">
|
||||||
{{ top_domain }} wiki
|
{{ pod_charlesreid1_server_name }} wiki
|
||||||
</a>
|
</a>
|
||||||
</div>
|
</div>
|
||||||
<div>
|
<div>
|
||||||
|
|||||||
@@ -11,7 +11,7 @@
|
|||||||
</span>
|
</span>
|
||||||
Made from the command line with vim by
|
Made from the command line with vim by
|
||||||
<a href="http://charlesreid1.com">charlesreid1</a><br />
|
<a href="http://charlesreid1.com">charlesreid1</a><br />
|
||||||
with help from <a href="https://getbootstrap.com/">Bootstrap</a> and <a href="http://getpelican.com">Pelican</a>.
|
with help from <a href="https://getbootstrap.com/">Bootstrap</a> and <a href="http://mediawiki.org">MediaWiki</a>.
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p style="text-align: center">
|
<p style="text-align: center">
|
||||||
|
|||||||
@@ -6,14 +6,14 @@
|
|||||||
<span class="icon-bar"></span>
|
<span class="icon-bar"></span>
|
||||||
<span class="icon-bar"></span>
|
<span class="icon-bar"></span>
|
||||||
</button>
|
</button>
|
||||||
<a href="/" class="navbar-brand">{{ top_domain }}</a>
|
<a href="/" class="navbar-brand">{{ pod_charlesreid1_server_name }}</a>
|
||||||
</div>
|
</div>
|
||||||
<div>
|
<div>
|
||||||
<div class="collapse navbar-collapse" id="myNavbar">
|
<div class="collapse navbar-collapse" id="myNavbar">
|
||||||
<ul class="nav navbar-nav">
|
<ul class="nav navbar-nav">
|
||||||
|
|
||||||
<li>
|
<li>
|
||||||
<a href="https://{{ top_domain }}/wiki">Wiki</a>
|
<a href="https://{{ pod_charlesreid1_server_name }}/wiki">Wiki</a>
|
||||||
</li>
|
</li>
|
||||||
|
|
||||||
</ul>
|
</ul>
|
||||||
|
|||||||
@@ -1086,7 +1086,8 @@ html {
|
|||||||
}
|
}
|
||||||
body {
|
body {
|
||||||
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
|
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
|
||||||
font-size: 14px;
|
/*font-size: 14px;*/
|
||||||
|
font-size: 20px;
|
||||||
line-height: 1.42857143;
|
line-height: 1.42857143;
|
||||||
color: #c8c8c8;
|
color: #c8c8c8;
|
||||||
background-color: #272b30;
|
background-color: #272b30;
|
||||||
|
|||||||
3
d-mediawiki/php/php.ini
Normal file
3
d-mediawiki/php/php.ini
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
post_max_size = 128M
|
||||||
|
memory_limit = 128M
|
||||||
|
upload_max_filesize = 100M
|
||||||
@@ -4,8 +4,4 @@ MAINTAINER charles@charlesreid1.com
|
|||||||
# make mysql data a volume
|
# make mysql data a volume
|
||||||
VOLUME ["/var/lib/mysql"]
|
VOLUME ["/var/lib/mysql"]
|
||||||
|
|
||||||
# put password in a password file
|
|
||||||
RUN printf "[client]\nuser=root\npassword=$MYSQL_ROOT_PASSWORD" > /root/.mysql.rootpw.cnf
|
|
||||||
RUN chmod 0600 /root/.mysql.rootpw.cnf
|
|
||||||
|
|
||||||
RUN chown mysql:mysql /var/lib/mysql
|
RUN chown mysql:mysql /var/lib/mysql
|
||||||
|
|||||||
5
d-mysql/conf.d/slow-log.cnf
Normal file
5
d-mysql/conf.d/slow-log.cnf
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
[mysqld]
|
||||||
|
slow_query_log = 1
|
||||||
|
slow_query_log_file = /var/log/mysql/mysql-slow.log
|
||||||
|
long_query_time = 2
|
||||||
|
log_queries_not_using_indexes = 0
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
# https://serverfault.com/a/525011
|
|
||||||
server {
|
|
||||||
server_name _;
|
|
||||||
listen *:80 default_server deferred;
|
|
||||||
return 444;
|
|
||||||
}
|
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
####################
|
####################
|
||||||
#
|
#
|
||||||
# {{ server_name_default }}
|
# {{ pod_charlesreid1_server_name }}
|
||||||
# http/{{ port_default }}
|
# http/{{ port_default }}
|
||||||
#
|
#
|
||||||
# basically, just redirects to https
|
# basically, just redirects to https
|
||||||
@@ -10,20 +10,20 @@
|
|||||||
server {
|
server {
|
||||||
listen 80;
|
listen 80;
|
||||||
listen [::]:80;
|
listen [::]:80;
|
||||||
server_name {{ server_name_default }};
|
server_name {{ pod_charlesreid1_server_name }};
|
||||||
return 301 https://{{ server_name_default }}$request_uri;
|
return 301 https://{{ pod_charlesreid1_server_name }}$request_uri;
|
||||||
}
|
}
|
||||||
|
|
||||||
server {
|
server {
|
||||||
listen 80;
|
listen 80;
|
||||||
listen [::]:80;
|
listen [::]:80;
|
||||||
server_name www.{{ server_name_default }};
|
server_name www.{{ pod_charlesreid1_server_name }};
|
||||||
return 301 https://www.{{ server_name_default }}$request_uri;
|
return 301 https://www.{{ pod_charlesreid1_server_name }}$request_uri;
|
||||||
}
|
}
|
||||||
|
|
||||||
server {
|
server {
|
||||||
listen 80;
|
listen 80;
|
||||||
listen [::]:80;
|
listen [::]:80;
|
||||||
server_name git.{{ server_name_default }};
|
server_name git.{{ pod_charlesreid1_server_name }};
|
||||||
return 301 https://git.{{ server_name_default }}$request_uri;
|
return 301 https://git.{{ pod_charlesreid1_server_name }}$request_uri;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
####################
|
####################
|
||||||
#
|
#
|
||||||
# {{ server_name_default }}
|
# {{ pod_charlesreid1_server_name }}
|
||||||
# https/443
|
# https/443
|
||||||
#
|
#
|
||||||
# {{ server_name_default }} and www.{{ server_name_default }}
|
# {{ pod_charlesreid1_server_name }} and www.{{ pod_charlesreid1_server_name }}
|
||||||
# should handle the following cases:
|
# should handle the following cases:
|
||||||
# - w/ and wiki/ should reverse proxy story_mw
|
# - w/ and wiki/ should reverse proxy story_mw
|
||||||
# - gitea subdomain should reverse proxy stormy_gitea
|
# - gitea subdomain should reverse proxy stormy_gitea
|
||||||
@@ -15,30 +15,46 @@
|
|||||||
server {
|
server {
|
||||||
listen 443 ssl;
|
listen 443 ssl;
|
||||||
listen [::]:443 ssl;
|
listen [::]:443 ssl;
|
||||||
server_name {{ server_name_default }} default_server;
|
server_name {{ pod_charlesreid1_server_name }};
|
||||||
|
|
||||||
ssl_certificate /etc/letsencrypt/live/{{ server_name_default }}/fullchain.pem;
|
ssl_certificate /etc/letsencrypt/live/{{ pod_charlesreid1_server_name }}/fullchain.pem;
|
||||||
ssl_certificate_key /etc/letsencrypt/live/{{ server_name_default }}/privkey.pem;
|
ssl_certificate_key /etc/letsencrypt/live/{{ pod_charlesreid1_server_name }}/privkey.pem;
|
||||||
include /etc/letsencrypt/options-ssl-nginx.conf;
|
include /etc/letsencrypt/options-ssl-nginx.conf;
|
||||||
include /etc/nginx/conf.d/secheaders.conf;
|
include /etc/nginx/conf.d/secheaders.conf;
|
||||||
include /etc/nginx/conf.d/csp.conf;
|
include /etc/nginx/conf.d/csp.conf;
|
||||||
|
|
||||||
location / {
|
location / {
|
||||||
try_files $uri $uri/ =404;
|
try_files $uri $uri/ =404;
|
||||||
root /www/{{ server_name_default }}/htdocs;
|
root /www/{{ pod_charlesreid1_server_name }}/htdocs;
|
||||||
index index.html;
|
index index.html;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
location = /robots.txt {
|
||||||
|
alias /var/www/robots/robots.txt;
|
||||||
|
}
|
||||||
|
|
||||||
location /wiki/ {
|
location /wiki/ {
|
||||||
|
# Apply rate limit here.
|
||||||
|
limit_req zone=gitealimit burst=20 nodelay;
|
||||||
|
# Limit download rate to 500 KB/s per connection (4 Mbps)
|
||||||
|
limit_rate 500k;
|
||||||
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
proxy_set_header X-Forwarded-For $remote_addr;
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
proxy_set_header Host $host;
|
proxy_set_header Host $host;
|
||||||
proxy_pass http://stormy_mw:8989/wiki/;
|
proxy_pass http://stormy_mw:8989/wiki/;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /w/ {
|
location /w/ {
|
||||||
|
# Apply rate limit here.
|
||||||
|
limit_req zone=gitealimit burst=20 nodelay;
|
||||||
|
# Limit download rate to 500 KB/s per connection (4 Mbps)
|
||||||
|
limit_rate 500k;
|
||||||
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
proxy_set_header X-Forwarded-For $remote_addr;
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
proxy_set_header Host $host;
|
proxy_set_header Host $host;
|
||||||
proxy_pass http://stormy_mw:8989/w/;
|
proxy_pass http://stormy_mw:8989/w/;
|
||||||
}
|
}
|
||||||
@@ -55,31 +71,43 @@ server {
|
|||||||
server {
|
server {
|
||||||
listen 443 ssl;
|
listen 443 ssl;
|
||||||
listen [::]:443 ssl;
|
listen [::]:443 ssl;
|
||||||
server_name www.{{ server_name_default }};
|
server_name www.{{ pod_charlesreid1_server_name }};
|
||||||
|
|
||||||
ssl_certificate /etc/letsencrypt/live/www.{{ server_name_default }}/fullchain.pem;
|
ssl_certificate /etc/letsencrypt/live/www.{{ pod_charlesreid1_server_name }}/fullchain.pem;
|
||||||
ssl_certificate_key /etc/letsencrypt/live/www.{{ server_name_default }}/privkey.pem;
|
ssl_certificate_key /etc/letsencrypt/live/www.{{ pod_charlesreid1_server_name }}/privkey.pem;
|
||||||
include /etc/letsencrypt/options-ssl-nginx.conf;
|
include /etc/letsencrypt/options-ssl-nginx.conf;
|
||||||
include /etc/nginx/conf.d/secheaders.conf;
|
include /etc/nginx/conf.d/secheaders.conf;
|
||||||
include /etc/nginx/conf.d/csp.conf;
|
include /etc/nginx/conf.d/csp.conf;
|
||||||
|
|
||||||
root /www/{{ server_name_default }}/htdocs;
|
root /www/{{ pod_charlesreid1_server_name }}/htdocs;
|
||||||
|
|
||||||
location / {
|
location / {
|
||||||
try_files $uri $uri/ =404;
|
try_files $uri $uri/ =404;
|
||||||
index index.html;
|
index index.html;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
location = /robots.txt {
|
||||||
|
alias /var/www/robots/robots.txt;
|
||||||
|
}
|
||||||
|
|
||||||
location /wiki/ {
|
location /wiki/ {
|
||||||
|
limit_req zone=gitealimit burst=20 nodelay;
|
||||||
|
limit_rate 500k;
|
||||||
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
proxy_set_header X-Forwarded-For $remote_addr;
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
proxy_set_header Host $host;
|
proxy_set_header Host $host;
|
||||||
proxy_pass http://stormy_mw:8989/wiki/;
|
proxy_pass http://stormy_mw:8989/wiki/;
|
||||||
}
|
}
|
||||||
|
|
||||||
location /w/ {
|
location /w/ {
|
||||||
|
# Apply rate limit here.
|
||||||
|
limit_req zone=gitealimit burst=20 nodelay;
|
||||||
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
proxy_set_header X-Forwarded-For $remote_addr;
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
proxy_set_header Host $host;
|
proxy_set_header Host $host;
|
||||||
proxy_pass http://stormy_mw:8989/w/;
|
proxy_pass http://stormy_mw:8989/w/;
|
||||||
}
|
}
|
||||||
@@ -94,18 +122,29 @@ server {
|
|||||||
server {
|
server {
|
||||||
listen 443 ssl;
|
listen 443 ssl;
|
||||||
listen [::]:443 ssl;
|
listen [::]:443 ssl;
|
||||||
server_name git.{{ server_name_default }};
|
server_name git.{{ pod_charlesreid1_server_name }};
|
||||||
|
|
||||||
ssl_certificate /etc/letsencrypt/live/git.{{ server_name_default }}/fullchain.pem;
|
ssl_certificate /etc/letsencrypt/live/git.{{ pod_charlesreid1_server_name }}/fullchain.pem;
|
||||||
ssl_certificate_key /etc/letsencrypt/live/git.{{ server_name_default }}/privkey.pem;
|
ssl_certificate_key /etc/letsencrypt/live/git.{{ pod_charlesreid1_server_name }}/privkey.pem;
|
||||||
include /etc/letsencrypt/options-ssl-nginx.conf;
|
include /etc/letsencrypt/options-ssl-nginx.conf;
|
||||||
include /etc/nginx/conf.d/secheaders.conf;
|
include /etc/nginx/conf.d/secheaders.conf;
|
||||||
include /etc/nginx/conf.d/giteacsp.conf;
|
include /etc/nginx/conf.d/giteacsp.conf;
|
||||||
|
|
||||||
location / {
|
location / {
|
||||||
|
# Apply the rate limit here.
|
||||||
|
# Allows a burst of 20 requests, but anything beyond the max is queued.
|
||||||
|
limit_req zone=gitealimit burst=20 nodelay;
|
||||||
|
# Limit download rate to 500 KB/s per connection (4 Mbps)
|
||||||
|
limit_rate 500k;
|
||||||
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
proxy_set_header X-Real-IP $remote_addr;
|
||||||
proxy_set_header X-Forwarded-For $remote_addr;
|
proxy_set_header X-Forwarded-For $remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
proxy_set_header Host $host;
|
proxy_set_header Host $host;
|
||||||
proxy_pass http://stormy_gitea:3000/;
|
proxy_pass http://stormy_gitea:3000/;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
location = /robots.txt {
|
||||||
|
alias /var/www/robots/gitea.txt;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
37
d-nginx-charlesreid1/nginx.conf
Normal file
37
d-nginx-charlesreid1/nginx.conf
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
user nginx;
|
||||||
|
worker_processes auto;
|
||||||
|
|
||||||
|
error_log /var/log/nginx/error.log notice;
|
||||||
|
pid /run/nginx.pid;
|
||||||
|
|
||||||
|
|
||||||
|
events {
|
||||||
|
worker_connections 1024;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
http {
|
||||||
|
|
||||||
|
# Gitea rate limiting:
|
||||||
|
# 5 requests per second rate limit
|
||||||
|
limit_req_zone $binary_remote_addr zone=gitealimit:10m rate=5r/s;
|
||||||
|
|
||||||
|
include /etc/nginx/mime.types;
|
||||||
|
default_type application/octet-stream;
|
||||||
|
|
||||||
|
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||||
|
'$status $body_bytes_sent "$http_referer" '
|
||||||
|
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||||
|
|
||||||
|
access_log /var/log/nginx/access.log main;
|
||||||
|
|
||||||
|
sendfile on;
|
||||||
|
#tcp_nopush on;
|
||||||
|
|
||||||
|
keepalive_timeout 65;
|
||||||
|
|
||||||
|
#gzip on;
|
||||||
|
|
||||||
|
include /etc/nginx/conf.d/*.conf;
|
||||||
|
}
|
||||||
|
|
||||||
16
d-nginx-charlesreid1/robots/gitea.txt
Normal file
16
d-nginx-charlesreid1/robots/gitea.txt
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
User-agent: *
|
||||||
|
Disallow: */commit/*
|
||||||
|
Disallow: */src/*
|
||||||
|
Disallow: */tree/*
|
||||||
|
Disallow: */activity/*
|
||||||
|
Disallow: */wiki/*
|
||||||
|
Disallow: */releases/*
|
||||||
|
Disallow: */pulls/*
|
||||||
|
Disallow: */stars
|
||||||
|
Disallow: */watchers
|
||||||
|
Disallow: */forks
|
||||||
|
Disallow: *?tab=activity
|
||||||
|
Disallow: *?tab=stars
|
||||||
|
Disallow: *?tab=following
|
||||||
|
Disallow: *?tab=followers
|
||||||
|
Disallow: *?lang=*
|
||||||
2
d-nginx-charlesreid1/robots/robots.txt
Normal file
2
d-nginx-charlesreid1/robots/robots.txt
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
User-agent: *
|
||||||
|
Disallow: /w/
|
||||||
@@ -5,7 +5,7 @@ services:
|
|||||||
# https://stackoverflow.com/a/39039830
|
# https://stackoverflow.com/a/39039830
|
||||||
|
|
||||||
stormy_gitea:
|
stormy_gitea:
|
||||||
image: gitea/gitea:latest
|
image: gitea/gitea:1.24.5
|
||||||
container_name: stormy_gitea
|
container_name: stormy_gitea
|
||||||
environment:
|
environment:
|
||||||
- USER_UID=1000
|
- USER_UID=1000
|
||||||
@@ -13,6 +13,7 @@ services:
|
|||||||
restart: always
|
restart: always
|
||||||
volumes:
|
volumes:
|
||||||
- "stormy_gitea_data:/data"
|
- "stormy_gitea_data:/data"
|
||||||
|
- "./d-nginx-charlesreid1/robots:/var/www/robots:ro"
|
||||||
- "./d-gitea/custom:/data/gitea"
|
- "./d-gitea/custom:/data/gitea"
|
||||||
- "./d-gitea/data:/app/gitea/data"
|
- "./d-gitea/data:/app/gitea/data"
|
||||||
- "/gitea_repositories:/data/git/repositories"
|
- "/gitea_repositories:/data/git/repositories"
|
||||||
@@ -23,53 +24,68 @@ services:
|
|||||||
max-file: "10"
|
max-file: "10"
|
||||||
ports:
|
ports:
|
||||||
- "22:22"
|
- "22:22"
|
||||||
|
networks:
|
||||||
|
- frontend
|
||||||
|
|
||||||
stormy_mysql:
|
stormy_mysql:
|
||||||
|
restart: always
|
||||||
build: d-mysql
|
build: d-mysql
|
||||||
container_name: stormy_mysql
|
container_name: stormy_mysql
|
||||||
volumes:
|
volumes:
|
||||||
- "stormy_mysql_data:/var/lib/mysql"
|
- "stormy_mysql_data:/var/lib/mysql"
|
||||||
|
- "./d-mysql/conf.d:/etc/mysql/conf.d:ro"
|
||||||
logging:
|
logging:
|
||||||
driver: "json-file"
|
driver: "json-file"
|
||||||
options:
|
options:
|
||||||
max-size: 1m
|
max-size: 1m
|
||||||
max-file: "10"
|
max-file: "10"
|
||||||
environment:
|
environment:
|
||||||
- MYSQL_ROOT_PASSWORD={{ mysql_password }}
|
- MYSQL_ROOT_PASSWORD={{ pod_charlesreid1_mysql_password }}
|
||||||
|
- MYSQL_DATABASE=wikidb
|
||||||
|
- MYSQL_USER=wikiuser
|
||||||
|
- MYSQL_PASSWORD={{ pod_charlesreid1_mysql_wikiuser_password }}
|
||||||
|
networks:
|
||||||
|
- backend
|
||||||
|
|
||||||
stormy_mw:
|
stormy_mw:
|
||||||
|
restart: always
|
||||||
build: d-mediawiki
|
build: d-mediawiki
|
||||||
container_name: stormy_mw
|
container_name: stormy_mw
|
||||||
volumes:
|
volumes:
|
||||||
- "stormy_mw_data:/var/www/html"
|
- "stormy_mw_data:/var/www/html"
|
||||||
- "./mwf2b:/var/log/mwf2b"
|
|
||||||
logging:
|
logging:
|
||||||
driver: "json-file"
|
driver: "json-file"
|
||||||
options:
|
options:
|
||||||
max-size: 1m
|
max-size: 1m
|
||||||
max-file: "10"
|
max-file: "10"
|
||||||
environment:
|
environment:
|
||||||
- MEDIAWIKI_SITE_SERVER=https://{{ server_name_default }}
|
- MEDIAWIKI_SITE_SERVER=https://{{ pod_charlesreid1_server_name }}
|
||||||
- MEDIAWIKI_SECRETKEY={{ mediawiki_secretkey }}
|
- MEDIAWIKI_SECRETKEY={{ pod_charlesreid1_mediawiki_secretkey }}
|
||||||
|
- MEDIAWIKI_UPGRADEKEY={{ pod_charlesreid1_mediawiki_upgradekey }}
|
||||||
- MYSQL_HOST=stormy_mysql
|
- MYSQL_HOST=stormy_mysql
|
||||||
- MYSQL_DATABASE=wikidb
|
- MYSQL_DATABASE=wikidb
|
||||||
- MYSQL_USER=root
|
- MYSQL_USER=wikiuser
|
||||||
- MYSQL_PASSWORD={{ mysql_password }}
|
- MYSQL_PASSWORD={{ pod_charlesreid1_mysql_wikiuser_password }}
|
||||||
depends_on:
|
depends_on:
|
||||||
- stormy_mysql
|
- stormy_mysql
|
||||||
|
networks:
|
||||||
|
- frontend
|
||||||
|
- backend
|
||||||
|
|
||||||
stormy_nginx:
|
stormy_nginx:
|
||||||
restart: always
|
restart: always
|
||||||
image: nginx
|
image: nginx:1.27.5
|
||||||
container_name: stormy_nginx
|
container_name: stormy_nginx
|
||||||
hostname: {{ server_name_default }}
|
hostname: {{ pod_charlesreid1_server_name }}
|
||||||
hostname: charlesreid1.com
|
|
||||||
command: /bin/bash -c "nginx -g 'daemon off;'"
|
command: /bin/bash -c "nginx -g 'daemon off;'"
|
||||||
volumes:
|
volumes:
|
||||||
|
- "./d-nginx-charlesreid1/nginx.conf:/etc/nginx/nginx.conf:ro"
|
||||||
- "./d-nginx-charlesreid1/conf.d:/etc/nginx/conf.d:ro"
|
- "./d-nginx-charlesreid1/conf.d:/etc/nginx/conf.d:ro"
|
||||||
|
- "./d-nginx-charlesreid1/robots:/var/www/robots:ro"
|
||||||
- "/etc/localtime:/etc/localtime:ro"
|
- "/etc/localtime:/etc/localtime:ro"
|
||||||
- "/etc/letsencrypt:/etc/letsencrypt"
|
- "/etc/letsencrypt:/etc/letsencrypt:ro"
|
||||||
- "/www/{{ server_name_default }}/htdocs:/www/{{ server_name_default }}/htdocs:ro"
|
- "/www/{{ pod_charlesreid1_server_name }}/htdocs:/www/{{ pod_charlesreid1_server_name }}/htdocs:ro"
|
||||||
|
- "stormy_nginx_logs:/var/log/nginx"
|
||||||
logging:
|
logging:
|
||||||
driver: "json-file"
|
driver: "json-file"
|
||||||
options:
|
options:
|
||||||
@@ -82,8 +98,15 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- "80:80"
|
- "80:80"
|
||||||
- "443:443"
|
- "443:443"
|
||||||
|
networks:
|
||||||
|
- frontend
|
||||||
|
|
||||||
|
networks:
|
||||||
|
frontend:
|
||||||
|
backend:
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
stormy_mysql_data:
|
stormy_mysql_data:
|
||||||
stormy_mw_data:
|
stormy_mw_data:
|
||||||
stormy_gitea_data:
|
stormy_gitea_data:
|
||||||
|
stormy_nginx_logs:
|
||||||
|
|||||||
9
docs/BlockIps.md
Normal file
9
docs/BlockIps.md
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
To block IP address:
|
||||||
|
|
||||||
|
* Modify the nginx config file template at
|
||||||
|
`d-nginx-charlesreid1/conf.d/https.DOMAIN.conf.j2`
|
||||||
|
* Re-render the Jinja templates into config files via
|
||||||
|
`make clean-templates && make templates`
|
||||||
|
* Stop and restart the pod service:
|
||||||
|
`sudo systemctl stop pod-charlesreid1 &&
|
||||||
|
sudo systemctl start pod-charlesreid1`
|
||||||
@@ -10,10 +10,12 @@ export POD_CHARLESREID1_USER="nonrootuser"
|
|||||||
# ----------
|
# ----------
|
||||||
export POD_CHARLESREID1_MW_ADMIN_EMAIL="email@example.com"
|
export POD_CHARLESREID1_MW_ADMIN_EMAIL="email@example.com"
|
||||||
export POD_CHARLESREID1_MW_SECRET_KEY="SecretKeyString"
|
export POD_CHARLESREID1_MW_SECRET_KEY="SecretKeyString"
|
||||||
|
export POD_CHARLESREID1_MW_UPGRADE_KEY="UpgradeKeyString"
|
||||||
|
|
||||||
# mysql:
|
# mysql:
|
||||||
# ------
|
# ------
|
||||||
export POD_CHARLESREID1_MYSQL_PASSWORD="SuperSecretPassword"
|
export POD_CHARLESREID1_MYSQL_PASSWORD="SuperSecretPassword"
|
||||||
|
export POD_CHARLESREID1_MYSQL_WIKIUSER_PASSWORD="AnotherSecretPassword"
|
||||||
|
|
||||||
# gitea:
|
# gitea:
|
||||||
# ------
|
# ------
|
||||||
|
|||||||
36
environment.j2
Normal file
36
environment.j2
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# multiple templates:
|
||||||
|
# -------------------
|
||||||
|
export POD_CHARLESREID1_DIR="{{ pod_charlesreid1_pod_install_dir }}"
|
||||||
|
export POD_CHARLESREID1_TLD="{{ pod_charlesreid1_server_name }}"
|
||||||
|
export POD_CHARLESREID1_USER="{{ pod_charlesreid1_username }}"
|
||||||
|
export POD_CHARLESREID1_VPN_IP_ADDR="{{ pod_charlesreid1_vpn_ip_addr }}"
|
||||||
|
|
||||||
|
# mediawiki:
|
||||||
|
# ----------
|
||||||
|
export POD_CHARLESREID1_MW_ADMIN_EMAIL="{{ pod_charlesreid1_mediawiki_admin_email }}"
|
||||||
|
export POD_CHARLESREID1_MW_SECRET_KEY="{{ pod_charlesreid1_mediawiki_secretkey }}"
|
||||||
|
|
||||||
|
# mysql:
|
||||||
|
# ------
|
||||||
|
export POD_CHARLESREID1_MYSQL_PASSWORD="{{ pod_charlesreid1_mysql_password }}"
|
||||||
|
export POD_CHARLESREID1_MYSQL_WIKIUSER_PASSWORD="{{ pod_charlesreid1_mysql_wikiuser_password }}"
|
||||||
|
|
||||||
|
# gitea:
|
||||||
|
# ------
|
||||||
|
export POD_CHARLESREID1_GITEA_APP_NAME="{{ pod_charlesreid1_gitea_app_name }}"
|
||||||
|
export POD_CHARLESREID1_GITEA_SECRET_KEY="{{ pod_charlesreid1_gitea_secretkey }}"
|
||||||
|
export POD_CHARLESREID1_GITEA_INTERNAL_TOKEN="{{ pod_charlesreid1_gitea_internaltoken }}"
|
||||||
|
|
||||||
|
# aws:
|
||||||
|
# ----
|
||||||
|
export AWS_ACCESS_KEY_ID="{{ pod_charlesreid1_backups_aws_access_key }}"
|
||||||
|
export AWS_SECRET_ACCESS_KEY="{{ pod_charlesreid1_backups_aws_secret_access_key }}"
|
||||||
|
export AWS_DEFAULT_REGION="{{ pod_charlesreid1_backups_aws_region }}"
|
||||||
|
|
||||||
|
# backups and scripts:
|
||||||
|
# --------------------
|
||||||
|
export POD_CHARLESREID1_BACKUP_DIR="{{ pod_charlesreid1_backups_dir }}"
|
||||||
|
export POD_CHARLESREID1_BACKUP_S3BUCKET="{{ pod_charlesreid1_backups_bucket }}"
|
||||||
|
export POD_CHARLESREID1_CANARY_WEBHOOK="{{ pod_charlesreid1_backups_canary_slack_url }}"
|
||||||
@@ -8,25 +8,28 @@ from jinja2 import Environment, FileSystemLoader, select_autoescape
|
|||||||
|
|
||||||
|
|
||||||
# Should existing files be overwritten
|
# Should existing files be overwritten
|
||||||
OVERWRITE = False
|
OVERWRITE = True
|
||||||
|
|
||||||
# Map of jinja variables to environment variables
|
# Map of jinja variables to environment variables
|
||||||
jinja_to_env = {
|
jinja_to_env = {
|
||||||
"pod_install_dir": "POD_CHARLESREID1_DIR",
|
"pod_charlesreid1_pod_install_dir": "POD_CHARLESREID1_DIR",
|
||||||
"top_domain": "POD_CHARLESREID1_TLD",
|
"pod_charlesreid1_server_name": "POD_CHARLESREID1_TLD",
|
||||||
"server_name_default" : "POD_CHARLESREID1_TLD",
|
"pod_charlesreid1_username": "POD_CHARLESREID1_USER",
|
||||||
"username": "POD_CHARLESREID1_USER",
|
"pod_charlesreid1_vpn_ip_addr": "POD_CHARLESREID1_VPN_IP_ADDR",
|
||||||
# docker-compose:
|
"pod_charlesreid1_mediawiki_admin_email": "POD_CHARLESREID1_MW_ADMIN_EMAIL",
|
||||||
"mysql_password" : "POD_CHARLESREID1_MYSQL_PASSWORD",
|
"pod_charlesreid1_mediawiki_secretkey": "POD_CHARLESREID1_MW_SECRET_KEY",
|
||||||
"mediawiki_secretkey" : "POD_CHARLESREID1_MW_SECRET_KEY",
|
"pod_charlesreid1_mediawiki_upgradekey": "POD_CHARLESREID1_MW_UPGRADE_KEY",
|
||||||
# mediawiki:
|
"pod_charlesreid1_mysql_password": "POD_CHARLESREID1_MYSQL_PASSWORD",
|
||||||
"admin_email": "POD_CHARLESREID1_MW_ADMIN_EMAIL",
|
"pod_charlesreid1_mysql_wikiuser_password": "POD_CHARLESREID1_MYSQL_WIKIUSER_PASSWORD",
|
||||||
# gitea:
|
"pod_charlesreid1_gitea_app_name": "POD_CHARLESREID1_GITEA_APP_NAME",
|
||||||
"gitea_app_name": "POD_CHARLESREID1_GITEA_APP_NAME",
|
"pod_charlesreid1_gitea_secretkey": "POD_CHARLESREID1_GITEA_SECRET_KEY",
|
||||||
"gitea_secret_key": "POD_CHARLESREID1_GITEA_SECRET_KEY",
|
"pod_charlesreid1_gitea_internaltoken": "POD_CHARLESREID1_GITEA_INTERNAL_TOKEN",
|
||||||
"gitea_internal_token": "POD_CHARLESREID1_GITEA_INTERNAL_TOKEN",
|
"pod_charlesreid1_backups_aws_access_key": "AWS_ACCESS_KEY_ID",
|
||||||
# aws:
|
"pod_charlesreid1_backups_aws_secret_access_key": "AWS_SECRET_ACCESS_KEY",
|
||||||
"backup_canary_webhook_url": "POD_CHARLESREID1_CANARY_WEBHOOK",
|
"pod_charlesreid1_backups_aws_region": "AWS_DEFAULT_REGION",
|
||||||
|
"pod_charlesreid1_backups_dir": "POD_CHARLESREID1_BACKUP_DIR",
|
||||||
|
"pod_charlesreid1_backups_bucket": "POD_CHARLESREID1_BACKUP_S3BUCKET",
|
||||||
|
"pod_charlesreid1_backups_canary_slack_url": "POD_CHARLESREID1_CANARY_WEBHOOK",
|
||||||
}
|
}
|
||||||
|
|
||||||
scripts_dir = os.path.dirname(os.path.abspath(__file__))
|
scripts_dir = os.path.dirname(os.path.abspath(__file__))
|
||||||
|
|||||||
28
scripts/backups/10-pod-charlesreid1-rsyslog.conf
Normal file
28
scripts/backups/10-pod-charlesreid1-rsyslog.conf
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
if ( $programname startswith "pod-charlesreid1-canary" ) then {
|
||||||
|
action(type="omfile" file="/var/log/pod-charlesreid1-canary.service.log" flushOnTXEnd="off")
|
||||||
|
stop
|
||||||
|
}
|
||||||
|
if ( $programname startswith "pod-charlesreid1-certbot" ) then {
|
||||||
|
action(type="omfile" file="/var/log/pod-charlesreid1-certbot.service.log" flushOnTXEnd="off")
|
||||||
|
stop
|
||||||
|
}
|
||||||
|
if ( $programname startswith "pod-charlesreid1-backups-aws" ) then {
|
||||||
|
action(type="omfile" file="/var/log/pod-charlesreid1-backups-aws.service.log" flushOnTXEnd="off")
|
||||||
|
stop
|
||||||
|
}
|
||||||
|
if ( $programname startswith "pod-charlesreid1-backups-cleanolderthan" ) then {
|
||||||
|
action(type="omfile" file="/var/log/pod-charlesreid1-backups-cleanolderthan.service.log" flushOnTXEnd="off")
|
||||||
|
stop
|
||||||
|
}
|
||||||
|
if ( $programname startswith "pod-charlesreid1-backups-gitea" ) then {
|
||||||
|
action(type="omfile" file="/var/log/pod-charlesreid1-backups-gitea.service.log" flushOnTXEnd="off")
|
||||||
|
stop
|
||||||
|
}
|
||||||
|
if ( $programname startswith "pod-charlesreid1-backups-wikidb" ) then {
|
||||||
|
action(type="omfile" file="/var/log/pod-charlesreid1-backups-wikidb.service.log" flushOnTXEnd="on")
|
||||||
|
stop
|
||||||
|
}
|
||||||
|
if ( $programname startswith "pod-charlesreid1-backups-wikifiles" ) then {
|
||||||
|
action(type="omfile" file="/var/log/pod-charlesreid1-backups-wikifiles.service.log" flushOnTXEnd="on")
|
||||||
|
stop
|
||||||
|
}
|
||||||
@@ -13,3 +13,40 @@ for the systemd service.
|
|||||||
|
|
||||||
Use `make install` in the top level of this repo to install
|
Use `make install` in the top level of this repo to install
|
||||||
the rendered service and timer files.
|
the rendered service and timer files.
|
||||||
|
|
||||||
|
## syslog filtering
|
||||||
|
|
||||||
|
Due to a bug in systemd bundled with Ubuntu 18.04, we can't just use the nice easy solution of
|
||||||
|
directing output and error to a specific file.
|
||||||
|
|
||||||
|
Instead, the services all send their stderr and stdout to the system log, and then rsyslog
|
||||||
|
filters those messages and collects them into a separate log file.
|
||||||
|
|
||||||
|
First, install the services.
|
||||||
|
|
||||||
|
Then, install the following rsyslog config file:
|
||||||
|
|
||||||
|
`/etc/rsyslog.d/10-pod-charlesreid1-rsyslog.conf`:
|
||||||
|
|
||||||
|
```
|
||||||
|
if $programname == 'pod-charlesreid1-canary' then /var/log/pod-charlesreid1-canary.service.log
|
||||||
|
if $programname == 'pod-charlesreid1-canary' then stop
|
||||||
|
|
||||||
|
if $programname == 'pod-charlesreid1-backups-aws' then /var/log/pod-charlesreid1-backups-aws.service.log
|
||||||
|
if $programname == 'pod-charlesreid1-backups-aws' then stop
|
||||||
|
|
||||||
|
if $programname == 'pod-charlesreid1-backups-cleanolderthan' then /var/log/pod-charlesreid1-backups-cleanolderthan.service.log
|
||||||
|
if $programname == 'pod-charlesreid1-backups-cleanolderthan' then stop
|
||||||
|
|
||||||
|
if $programname == 'pod-charlesreid1-backups-gitea' then /var/log/pod-charlesreid1-backups-gitea.service.log
|
||||||
|
if $programname == 'pod-charlesreid1-backups-gitea' then stop
|
||||||
|
|
||||||
|
if $programname == 'pod-charlesreid1-backups-wikidb' then /var/log/pod-charlesreid1-backups-wikidb.service.log
|
||||||
|
if $programname == 'pod-charlesreid1-backups-wikidb' then stop
|
||||||
|
|
||||||
|
if $programname == 'pod-charlesreid1-backups-wikifiles' then /var/log/pod-charlesreid1-backups-wikifiles.service.log
|
||||||
|
if $programname == 'pod-charlesreid1-backups-wikifiles' then stop
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -37,22 +37,22 @@ if [ "$#" == "0" ]; then
|
|||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
echo "Checking that directory exists"
|
echo "Checking that directory exists"
|
||||||
/usr/bin/test -d ${POD_CHARLESREID1_BACKUP_DIR}
|
/usr/bin/test -d "${POD_CHARLESREID1_BACKUP_DIR}"
|
||||||
|
|
||||||
echo "Checking that we can access the S3 bucket"
|
echo "Checking that we can access the S3 bucket"
|
||||||
aws s3 ls s3://${POD_CHARLESREID1_BACKUP_S3BUCKET} > /dev/null
|
aws s3 ls "s3://${POD_CHARLESREID1_BACKUP_S3BUCKET}" > /dev/null
|
||||||
|
|
||||||
# Get name of last backup, to copy to AWS
|
# Get name of last backup, to copy to AWS
|
||||||
LAST_BACKUP=$(/bin/ls -1 -t ${POD_CHARLESREID1_BACKUP_DIR} | /usr/bin/head -n1)
|
LAST_BACKUP=$(/bin/ls -1 -t "${POD_CHARLESREID1_BACKUP_DIR}" | /usr/bin/head -n1)
|
||||||
echo "Last backup found: ${LAST_BACKUP}"
|
echo "Last backup found: ${LAST_BACKUP}"
|
||||||
echo "Last backup directory: ${POD_CHARLESREID1_BACKUP_DIR}/${LAST_BACKUP}"
|
echo "Last backup directory: ${POD_CHARLESREID1_BACKUP_DIR}/${LAST_BACKUP}"
|
||||||
|
|
||||||
BACKUP_SIZE=$(du -hs ${POD_CHARLESREID1_BACKUP_DIR}/${LAST_BACKUP})
|
BACKUP_SIZE=$(/usr/bin/du -hs "${POD_CHARLESREID1_BACKUP_DIR}/${LAST_BACKUP}" | cut -f 1)
|
||||||
echo "Backup directory size: ${BACKUP_SIZE}"
|
echo "Backup directory size: ${BACKUP_SIZE}"
|
||||||
|
|
||||||
# Copy to AWS
|
# Copy to AWS
|
||||||
echo "Backing up directory ${POD_CHARLESREID1_BACKUP_DIR}/${LAST_BACKUP}"
|
echo "Backing up directory ${POD_CHARLESREID1_BACKUP_DIR}/${LAST_BACKUP}"
|
||||||
aws s3 cp --only-show-errors --recursive ${POD_CHARLESREID1_BACKUP_DIR}/${LAST_BACKUP} s3://${POD_CHARLESREID1_BACKUP_S3BUCKET}/backups/${LAST_BACKUP}
|
aws s3 cp --only-show-errors --no-progress --recursive "${POD_CHARLESREID1_BACKUP_DIR}/${LAST_BACKUP}" "s3://${POD_CHARLESREID1_BACKUP_S3BUCKET}/backups/${LAST_BACKUP}"
|
||||||
echo "Done."
|
echo "Done."
|
||||||
|
|
||||||
else
|
else
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ def main():
|
|||||||
alert(msg)
|
alert(msg)
|
||||||
|
|
||||||
# verify there is a backup newer than N days
|
# verify there is a backup newer than N days
|
||||||
newer_backups = subprocess.getoutput(f'find {backup_dir} -mtime -{N}').split('\n')
|
newer_backups = subprocess.getoutput(f'find {backup_dir}/* -mtime -{N}').split('\n')
|
||||||
if len(newer_backups)==1 and newer_backups[0]=='':
|
if len(newer_backups)==1 and newer_backups[0]=='':
|
||||||
msg = "Local Backups Error:\n"
|
msg = "Local Backups Error:\n"
|
||||||
msg += f"The backup directory `{backup_dir}` is missing backup files from the last {N} day(s)!"
|
msg += f"The backup directory `{backup_dir}` is missing backup files from the last {N} day(s)!"
|
||||||
@@ -35,7 +35,7 @@ def main():
|
|||||||
newest_backup_files = subprocess.getoutput(f'find {newest_backup_path} -type f').split('\n')
|
newest_backup_files = subprocess.getoutput(f'find {newest_backup_path} -type f').split('\n')
|
||||||
|
|
||||||
# verify the most recent backup directory is not empty
|
# verify the most recent backup directory is not empty
|
||||||
if len(newest_backup_files)==1 and newer_backups[0]=='':
|
if len(newest_backup_files)==1 and newest_backup_files[0]=='':
|
||||||
msg = "Local Backups Error:\n"
|
msg = "Local Backups Error:\n"
|
||||||
msg += f"The most recent backup directory `{newest_backup_path}` is empty!"
|
msg += f"The most recent backup directory `{newest_backup_path}` is empty!"
|
||||||
alert(msg)
|
alert(msg)
|
||||||
@@ -48,6 +48,24 @@ def main():
|
|||||||
msg += f"Backup file name: {backup_file}!"
|
msg += f"Backup file name: {backup_file}!"
|
||||||
alert(msg)
|
alert(msg)
|
||||||
|
|
||||||
|
# verify .sql dumps end with the mysqldump completion trailer.
|
||||||
|
# A non-empty file can still be truncated mid-row (e.g. PTY deadlock,
|
||||||
|
# net_write_timeout) — without this check, a 439 MB partial dump looks
|
||||||
|
# healthy to a size-only canary.
|
||||||
|
for backup_file in newest_backup_files:
|
||||||
|
if not backup_file.endswith('.sql'):
|
||||||
|
continue
|
||||||
|
with open(backup_file, 'rb') as f:
|
||||||
|
f.seek(0, os.SEEK_END)
|
||||||
|
f.seek(max(0, f.tell() - 512))
|
||||||
|
tail = f.read()
|
||||||
|
if b'Dump completed on' not in tail:
|
||||||
|
msg = "Local Backups Error:\n"
|
||||||
|
msg += f"SQL backup file `{backup_file}` is missing the "
|
||||||
|
msg += "`-- Dump completed on ...` trailer.\n"
|
||||||
|
msg += "mysqldump did not finish — the dump is truncated and not restorable."
|
||||||
|
alert(msg)
|
||||||
|
|
||||||
# verify the most recent backup files exist in the s3 backups bucket
|
# verify the most recent backup files exist in the s3 backups bucket
|
||||||
bucket_base_path = os.path.join('backups', newest_backup_name)
|
bucket_base_path = os.path.join('backups', newest_backup_name)
|
||||||
for backup_file in newest_backup_files:
|
for backup_file in newest_backup_files:
|
||||||
@@ -64,10 +82,12 @@ def check_exists(bucket_name, bucket_path):
|
|||||||
# File does not exist
|
# File does not exist
|
||||||
msg = "S3 Backups Error:\n"
|
msg = "S3 Backups Error:\n"
|
||||||
msg += f"Failed to find the file `{bucket_path}` in bucket `{bucket_name}`"
|
msg += f"Failed to find the file `{bucket_path}` in bucket `{bucket_name}`"
|
||||||
|
alert(msg)
|
||||||
else:
|
else:
|
||||||
# Problem accessing backups on bucket
|
# Problem accessing backups on bucket
|
||||||
msg = "S3 Backups Error:\n"
|
msg = "S3 Backups Error:\n"
|
||||||
msg += f"Failed to access the file `{bucket_path}` in bucket `{bucket_name}`"
|
msg += f"Failed to access the file `{bucket_path}` in bucket `{bucket_name}`"
|
||||||
|
alert(msg)
|
||||||
|
|
||||||
|
|
||||||
def alert(msg):
|
def alert(msg):
|
||||||
@@ -97,7 +117,7 @@ def alert(msg):
|
|||||||
raise Exception(response.status_code, response.text)
|
raise Exception(response.status_code, response.text)
|
||||||
|
|
||||||
print("Goodbye.")
|
print("Goodbye.")
|
||||||
sys.exit(1)
|
sys.exit(0)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
|||||||
@@ -5,9 +5,10 @@ After=docker.service
|
|||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=oneshot
|
Type=oneshot
|
||||||
StandardError=file:{{ pod_install_dir }}/.pod-charlesreid1-canary.service.error.log
|
StandardError=syslog
|
||||||
StandardOutput=file:{{ pod_install_dir }}/.pod-charlesreid1-canary.service.output.log
|
StandardOutput=syslog
|
||||||
ExecStart=/bin/bash -ac '. {{ pod_install_dir }}/environment; {{ pod_install_dir }}/scripts/backups/canary/vp/bin/python3 {{ pod_install_dir }}/scripts/backups/canary/backups_canary.py'
|
SyslogIdentifier=pod-charlesreid1-canary
|
||||||
|
ExecStartPre=/usr/bin/test -f {{ pod_charlesreid1_pod_install_dir }}/environment
|
||||||
|
ExecStart=/bin/bash -ac '. {{ pod_charlesreid1_pod_install_dir }}/environment; /home/charles/.pyenv/shims/python3 {{ pod_charlesreid1_pod_install_dir }}/scripts/backups/canary/backups_canary.py'
|
||||||
User=charles
|
User=charles
|
||||||
Group=charles
|
Group=charles
|
||||||
|
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
Description=Timer to run the pod-charlesreid1 backups canary
|
Description=Timer to run the pod-charlesreid1 backups canary
|
||||||
|
|
||||||
[Timer]
|
[Timer]
|
||||||
OnCalendar=Sun *-*-* 9:03:00
|
OnCalendar=*-*-* 7:01:00
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=timers.target
|
WantedBy=timers.target
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ set -eux
|
|||||||
|
|
||||||
# Number of days of backups to retain.
|
# Number of days of backups to retain.
|
||||||
# Everything older than this many days will be deleted
|
# Everything older than this many days will be deleted
|
||||||
N="45"
|
N="22"
|
||||||
|
|
||||||
function usage {
|
function usage {
|
||||||
set +x
|
set +x
|
||||||
@@ -39,7 +39,7 @@ if [ "$#" == "0" ]; then
|
|||||||
echo "Backup directory: ${POD_CHARLESREID1_BACKUP_DIR}"
|
echo "Backup directory: ${POD_CHARLESREID1_BACKUP_DIR}"
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
echo "Cleaning backups directory $BACKUP_DIR"
|
echo "Cleaning backups directory $POD_CHARLESREID1_BACKUP_DIR"
|
||||||
echo "The following files older than $N days will be deleted:"
|
echo "The following files older than $N days will be deleted:"
|
||||||
find ${POD_CHARLESREID1_BACKUP_DIR} -mtime +${N}
|
find ${POD_CHARLESREID1_BACKUP_DIR} -mtime +${N}
|
||||||
|
|
||||||
|
|||||||
@@ -53,7 +53,7 @@ if [ "$#" == "0" ]; then
|
|||||||
# We don't need to use docker, since these directories
|
# We don't need to use docker, since these directories
|
||||||
# are both bind-mounted into the Docker container
|
# are both bind-mounted into the Docker container
|
||||||
echo "Backing up custom directory"
|
echo "Backing up custom directory"
|
||||||
tar czf ${CUSTOM_TARGET} ${POD_CHARLESREID1_DIR}/d-gitea/custom
|
tar --exclude='gitea.log' --ignore-failed-read -czf ${CUSTOM_TARGET} ${POD_CHARLESREID1_DIR}/d-gitea/custom
|
||||||
echo "Backing up data directory"
|
echo "Backing up data directory"
|
||||||
tar czf ${DATA_TARGET} ${POD_CHARLESREID1_DIR}/d-gitea/data
|
tar czf ${DATA_TARGET} ${POD_CHARLESREID1_DIR}/d-gitea/data
|
||||||
|
|
||||||
|
|||||||
@@ -5,10 +5,10 @@ After=docker.service
|
|||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=oneshot
|
Type=oneshot
|
||||||
StandardError=file:{{ pod_install_dir }}/.pod-charlesreid1-backups-aws.service.error.log
|
StandardError=syslog
|
||||||
StandardOutput=file:{{ pod_install_dir }}/.pod-charlesreid1-backups-aws.service.output.log
|
StandardOutput=syslog
|
||||||
ExecStartPre=/usr/bin/test -f {{ pod_install_dir }}/environment
|
SyslogIdentifier=pod-charlesreid1-backups-aws
|
||||||
ExecStart=/bin/bash -ac '. {{ pod_install_dir }}/environment; {{ pod_install_dir }}/scripts/backups/aws_backup.sh'
|
ExecStartPre=/usr/bin/test -f {{ pod_charlesreid1_pod_install_dir }}/environment
|
||||||
|
ExecStart=/bin/bash -ac '. {{ pod_charlesreid1_pod_install_dir }}/environment; {{ pod_charlesreid1_pod_install_dir }}/scripts/backups/aws_backup.sh'
|
||||||
User=charles
|
User=charles
|
||||||
Group=charles
|
Group=charles
|
||||||
|
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ Description=Timer to copy the lastest pod-charlesreid1 backup to an S3 bucket
|
|||||||
|
|
||||||
[Timer]
|
[Timer]
|
||||||
OnCalendar=Sun *-*-* 2:56:00
|
OnCalendar=Sun *-*-* 2:56:00
|
||||||
|
#OnCalendar=*-*-* 2:56:00
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=timers.target
|
WantedBy=timers.target
|
||||||
|
|||||||
@@ -1,12 +1,14 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=Copy the latest pod-charlesreid1 backup to an S3 bucket
|
Description=Clean pod-charlesreid1 backups older than N days
|
||||||
Requires=docker.service
|
Requires=docker.service
|
||||||
After=docker.service
|
After=docker.service
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=oneshot
|
Type=oneshot
|
||||||
StandardError=file:{{ pod_install_dir }}/.pod-charlesreid1-backups-cleanolderthan.service.error.log
|
StandardError=syslog
|
||||||
StandardOutput=file:{{ pod_install_dir }}/.pod-charlesreid1-backups-cleanolderthan.service.output.log
|
StandardOutput=syslog
|
||||||
ExecStart=/bin/bash -ac '. {{ pod_install_dir }}/environment; {{ pod_install_dir }}/scripts/backups/clean_olderthan.sh'
|
SyslogIdentifier=pod-charlesreid1-backups-cleanolderthan
|
||||||
|
ExecStartPre=/usr/bin/test -f {{ pod_charlesreid1_pod_install_dir }}/environment
|
||||||
|
ExecStart=/bin/bash -ac '. {{ pod_charlesreid1_pod_install_dir }}/environment; {{ pod_charlesreid1_pod_install_dir }}/scripts/backups/clean_olderthan.sh'
|
||||||
User=charles
|
User=charles
|
||||||
Group=charles
|
Group=charles
|
||||||
|
|||||||
@@ -0,0 +1,9 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=Timer to clean files older than N days from the pod-charlesreid1 backups dir
|
||||||
|
|
||||||
|
[Timer]
|
||||||
|
OnCalendar=Sun *-*-* 2:28:00
|
||||||
|
#OnCalendar=*-*-* 2:28:00
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=timers.target
|
||||||
@@ -5,10 +5,10 @@ After=docker.service
|
|||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=oneshot
|
Type=oneshot
|
||||||
StandardError=file:{{ pod_install_dir }}/.pod-charlesreid1-backups-gitea.service.error.log
|
StandardError=syslog
|
||||||
StandardOutput=file:{{ pod_install_dir }}/.pod-charlesreid1-backups-gitea.service.output.log
|
StandardOutput=syslog
|
||||||
ExecStartPre=/usr/bin/test -f {{ pod_install_dir }}/environment
|
SyslogIdentifier=pod-charlesreid1-backups-gitea
|
||||||
ExecStart=/bin/bash -ac '. {{ pod_install_dir }}/environment; {{ pod_install_dir }}/scripts/backups/gitea_backup.sh'
|
ExecStartPre=/usr/bin/test -f {{ pod_charlesreid1_pod_install_dir }}/environment
|
||||||
|
ExecStart=/bin/bash -ac '. {{ pod_charlesreid1_pod_install_dir }}/environment; {{ pod_charlesreid1_pod_install_dir }}/scripts/backups/gitea_backup.sh'
|
||||||
User=charles
|
User=charles
|
||||||
Group=charles
|
Group=charles
|
||||||
|
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ Description=Timer to back up pod-charlesreid1 gitea files
|
|||||||
|
|
||||||
[Timer]
|
[Timer]
|
||||||
OnCalendar=Sun *-*-* 2:12:00
|
OnCalendar=Sun *-*-* 2:12:00
|
||||||
|
#OnCalendar=*-*-* 2:12:00
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=timers.target
|
WantedBy=timers.target
|
||||||
|
|||||||
@@ -5,10 +5,10 @@ After=docker.service
|
|||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=oneshot
|
Type=oneshot
|
||||||
StandardError=file:{{ pod_install_dir }}/.pod-charlesreid1-backups-wikidb.service.error.log
|
StandardError=syslog
|
||||||
StandardOutput=file:{{ pod_install_dir }}/.pod-charlesreid1-backups-wikidb.service.output.log
|
StandardOutput=syslog
|
||||||
ExecStartPre=/usr/bin/test -f {{ pod_install_dir }}/environment
|
SyslogIdentifier=pod-charlesreid1-backups-wikidb
|
||||||
ExecStart=/bin/bash -ac '. {{ pod_install_dir }}/environment; {{ pod_install_dir }}/scripts/backups/wikidb_dump.sh'
|
ExecStartPre=/usr/bin/test -f {{ pod_charlesreid1_pod_install_dir }}/environment
|
||||||
|
ExecStart=/bin/bash -ac '. {{ pod_charlesreid1_pod_install_dir }}/environment; {{ pod_charlesreid1_pod_install_dir }}/scripts/backups/wikidb_dump.sh'
|
||||||
User=charles
|
User=charles
|
||||||
Group=charles
|
Group=charles
|
||||||
|
|
||||||
|
|||||||
@@ -1,13 +1,14 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=Back up the pod-charlesreid1 wiki files
|
Description=Back up pod-charlesreid1 wiki files
|
||||||
Requires=docker.service
|
Requires=docker.service
|
||||||
After=docker.service
|
After=docker.service
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=oneshot
|
Type=oneshot
|
||||||
StandardError=file:{{ pod_install_dir }}/.pod-charlesreid1-backups-wikifiles.service.error.log
|
StandardError=syslog
|
||||||
StandardOutput=file:{{ pod_install_dir }}/.pod-charlesreid1-backups-wikifiles.service.output.log
|
StandardOutput=syslog
|
||||||
ExecStartPre=/usr/bin/test -f {{ pod_install_dir }}/environment
|
SyslogIdentifier=pod-charlesreid1-backups-wikifiles
|
||||||
ExecStart=/bin/bash -ac '. {{ pod_install_dir }}/environment; {{ pod_install_dir }}/scripts/backups/wikifiles_dump.sh'
|
ExecStartPre=/usr/bin/test -f {{ pod_charlesreid1_pod_install_dir }}/environment
|
||||||
|
ExecStart=/bin/bash -ac '. {{ pod_charlesreid1_pod_install_dir }}/environment; {{ pod_charlesreid1_pod_install_dir }}/scripts/backups/wikifiles_dump.sh'
|
||||||
User=charles
|
User=charles
|
||||||
Group=charles
|
Group=charles
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=Timer to back up the pod-charlesreid1 wiki files
|
Description=Timer to back up pod-charlesreid1 wiki files
|
||||||
|
|
||||||
[Timer]
|
[Timer]
|
||||||
OnCalendar=Sun *-*-* 2:08:00
|
OnCalendar=Sun *-*-* 2:08:00
|
||||||
|
|||||||
@@ -5,7 +5,8 @@
|
|||||||
set -eux
|
set -eux
|
||||||
|
|
||||||
CONTAINER_NAME="stormy_mysql"
|
CONTAINER_NAME="stormy_mysql"
|
||||||
STAMP="`date +"%Y%m%d"`"
|
DATESTAMP="`date +"%Y%m%d"`"
|
||||||
|
TIMESTAMP="`date +"%Y%m%d_%H%M%S"`"
|
||||||
|
|
||||||
function usage {
|
function usage {
|
||||||
set +x
|
set +x
|
||||||
@@ -20,7 +21,7 @@ function usage {
|
|||||||
echo "Example:"
|
echo "Example:"
|
||||||
echo ""
|
echo ""
|
||||||
echo " ./wikidb_dump.sh"
|
echo " ./wikidb_dump.sh"
|
||||||
echo " (creates ${POD_CHARLESREID1_BACKUP_DIR}/20200101/wikidb_20200101.sql)"
|
echo " (creates ${POD_CHARLESREID1_BACKUP_DIR}/YYYYMMDD/wikidb_YYYYMMDD_HHMMSS.sql)"
|
||||||
echo ""
|
echo ""
|
||||||
exit 1;
|
exit 1;
|
||||||
}
|
}
|
||||||
@@ -36,26 +37,63 @@ fi
|
|||||||
|
|
||||||
if [ "$#" == "0" ]; then
|
if [ "$#" == "0" ]; then
|
||||||
|
|
||||||
TARGET="wikidb_${STAMP}.sql"
|
TARGET="wikidb_${TIMESTAMP}.sql"
|
||||||
BACKUP_TARGET="${POD_CHARLESREID1_BACKUP_DIR}/${STAMP}/${TARGET}"
|
BACKUP_DIR="${POD_CHARLESREID1_BACKUP_DIR}/${DATESTAMP}"
|
||||||
|
BACKUP_TARGET="${BACKUP_DIR}/${TARGET}"
|
||||||
|
|
||||||
echo ""
|
echo ""
|
||||||
echo "pod-charlesreid1: wikidb_dump.sh"
|
echo "pod-charlesreid1: wikidb_dump.sh"
|
||||||
echo "--------------------------------"
|
echo "--------------------------------"
|
||||||
echo ""
|
echo ""
|
||||||
echo "Backup directory: ${POD_CHARLESREID1_BACKUP_DIR}"
|
echo "Backup directory: ${BACKUP_DIR}"
|
||||||
echo "Backup target: ${BACKUP_TARGET}"
|
echo "Backup target: ${BACKUP_TARGET}"
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
mkdir -p ${POD_CHARLESREID1_BACKUP_DIR}/${STAMP}
|
mkdir -p "${BACKUP_DIR}"
|
||||||
|
|
||||||
DOCKER=$(which docker)
|
|
||||||
DOCKERX="${DOCKER} exec -t"
|
|
||||||
|
|
||||||
echo "Running mysqldump inside the mysql container"
|
echo "Running mysqldump inside the mysql container"
|
||||||
${DOCKERX} ${CONTAINER_NAME} sh -c 'exec mysqldump wikidb --databases -uroot -p"$MYSQL_ROOT_PASSWORD"' 2>&1 | grep -v "Using a password" > ${BACKUP_TARGET}
|
|
||||||
|
|
||||||
echo "Done."
|
# Pull the root password out of the container so we don't duplicate the
|
||||||
|
# secret on the host, and forward it in via MYSQL_PWD (which mysqldump
|
||||||
|
# reads automatically). No -t: a PTY corrupts --default-character-set=binary
|
||||||
|
# output (LF→CRLF translation on binary blobs) and its small kernel buffer
|
||||||
|
# can deadlock on large dumps.
|
||||||
|
set +x
|
||||||
|
MYSQL_PWD="$(docker exec "${CONTAINER_NAME}" printenv MYSQL_ROOT_PASSWORD)"
|
||||||
|
export MYSQL_PWD
|
||||||
|
set -x
|
||||||
|
|
||||||
|
docker exec -i \
|
||||||
|
-e MYSQL_PWD \
|
||||||
|
"${CONTAINER_NAME}" \
|
||||||
|
sh -c 'exec mysqldump \
|
||||||
|
--user=root \
|
||||||
|
--single-transaction \
|
||||||
|
--quick \
|
||||||
|
--routines \
|
||||||
|
--triggers \
|
||||||
|
--events \
|
||||||
|
--default-character-set=binary \
|
||||||
|
--databases wikidb' \
|
||||||
|
> "${BACKUP_TARGET}"
|
||||||
|
|
||||||
|
unset MYSQL_PWD
|
||||||
|
|
||||||
|
# A complete mysqldump always ends with "-- Dump completed on ...".
|
||||||
|
# Missing trailer means the dump is truncated and not restorable.
|
||||||
|
if ! tail -c 200 "${BACKUP_TARGET}" | grep -q 'Dump completed on'; then
|
||||||
|
echo "ERROR: dump file ${BACKUP_TARGET} is missing the completion trailer." >&2
|
||||||
|
echo " mysqldump did not finish successfully." >&2
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
size=$(stat -c %s "${BACKUP_TARGET}")
|
||||||
|
if [ "${size}" -lt $((50 * 1024 * 1024)) ]; then
|
||||||
|
echo "ERROR: dump file ${BACKUP_TARGET} is only ${size} bytes; suspicious." >&2
|
||||||
|
exit 3
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Dump OK: ${BACKUP_TARGET} (${size} bytes)"
|
||||||
|
|
||||||
else
|
else
|
||||||
usage
|
usage
|
||||||
|
|||||||
110
scripts/backups/wikidb_restore_test.sh
Executable file
110
scripts/backups/wikidb_restore_test.sh
Executable file
@@ -0,0 +1,110 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Restore a wikidb dump into a throwaway MySQL 5.7 container and run sanity
|
||||||
|
# queries against it. Compares row counts to live stormy_mysql. Exits non-zero
|
||||||
|
# on any failure.
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# ./wikidb_restore_test.sh <path-to-dump.sql>
|
||||||
|
#
|
||||||
|
# A backup is only a backup if you have actually restored from it.
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
DUMP="${1:-}"
|
||||||
|
if [ -z "${DUMP}" ] || [ ! -f "${DUMP}" ]; then
|
||||||
|
echo "Usage: $0 <path-to-wikidb-dump.sql>" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
LIVE_CONTAINER="stormy_mysql"
|
||||||
|
TEST_CONTAINER="wikidb_restore_test_$$"
|
||||||
|
TEST_PW="temp_restore_test_pw_$$"
|
||||||
|
IMAGE="mysql:5.7"
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
docker stop "${TEST_CONTAINER}" >/dev/null 2>&1 || true
|
||||||
|
}
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
echo "[1/5] Starting throwaway MySQL container ${TEST_CONTAINER}..."
|
||||||
|
docker run -d --rm \
|
||||||
|
--name "${TEST_CONTAINER}" \
|
||||||
|
-e MYSQL_ROOT_PASSWORD="${TEST_PW}" \
|
||||||
|
"${IMAGE}" >/dev/null
|
||||||
|
|
||||||
|
echo "[2/5] Waiting for MySQL to accept authenticated connections..."
|
||||||
|
# `mysqladmin ping` returns OK before the root user is actually set up, so we
|
||||||
|
# have to probe with a real authenticated query and accept only success.
|
||||||
|
ready=0
|
||||||
|
for i in $(seq 1 60); do
|
||||||
|
if docker exec -e MYSQL_PWD="${TEST_PW}" "${TEST_CONTAINER}" \
|
||||||
|
mysql -uroot -e 'SELECT 1' >/dev/null 2>&1; then
|
||||||
|
ready=1
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
sleep 2
|
||||||
|
done
|
||||||
|
if [ "${ready}" -ne 1 ]; then
|
||||||
|
echo "ERROR: MySQL in ${TEST_CONTAINER} never became ready." >&2
|
||||||
|
docker logs "${TEST_CONTAINER}" 2>&1 | tail -20 >&2
|
||||||
|
exit 4
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "[3/5] Piping dump into throwaway MySQL..."
|
||||||
|
docker exec -i -e MYSQL_PWD="${TEST_PW}" "${TEST_CONTAINER}" \
|
||||||
|
mysql -uroot < "${DUMP}"
|
||||||
|
|
||||||
|
echo "[4/5] Querying restored DB..."
|
||||||
|
restored=$(docker exec -e MYSQL_PWD="${TEST_PW}" "${TEST_CONTAINER}" \
|
||||||
|
mysql -uroot -N -B -e "
|
||||||
|
USE wikidb;
|
||||||
|
SELECT COUNT(*) FROM page;
|
||||||
|
SELECT COUNT(*) FROM revision;
|
||||||
|
SELECT COUNT(*) FROM text;
|
||||||
|
SELECT COALESCE(MAX(rev_timestamp), 'none') FROM revision;
|
||||||
|
")
|
||||||
|
|
||||||
|
echo "--- restored ---"
|
||||||
|
echo "${restored}"
|
||||||
|
|
||||||
|
echo "[5/5] Querying live ${LIVE_CONTAINER}..."
|
||||||
|
LIVE_PW="$(docker exec "${LIVE_CONTAINER}" printenv MYSQL_ROOT_PASSWORD)"
|
||||||
|
live=$(docker exec -e MYSQL_PWD="${LIVE_PW}" "${LIVE_CONTAINER}" \
|
||||||
|
mysql -uroot -N -B -e "
|
||||||
|
USE wikidb;
|
||||||
|
SELECT COUNT(*) FROM page;
|
||||||
|
SELECT COUNT(*) FROM revision;
|
||||||
|
SELECT COUNT(*) FROM text;
|
||||||
|
SELECT COALESCE(MAX(rev_timestamp), 'none') FROM revision;
|
||||||
|
")
|
||||||
|
|
||||||
|
echo "--- live ---"
|
||||||
|
echo "${live}"
|
||||||
|
|
||||||
|
r_page=$(echo "${restored}" | sed -n '1p')
|
||||||
|
r_rev=$(echo "${restored}" | sed -n '2p')
|
||||||
|
r_text=$(echo "${restored}" | sed -n '3p')
|
||||||
|
l_page=$(echo "${live}" | sed -n '1p')
|
||||||
|
l_rev=$(echo "${live}" | sed -n '2p')
|
||||||
|
l_text=$(echo "${live}" | sed -n '3p')
|
||||||
|
|
||||||
|
fail=0
|
||||||
|
for kind in page rev text; do
|
||||||
|
r_var="r_${kind}"
|
||||||
|
l_var="l_${kind}"
|
||||||
|
r="${!r_var}"
|
||||||
|
l="${!l_var}"
|
||||||
|
if [ "${r}" != "${l}" ]; then
|
||||||
|
echo "MISMATCH: ${kind} count restored=${r} live=${l}" >&2
|
||||||
|
fail=1
|
||||||
|
else
|
||||||
|
echo "OK: ${kind} count = ${r}"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ "${fail}" -ne 0 ]; then
|
||||||
|
echo "RESTORE TEST FAILED." >&2
|
||||||
|
exit 5
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "RESTORE TEST PASSED."
|
||||||
@@ -5,7 +5,8 @@
|
|||||||
set -eux
|
set -eux
|
||||||
|
|
||||||
CONTAINER_NAME="stormy_mw"
|
CONTAINER_NAME="stormy_mw"
|
||||||
STAMP="`date +"%Y%m%d"`"
|
DATESTAMP="`date +"%Y%m%d"`"
|
||||||
|
TIMESTAMP="`date +"%Y%m%d_%H%M%S"`"
|
||||||
|
|
||||||
function usage {
|
function usage {
|
||||||
set +x
|
set +x
|
||||||
@@ -20,7 +21,7 @@ function usage {
|
|||||||
echo "Example:"
|
echo "Example:"
|
||||||
echo ""
|
echo ""
|
||||||
echo " ./wikifiles_dump.sh"
|
echo " ./wikifiles_dump.sh"
|
||||||
echo " (creates ${POD_CHARLESREID1_BACKUP_DIR}/20200101/wikifiles_20200101.tar.gz)"
|
echo " (creates ${POD_CHARLESREID1_BACKUP_DIR}/YYYYMMDD/wikifiles_YYYYMMDD_HHMMSS.tar.gz)"
|
||||||
echo ""
|
echo ""
|
||||||
exit 1;
|
exit 1;
|
||||||
}
|
}
|
||||||
@@ -36,18 +37,19 @@ fi
|
|||||||
|
|
||||||
if [ "$#" == "0" ]; then
|
if [ "$#" == "0" ]; then
|
||||||
|
|
||||||
TARGET="wikifiles_${STAMP}.tar.gz"
|
TARGET="wikifiles_${TIMESTAMP}.tar.gz"
|
||||||
BACKUP_TARGET="${POD_CHARLESREID1_BACKUP_DIR}/${STAMP}/${TARGET}"
|
BACKUP_DIR="${POD_CHARLESREID1_BACKUP_DIR}/${DATESTAMP}"
|
||||||
|
BACKUP_TARGET="${BACKUP_DIR}/${TARGET}"
|
||||||
|
|
||||||
echo ""
|
echo ""
|
||||||
echo "pod-charlesreid1: wikifiles_dump.sh"
|
echo "pod-charlesreid1: wikifiles_dump.sh"
|
||||||
echo "-----------------------------------"
|
echo "-----------------------------------"
|
||||||
echo ""
|
echo ""
|
||||||
echo "Backup directory: ${POD_CHARLESREID1_BACKUP_DIR}"
|
echo "Backup directory: ${BACKUP_DIR}"
|
||||||
echo "Backup target: ${BACKUP_TARGET}"
|
echo "Backup target: ${BACKUP_TARGET}"
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
mkdir -p ${POD_CHARLESREID1_BACKUP_DIR}/${STAMP}
|
mkdir -p ${BACKUP_DIR}
|
||||||
|
|
||||||
DOCKER=$(which docker)
|
DOCKER=$(which docker)
|
||||||
DOCKERX="${DOCKER} exec -t"
|
DOCKERX="${DOCKER} exec -t"
|
||||||
@@ -62,6 +64,7 @@ if [ "$#" == "0" ]; then
|
|||||||
echo "Step 3: Clean up tar.gz file"
|
echo "Step 3: Clean up tar.gz file"
|
||||||
${DOCKERX} ${CONTAINER_NAME} /bin/rm -f /tmp/${TARGET}
|
${DOCKERX} ${CONTAINER_NAME} /bin/rm -f /tmp/${TARGET}
|
||||||
|
|
||||||
|
echo "Successfully wrote wikifiles dump to file: ${BACKUP_TARGET}"
|
||||||
echo "Done."
|
echo "Done."
|
||||||
|
|
||||||
else
|
else
|
||||||
|
|||||||
47
scripts/backups/wikifiles_restore.sh
Executable file
47
scripts/backups/wikifiles_restore.sh
Executable file
@@ -0,0 +1,47 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Restore wiki files from a tar file
|
||||||
|
# into the stormy_mw container.
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
function usage {
|
||||||
|
echo ""
|
||||||
|
echo "restore_wikifiles.sh script:"
|
||||||
|
echo "Restore wiki files from a tar file"
|
||||||
|
echo "into the stormy_mw container"
|
||||||
|
echo ""
|
||||||
|
echo " ./restore_wikifiles.sh <tar-file>"
|
||||||
|
echo ""
|
||||||
|
echo "Example:"
|
||||||
|
echo ""
|
||||||
|
echo " ./restore_wikifiles.sh /path/to/wikifiles.tar.gz"
|
||||||
|
echo ""
|
||||||
|
echo ""
|
||||||
|
exit 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
# NOTE:
|
||||||
|
# I assume images/ is the only directory to back up/restore.
|
||||||
|
# If there are more I forgot, add them back in here.
|
||||||
|
# (skins and extensions are static, added into image at build time.)
|
||||||
|
|
||||||
|
if [[ "$#" -eq 1 ]];
|
||||||
|
then
|
||||||
|
|
||||||
|
NAME="stormy_mw"
|
||||||
|
TAR=$(basename "$1")
|
||||||
|
|
||||||
|
echo "Checking that container ${NAME} exists"
|
||||||
|
docker ps --format '{{.Names}}' | grep ${NAME} || exit 1;
|
||||||
|
|
||||||
|
echo "Copying dir $1 into container ${NAME}"
|
||||||
|
set -x
|
||||||
|
docker cp $1 ${NAME}:/tmp/${TAR}
|
||||||
|
docker exec -it ${NAME} rm -rf /var/www/html/images.old
|
||||||
|
docker exec -it ${NAME} mv /var/www/html/images /var/www/html/images.old
|
||||||
|
docker exec -it ${NAME} tar -xf /tmp/${TAR} -C / && rm -f /tmp/${TAR}
|
||||||
|
docker exec -it ${NAME} chown -R www-data:www-data /var/www/html/images
|
||||||
|
|
||||||
|
else
|
||||||
|
usage
|
||||||
|
fi
|
||||||
@@ -5,6 +5,8 @@ After=docker.service
|
|||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=oneshot
|
Type=oneshot
|
||||||
StandardError=file:{{ pod_install_dir }}/.pod-charlesreid1-certbot.service.error.log
|
StandardError=syslog
|
||||||
StandardOutput=file:{{ pod_install_dir }}/.pod-charlesreid1-certbot.service.output.log
|
StandardOutput=syslog
|
||||||
ExecStart=/bin/bash -ac '. {{ pod_install_dir }}/environment; {{ pod_install_dir }}/scripts/certbot/renew_charlesreid1_certs.sh'
|
SyslogIdentifier=pod-charlesreid1-certbot
|
||||||
|
ExecStartPre=/usr/bin/test -f {{ pod_charlesreid1_pod_install_dir }}/environment
|
||||||
|
ExecStart=/bin/bash -ac '. {{ pod_charlesreid1_pod_install_dir }}/environment; {{ pod_charlesreid1_pod_install_dir }}/scripts/certbot/renew_charlesreid1_certs.sh'
|
||||||
|
|||||||
@@ -2,8 +2,8 @@
|
|||||||
Description=Timer to renew certificates for pod-charlesreid1
|
Description=Timer to renew certificates for pod-charlesreid1
|
||||||
|
|
||||||
[Timer]
|
[Timer]
|
||||||
# Run on the first Sunday of every month
|
# Run daily
|
||||||
OnCalendar=Sun *-*-01..07 4:03:00
|
OnCalendar=*-*-* 4:03:00
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=timers.target
|
WantedBy=timers.target
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ if [ "$#" == "0" ]; then
|
|||||||
sudo systemctl stop ${SERVICE}
|
sudo systemctl stop ${SERVICE}
|
||||||
|
|
||||||
echo "Stop pod"
|
echo "Stop pod"
|
||||||
docker-compose -f {{ pod_install_dir }}/docker-compose.yml down
|
docker-compose -f {{ pod_charlesreid1_pod_install_dir }}/docker-compose.yml down
|
||||||
|
|
||||||
echo "Run certbot renew"
|
echo "Run certbot renew"
|
||||||
SUBS="git www"
|
SUBS="git www"
|
||||||
@@ -63,7 +63,7 @@ if [ "$#" == "0" ]; then
|
|||||||
done
|
done
|
||||||
|
|
||||||
echo "Start pod"
|
echo "Start pod"
|
||||||
docker-compose -f {{ pod_install_dir }}/docker-compose.yml up -d
|
docker-compose -f {{ pod_charlesreid1_pod_install_dir }}/docker-compose.yml up -d
|
||||||
|
|
||||||
echo "Enable and start system service ${SERVICE}"
|
echo "Enable and start system service ${SERVICE}"
|
||||||
sudo systemctl enable ${SERVICE}
|
sudo systemctl enable ${SERVICE}
|
||||||
|
|||||||
@@ -13,7 +13,9 @@ def clean():
|
|||||||
rname = tname[:-3]
|
rname = tname[:-3]
|
||||||
rpath = os.path.join(tdir, rname)
|
rpath = os.path.join(tdir, rname)
|
||||||
|
|
||||||
if os.path.exists(rpath):
|
ignore_list = ['environment']
|
||||||
|
|
||||||
|
if os.path.exists(rpath) and rname not in ignore_list:
|
||||||
print(f"Removing file {rpath}")
|
print(f"Removing file {rpath}")
|
||||||
os.remove(rpath)
|
os.remove(rpath)
|
||||||
else:
|
else:
|
||||||
|
|||||||
@@ -11,8 +11,8 @@ directory structure for charlesreid1.com
|
|||||||
content. (Or, charlesreid1.XYZ, whatever.)
|
content. (Or, charlesreid1.XYZ, whatever.)
|
||||||
"""
|
"""
|
||||||
|
|
||||||
SERVER_NAME_DEFAULT = '{{ server_name_default }}'
|
SERVER_NAME_DEFAULT = '{{ pod_charlesreid1_server_name }}'
|
||||||
USERNAME = '{{ username }}'
|
USERNAME = '{{ pod_charlesreid1_username }}'
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -10,8 +10,8 @@ This script git pulls the /www directory
|
|||||||
for updating charlesreid1.com content.
|
for updating charlesreid1.com content.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
SERVER_NAME_DEFAULT = '{{ server_name_default }}'
|
SERVER_NAME_DEFAULT = '{{ pod_charlesreid1_server_name }}'
|
||||||
USERNAME = '{{ username }}'
|
USERNAME = '{{ pod_charlesreid1_username }}'
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -80,19 +80,5 @@ fi
|
|||||||
|
|
||||||
##############################
|
##############################
|
||||||
|
|
||||||
Extension="Fail2banlog"
|
|
||||||
if [ ! -d ${Extension} ]
|
|
||||||
then
|
|
||||||
git clone https://github.com/charlesreid1-docker/mw-fail2ban.git ${Extension}
|
|
||||||
(
|
|
||||||
cd ${Extension}
|
|
||||||
git checkout master
|
|
||||||
)
|
|
||||||
else
|
|
||||||
echo "Skipping ${Extension}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
##############################
|
|
||||||
|
|
||||||
# fin
|
# fin
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,13 +1,6 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
#
|
#
|
||||||
# fix LocalSettings.php in the mediawiki container.
|
# fix LocalSettings.php in the mediawiki container.
|
||||||
#
|
|
||||||
# docker is stupid, so it doesn't let you bind mount
|
|
||||||
# a single file into a docker volume.
|
|
||||||
#
|
|
||||||
# so, rather than rebuilding the entire goddamn container
|
|
||||||
# just to update LocalSettings.php when it changes, we just
|
|
||||||
# use a docker cp command to copy it into the container.
|
|
||||||
set -eux
|
set -eux
|
||||||
|
|
||||||
NAME="stormy_mw"
|
NAME="stormy_mw"
|
||||||
|
|||||||
@@ -1,12 +1,6 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
#
|
#
|
||||||
# fix extensions dir in the mediawiki container
|
# fix extensions dir in the mediawiki container
|
||||||
#
|
|
||||||
# in theory, we should be able to update the
|
|
||||||
# extensions folder in d-mediawiki/charlesreid1-config,
|
|
||||||
# but in reality this falls on its face.
|
|
||||||
# So, we have to fix the fucking extensions directory
|
|
||||||
# ourselves.
|
|
||||||
set -eux
|
set -eux
|
||||||
|
|
||||||
NAME="stormy_mw"
|
NAME="stormy_mw"
|
||||||
|
|||||||
@@ -1,13 +1,6 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
#
|
#
|
||||||
# fix skins in the mediawiki container.
|
# fix skins in the mediawiki container.
|
||||||
#
|
|
||||||
# docker is stupid, so it doesn't let you bind mount
|
|
||||||
# a single file into a docker volume.
|
|
||||||
#
|
|
||||||
# so, rather than rebuilding the entire goddamn container
|
|
||||||
# just to update the skin when it changes, we just
|
|
||||||
# use a docker cp command to copy it into the container.
|
|
||||||
set -eux
|
set -eux
|
||||||
|
|
||||||
NAME="stormy_mw"
|
NAME="stormy_mw"
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
#
|
#
|
||||||
# Restore wiki files from a tar file
|
# Restore wiki files from a tar file
|
||||||
# into the stormy_mw container.
|
# into the stormy_mw container.
|
||||||
set -eux
|
set -eu
|
||||||
|
|
||||||
function usage {
|
function usage {
|
||||||
echo ""
|
echo ""
|
||||||
@@ -31,16 +31,16 @@ then
|
|||||||
NAME="stormy_mw"
|
NAME="stormy_mw"
|
||||||
TAR=$(basename "$1")
|
TAR=$(basename "$1")
|
||||||
|
|
||||||
echo "Checking that container exists"
|
echo "Checking that container ${NAME} exists"
|
||||||
docker ps --format '{{.Names}}' | grep ${NAME} || exit 1;
|
docker ps --format '{{.Names}}' | grep ${NAME} || exit 1;
|
||||||
|
|
||||||
echo "Copying $1 into container ${NAME}"
|
echo "Copying dir $1 into container ${NAME}"
|
||||||
set -x
|
set -x
|
||||||
docker cp $1 ${NAME}:/tmp/${TAR}
|
docker cp $1 ${NAME}:/tmp/${TAR}
|
||||||
|
docker exec -it ${NAME} rm -rf /var/www/html/images.old
|
||||||
docker exec -it ${NAME} mv /var/www/html/images /var/www/html/images.old
|
docker exec -it ${NAME} mv /var/www/html/images /var/www/html/images.old
|
||||||
docker exec -it ${NAME} tar -xf /tmp/${TAR} -C / && rm -f /tmp/${TAR}
|
docker exec -it ${NAME} tar -xf /tmp/${TAR} -C / && rm -f /tmp/${TAR}
|
||||||
docker exec -it ${NAME} chown -R www-data:www-data /var/www/html/images
|
docker exec -it ${NAME} chown -R www-data:www-data /var/www/html/images
|
||||||
set +x
|
|
||||||
|
|
||||||
else
|
else
|
||||||
usage
|
usage
|
||||||
|
|||||||
@@ -1,35 +1,36 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
echo "this script is deprecated, see ../backups/wikidb_dump.sh"
|
||||||
|
##
|
||||||
|
## Dump a database to an .sql file
|
||||||
|
## from the stormy_mysql container.
|
||||||
|
#set -eu
|
||||||
#
|
#
|
||||||
# Dump a database to an .sql file
|
#function usage {
|
||||||
# from the stormy_mysql container.
|
# echo ""
|
||||||
set -x
|
# echo "dump_database.sh script:"
|
||||||
|
# echo "Dump a database to an .sql file "
|
||||||
function usage {
|
# echo "from the stormy_mysql container."
|
||||||
echo ""
|
# echo ""
|
||||||
echo "dump_database.sh script:"
|
# echo " ./dump_database.sh <sql-dump-file>"
|
||||||
echo "Dump a database to an .sql file "
|
# echo ""
|
||||||
echo "from the stormy_mysql container."
|
# echo "Example:"
|
||||||
echo ""
|
# echo ""
|
||||||
echo " ./dump_database.sh <sql-dump-file>"
|
# echo " ./dump_database.sh /path/to/wikidb_dump.sql"
|
||||||
echo ""
|
# echo ""
|
||||||
echo "Example:"
|
# echo ""
|
||||||
echo ""
|
# exit 1;
|
||||||
echo " ./dump_database.sh /path/to/wikidb_dump.sql"
|
#}
|
||||||
echo ""
|
#
|
||||||
echo ""
|
#CONTAINER_NAME="stormy_mysql"
|
||||||
exit 1;
|
#
|
||||||
}
|
#if [[ "$#" -gt 0 ]];
|
||||||
|
#then
|
||||||
CONTAINER_NAME="stormy_mysql"
|
#
|
||||||
|
# TARGET="$1"
|
||||||
if [[ "$#" -gt 0 ]];
|
# mkdir -p $(dirname $TARGET)
|
||||||
then
|
# set -x
|
||||||
|
# docker exec -i ${CONTAINER_NAME} sh -c 'exec mysqldump wikidb --databases -uroot -p"$MYSQL_ROOT_PASSWORD"' > $TARGET
|
||||||
TARGET="$1"
|
#
|
||||||
mkdir -p $(dirname $TARGET)
|
#else
|
||||||
docker exec -i ${CONTAINER_NAME} sh -c 'exec mysqldump wikidb --databases -uroot -p"$MYSQL_ROOT_PASSWORD"' > $TARGET
|
# usage
|
||||||
|
#fi
|
||||||
else
|
|
||||||
usage
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|||||||
@@ -6,6 +6,7 @@
|
|||||||
# Note that this expects the .sql dump
|
# Note that this expects the .sql dump
|
||||||
# to create its own databases.
|
# to create its own databases.
|
||||||
# Use the --databases flag with mysqldump.
|
# Use the --databases flag with mysqldump.
|
||||||
|
set -eu
|
||||||
|
|
||||||
function usage {
|
function usage {
|
||||||
echo ""
|
echo ""
|
||||||
@@ -42,31 +43,23 @@ function usage {
|
|||||||
# because of all these one-off
|
# because of all these one-off
|
||||||
# "whoopsie we don't do that" problems.
|
# "whoopsie we don't do that" problems.
|
||||||
|
|
||||||
|
if [[ "$#" -eq 1 ]];
|
||||||
|
then
|
||||||
|
|
||||||
CONTAINER_NAME="stormy_mysql"
|
CONTAINER_NAME="stormy_mysql"
|
||||||
TARGET=$(basename $1)
|
TARGET=$(basename $1)
|
||||||
TARGET_DIR=$(dirname $1)
|
TARGET_DIR=$(dirname $1)
|
||||||
|
|
||||||
|
|
||||||
if [[ "$#" -eq 1 ]];
|
|
||||||
then
|
|
||||||
|
|
||||||
# Step 1: Copy the sql dump into the container
|
|
||||||
set -x
|
set -x
|
||||||
|
# Step 1: Copy the sql dump into the container
|
||||||
docker cp $1 ${CONTAINER_NAME}:/tmp/${TARGET}
|
docker cp $1 ${CONTAINER_NAME}:/tmp/${TARGET}
|
||||||
set +x
|
|
||||||
|
|
||||||
# Step 2: Run sqldump inside the container
|
# Step 2: Run sqldump inside the container
|
||||||
set -x
|
|
||||||
docker exec -i ${CONTAINER_NAME} sh -c "/usr/bin/mysql --defaults-file=/root/.mysql.rootpw.cnf < /tmp/${TARGET}"
|
docker exec -i ${CONTAINER_NAME} sh -c "/usr/bin/mysql --defaults-file=/root/.mysql.rootpw.cnf < /tmp/${TARGET}"
|
||||||
set +x
|
|
||||||
|
|
||||||
# Step 3: Clean up sql dump from inside container
|
# Step 3: Clean up sql dump from inside container
|
||||||
set -x
|
docker exec -i ${CONTAINER_NAME} sh -c "/bin/rm -fr /tmp/${TARGET}"
|
||||||
docker exec -i ${CONTAINER_NAME} sh -c "/bin/rm -fr /tmp/${TARGET}.sql"
|
|
||||||
set +x
|
|
||||||
|
|
||||||
|
|
||||||
set +x
|
|
||||||
else
|
else
|
||||||
usage
|
usage
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -7,9 +7,9 @@ After=docker.service
|
|||||||
Restart=always
|
Restart=always
|
||||||
StandardError=null
|
StandardError=null
|
||||||
StandardOutput=null
|
StandardOutput=null
|
||||||
ExecStartPre=/usr/bin/test -f {{ pod_install_dir }}/docker-compose.yml
|
ExecStartPre=/usr/bin/test -f {{ pod_charlesreid1_pod_install_dir }}/docker-compose.yml
|
||||||
ExecStart=/usr/local/bin/docker-compose -f {{ pod_install_dir }}/docker-compose.yml up
|
ExecStart=/usr/local/bin/docker-compose -f {{ pod_charlesreid1_pod_install_dir }}/docker-compose.yml up
|
||||||
ExecStop=/usr/local/bin/docker-compose -f {{ pod_install_dir }}/docker-compose.yml stop
|
ExecStop=/usr/local/bin/docker-compose -f {{ pod_charlesreid1_pod_install_dir }}/docker-compose.yml stop
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=default.target
|
WantedBy=default.target
|
||||||
|
|||||||
Reference in New Issue
Block a user