5 Commits

Author SHA1 Message Date
76c8bdefbd fix broken backup script 2019-11-14 01:04:12 -08:00
3ef8f12310 cat the log 2019-11-14 01:03:50 -08:00
a90ccf2ba1 bump nginx-charlesreid1 submodule 2019-11-14 01:03:20 -08:00
dd1d1841e3 bump mysql version in submodule 2019-11-14 01:00:55 -08:00
da954335bd updating d-gitea submodule 2019-11-14 00:59:19 -08:00
49 changed files with 686 additions and 1175 deletions

12
.gitignore vendored
View File

@@ -2,15 +2,3 @@ site
root.password
docker-compose.yml
*.zip
scripts/output
scripts/forever_loop.py.j2
scripts/forever_loop.service.j2
scripts/executioner.pyc
scripts/git_clone_www.py
scripts/git_pull_www.py
scripts/git_clone_data.py
scripts/git_pull_data.py

8
.gitmodules vendored
View File

@@ -10,12 +10,12 @@
[submodule "d-nginx-charlesreid1"]
path = d-nginx-charlesreid1
url = git@github.com:charlesreid1-docker/d-nginx-charlesreid1.git
[submodule "d-gitea"]
path = d-gitea
url = git@github.com:charlesreid1-docker/d-gitea.git
[submodule "d-python-files"]
path = d-python-files
url = git@github.com:charlesreid1-docker/d-python-files.git
[submodule "mkdocs-material"]
path = mkdocs-material
url = git@github.com:charlesreid1-docker/mkdocs-material.git
[submodule "d-gitea"]
path = d-gitea
url = git@github.com:charlesreid1-docker/d-gitea.git
url = git@github.com:charlesreid1/mkdocs-material.git

View File

@@ -1,42 +0,0 @@
import os, re, sys
from jinja2 import Environment, FileSystemLoader, select_autoescape
# NOTE(review): the triple-quoted string below follows the imports, so it
# is a bare expression statement rather than a true module docstring.
"""
Apply Default Values to Jinja Templates
This script applies default values to
docker-compose.yml file.
The template is useful for Ansible,
but this is useful for experiments/one-offs.
"""
# Where templates live
TEMPLATEDIR = '.'
# Where rendered templates will go
OUTDIR = '.'
# Should existing files be overwritten
OVERWRITE = False
# Jinja environment rooted at the current directory.
env = Environment(loader=FileSystemLoader('.'))
# Template input and rendered output filenames.
tfile = 'docker-compose.yml.j2'
rfile = 'docker-compose.yml'
# NOTE(review): hard-coded secret key / password look like throwaway
# placeholder values for one-off experiments — confirm they are never
# used for a production deployment.
content = env.get_template(tfile).render({
"server_name_default" : "charlesreid1.com",
"mediawiki_secretkey" : "asdfqwerty_oiuqoweiruoasdfi",
"mysql_password" : "MySuperSecretPassword"
})
# Write to file
# Refuse to clobber an existing rendered file unless OVERWRITE is set.
if os.path.exists(rfile) and not OVERWRITE:
raise Exception("Error: file %s already exists!"%(rfile))
else:
with open(rfile,'w') as f:
f.write(content)

Submodule d-gitea updated: 983cd1bf18...f73c8c8258

Submodule d-mysql updated: 20e811f8de...3f8949139e

View File

@@ -1,9 +1,6 @@
version: "3.3"
services:
# Note: depends_on is from here
# https://stackoverflow.com/a/39039830
stormy_gitea:
image: gitea/gitea:latest
environment:
@@ -12,8 +9,9 @@ services:
restart: always
volumes:
- "stormy_gitea_data:/data"
- "./d-gitea/custom:/data/gitea"
- "./d-gitea/data:/app/gitea/data"
- "./d-gitea/custom/conf:/data/gitea/conf"
- "./d-gitea/custom/public:/data/gitea/public"
- "./d-gitea/custom/templates:/data/gitea/templates"
logging:
driver: "json-file"
options:
@@ -44,53 +42,48 @@ services:
max-size: 1m
max-file: "10"
environment:
- MEDIAWIKI_SITE_SERVER=https://{{ server_name_default }}
- MEDIAWIKI_SITE_SERVER=https://charlesreid1.com
- MEDIAWIKI_SECRETKEY={{ mediawiki_secretkey }}
- MYSQL_HOST=stormy_mysql
- MYSQL_HOST=mysql
- MYSQL_DATABASE=wikidb
- MYSQL_USER=root
- MYSQL_PASSWORD={{ mysql_password }}
depends_on:
- stormy_mysql
stormy_nginx:
restart: always
image: nginx
hostname: {{ server_name_default }}
hostname: charlesreid1.com
command: /bin/bash -c "nginx -g 'daemon off;'"
volumes:
- "./d-nginx-charlesreid1/conf.d:/etc/nginx/conf.d:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/etc/letsencrypt:/etc/letsencrypt"
- "/www/{{ server_name_default }}/htdocs:/www/{{ server_name_default }}/htdocs:ro"
- "/www/charlesreid1.blue/htdocs:/www/charlesreid1.blue/htdocs:ro"
- "/www/charlesreid1.red/htdocs:/www/charlesreid1.red/htdocs:ro"
- "/www/charlesreid1.com/htdocs:/www/charlesreid1.com/htdocs:ro"
logging:
driver: "json-file"
options:
max-size: 1m
max-file: "10"
depends_on:
- stormy_mw
- stormy_gitea
#- stormy_files
#- stormy_myadmin
ports:
- "80:80"
- "443:443"
### stormy_files:
### image: python:3.6
### command: bash -c "cd /files && python3 -m http.server 8081"
### volumes:
### - "/www/files.{{ server_name_default }}:/files"
### logging:
### driver: "json-file"
### stormy_myadmin:
### image: "phpmyadmin/phpmyadmin"
### links:
### - stormy_mysql:db
### environment:
### - PMA_ABSOLUTE_URI={{ server_name_default }}/phpMyAdmin
### - PMA_ABSOLUTE_URI=charlesreid1.com/phpMyAdmin
### stormy_files:
### image: python:3.6
### command: bash -c "cd /files && python3 -m http.server 8081"
### volumes:
### - "/www/files:/files"
### logging:
### driver: "json-file"
volumes:
stormy_mysql_data:

View File

@@ -3,16 +3,6 @@
Contains useful scripts for setting up and maintaining
the charlesreid1.com docker pod.
## TODO
Update:
- jinja templates
- apply template scripts
- executioner
- forever tasks and forever loops
## `dockerpod-charlesreid1.service`
This .service script is a systemd startup script that
@@ -44,3 +34,17 @@ This script pulls the latest changes from the
`gh-pages` branch in the `/www/` folder cloned
with the `git_clone_www.sh` script.
## `git_clone_data.sh`
This clones the data repository (under version control
at <https://git.charlesreid1.com/data/charlesreid1>)
into the `/www` folder cloned with the `git_clone_www.sh`
script.
## `git_pull_data.sh`
This script pulls the latest changes to the
charlesreid1-data repository and updates the
`data/` folder in the `/www/charlesreid1.com/htdocs`
folder.

View File

@@ -1,88 +0,0 @@
import os, re, sys
import glob
import subprocess
from jinja2 import Environment, FileSystemLoader, select_autoescape

"""
Apply Default Values to Jinja Templates
This script applies default values to
templates in this folder.
The templates are used by Ansible,
but this script uses the same template
engine as Ansible to apply template
variable values to the template files
and make real files.
only variables are:
- `username` - user/group name to change ownership to
- `pod_install_dir` - installation directory of pod
"""

# Where templates live
TEMPLATEDIR = '.'
# Where rendered templates will go
OUTDIR = 'output'
# Should existing (destination) files
# be overwritten if they exist?
OVERWRITE = True
# Template variables
TV = {
    'pod_install_dir': '/home/charles/pod-charlesreid1',
    'username': 'charles'
}


def apply_templates(template_dir, output_dir, template_vars, overwrite=False):
    """Render the backup_* Jinja templates into output_dir.

    Raises Exception if either directory is missing, or if a rendered
    destination already exists and overwrite is False.
    """
    if not os.path.exists(output_dir):
        msg = "Error: output dir %s does not exist!"%(output_dir)
        raise Exception(msg)
    if not os.path.exists(template_dir):
        # Bug fix: this message previously interpolated output_dir,
        # reporting the wrong path when the template dir was missing.
        msg = "Error: template dir %s does not exist!"%(template_dir)
        raise Exception(msg)
    # Jinja env
    env = Environment(loader=FileSystemLoader('.'))
    # Render templates
    # (raw string avoids the invalid '\.' escape warning in newer Pythons)
    template_files = glob.glob('backup_*')
    render_files = [re.sub(r'\.j2','',s) for s in template_files]
    for rfile,tfile in zip(render_files,template_files):
        # Get rendered template content
        content = env.get_template(tfile).render(**template_vars)
        # Write to file
        dest = os.path.join(output_dir,rfile)
        if os.path.exists(dest) and overwrite is False:
            msg = "Error: template rendering destination %s already exists!"%(dest)
            raise Exception(msg)
        with open(dest,'w') as f:
            f.write(content)
    # Ship the executioner helper alongside the rendered scripts.
    x = 'executioner.py'
    subprocess.call(['cp',x,os.path.join(output_dir,x)])
    print("Rendered the following templates:%s\nOutput files:%s\n"%(
        "".join(["\n- "+os.path.join(template_dir,j) for j in template_files]),
        "".join(["\n- "+os.path.join(output_dir,j) for j in render_files])
    ))


if __name__=="__main__":
    apply_templates(TEMPLATEDIR,OUTDIR,TV,OVERWRITE)

View File

@@ -1,89 +0,0 @@
import os, re, sys
import glob
import subprocess
from jinja2 import Environment, FileSystemLoader, select_autoescape
"""
Apply Default Values to Jinja Templates
This script applies default values to
templates in this folder.
The templates are used by Ansible,
but this script uses the same template
engine as Ansible to apply template
variable values to the template files
and make real files.
only variables are:
- `username` - user/group name to change ownership to
- `server_name_default` - name of server
(e.g., charlesreid1.com or charlesreid1.red)
"""
# Where templates live
TEMPLATEDIR = '.'
# Where rendered templates will go
OUTDIR = 'output'
# Should existing (destination) files
# be overwritten if they exist?
OVERWRITE = True
# Template variables
TV = {
'server_name_default': 'charlesreid1.red',
'username': 'charles'
}
def apply_templates(template_dir, output_dir, template_vars, overwrite=False):
"""Apply the template variables
to the template files.
"""
if not os.path.exists(output_dir):
msg = "Error: output dir %s does not exist!"%(output_dir)
raise Exception(msg)
if not os.path.exists(template_dir):
msg = "Error: template dir %s does not exist!"%(output_dir)
raise Exception(msg)
# Jinja env
env = Environment(loader=FileSystemLoader('.'))
# Render templates
template_files = glob.glob('git_*.py.j2')
render_files = [re.sub('\.j2','',s) for s in template_files]
for rfile,tfile in zip(render_files,template_files):
# Get rendered template content
content = env.get_template(tfile).render(**template_vars)
# Write to file
dest = os.path.join(output_dir,rfile)
if os.path.exists(dest) and overwrite is False:
msg = "Error: template rendering destination %s already exists!"%(dest)
raise Exception(msg)
with open(dest,'w') as f:
f.write(content)
x = 'executioner.py'
subprocess.call(['cp',x,os.path.join(output_dir,x)])
print("Rendered the following templates:%s\nOutput files:%s\n"%(
"".join(["\n- "+os.path.join(template_dir,j) for j in template_files]),
"".join(["\n- "+os.path.join(output_dir,j) for j in render_files])
))
if __name__=="__main__":
apply_templates(TEMPLATEDIR,OUTDIR,TV,OVERWRITE)

View File

@@ -1,82 +0,0 @@
import os, re, sys
import glob
from jinja2 import Environment, FileSystemLoader, select_autoescape
"""
Apply Default Values to Jinja Templates
for pod-charlesreid1 startup service
This script applies default values to
templates in this folder.
The templates are used by Ansible,
but this script uses the same template
engine as Ansible to apply template
variable values to the template files
and make real files.
only variables are:
- `pod_install_dir` - location where pod-charlesreid1 repo is
"""
# Where templates live
TEMPLATEDIR = '.'
# Where rendered templates will go
OUTDIR = 'output'
# Should existing (destination) files
# be overwritten if they exist?
OVERWRITE = True
# Template variables
TV = { 'pod_install_dir' : '/home/charles/pod-charlesreid1' }
def apply_templates(template_dir, output_dir, template_vars, overwrite=False):
"""Apply the template variables
to the template files.
"""
if not os.path.exists(output_dir):
msg = "Error: output dir %s does not exist!"%(output_dir)
raise Exception(msg)
if not os.path.exists(template_dir):
msg = "Error: template dir %s does not exist!"%(output_dir)
raise Exception(msg)
# Jinja env
env = Environment(loader=FileSystemLoader('.'))
# Render templates
template_files = glob.glob('dockerpod*') \
+ glob.glob('databot*')
render_files = [re.sub('\.j2','',s) for s in template_files]
for rfile,tfile in zip(render_files,template_files):
# Get rendered template content
content = env.get_template(tfile).render(**template_vars)
# Write to file
dest = os.path.join(output_dir,rfile)
if os.path.exists(dest) and overwrite is False:
msg = "Error: template rendering destination %s already exists!"%(dest)
raise Exception(msg)
with open(dest,'w') as f:
f.write(content)
print("Rendered the following templates:%s\nOutput files:%s\n"%(
"".join(["\n- "+os.path.join(template_dir,j) for j in template_files]),
"".join(["\n- "+os.path.join(output_dir,j) for j in render_files])
))
if __name__=="__main__":
apply_templates(TEMPLATEDIR,OUTDIR,TV,OVERWRITE)

View File

@@ -1,21 +0,0 @@
# the scripts attic
These four scripts use git clone / git pull
commands that are useful because they employ
a non-standard layout, with the `.git` directory
kept separate from the cloned working tree.
However, these do nothing to ensure
that said folders exist and have
correct permissions, and in some
cases these scripts were fed incorrect
info, so better to use the python
templates in the parent directory.
```
git_clone_data.sh
git_clone_www.sh
git_pull_data.sh
git_pull_www.sh
```

View File

@@ -1,75 +0,0 @@
#!/usr/bin/env python3
import subprocess
import os, sys
import time
from datetime import datetime
from executioner import execute
"""
Forever Loop of Gitea Backup Task
Run a forever loop to back up Gitea instance.
"""
# Run one gitea backup pass: ensure dirs exist, then invoke
# utils-gitea/backup_gitea.sh, logging its output to /tmp.
def task(pod_dir,username):
work_dir = os.path.join(pod_dir,'scripts')
gitea_dir = os.path.join(pod_dir,'utils-gitea')
backup_dir = os.path.join('/backups','gitea')
def run_cmd(f,cmd):
"""This runs a command and writes the output to a log file."""
f.write("About to run the following command for backup gitea task:\n")
f.write(" $ " + " ".join(cmd))
f.write("\n")
f.write("Command Output:")
for loo in execute(cmd):
f.write(loo)
f.write("\n")
f.write("Done.")
# Timestamped log file, e.g. /tmp/backup_gitea_forever_2019-11-14_0100.log
d = datetime.now().strftime('%Y-%m-%d_%H%M')
logfile = '/tmp/backup_gitea_forever_%s.log'%(d)
print("Running task. Log file: %s"%(logfile))
with open(logfile,'w') as f:
# NOTE(review): py_bin is assigned but never used in this task.
py_bin = sys.executable
# Step 1:
# Make sure work dir and backup dir exist
if not os.path.exists(work_dir):
# Quit if working dir does not exist
return
if not os.path.exists(backup_dir):
# Make backup dir if it does not exist
subprocess.call(['mkdir','-p',backup_dir])
subprocess.call(['chown',username+':'+username,backup_dir])
time.sleep(5)
# Step 2:
# Back up gitea to file
backup_script = os.path.join(gitea_dir,'backup_gitea.sh')
backup_target = backup_dir
backup_cmd = [backup_script,backup_target]
run_cmd(f,backup_cmd)
time.sleep(5)
if __name__=="__main__":
# Run a forever loop
# The '{{ ... }}' placeholders are filled in by the Jinja template
# renderer (apply_templates.py) before this script is installed.
time.sleep(5)
while True:
task('{{ pod_install_dir }}','{{ username }}')
fudge = 30
two_weeks = 2*7*24*60*60 - fudge
time.sleep(two_weeks)

View File

@@ -1,11 +0,0 @@
[Unit]
Description=do the gitea backup task with a forever loop

[Service]
Restart=always
ExecStart=/usr/bin/python {{ pod_install_dir }}/scripts/backup_gitea.py
# Bug fix: systemd does not pass Exec lines through a shell, so the bare
# pipe was handed to pgrep as literal arguments; wrap it in `sh -c`.
ExecStop=/bin/sh -c '/usr/bin/pgrep -f backup_gitea | /usr/bin/xargs /bin/kill'

[Install]
WantedBy=default.target

View File

@@ -1,76 +0,0 @@
#!/usr/bin/env python3
import subprocess
import os, sys
import time
from datetime import datetime
from executioner import execute
"""
Forever Loop of MySQL Backup Task
Run a forever loop to back up MySQL databases.
"""
# Run one MySQL backup pass: ensure dirs exist, then invoke
# utils-mysql/dump_database.sh, logging its output to /tmp.
def task(pod_dir,username):
work_dir = os.path.join(pod_dir,'scripts')
mysql_dir = os.path.join(pod_dir,'utils-mysql')
backup_dir = os.path.join('/backups','mysql')
def run_cmd(f,cmd):
"""This runs a command and writes the output to a log file."""
f.write("About to run the following command for backup mysql task:\n")
f.write(" $ " + " ".join(cmd))
f.write("\n")
f.write("Command Output:")
for loo in execute(cmd):
f.write(loo)
f.write("\n")
f.write("Done.")
# Timestamped log file, e.g. /tmp/backup_mysql_forever_2019-11-14_0100.log
d = datetime.now().strftime('%Y-%m-%d_%H%M')
logfile = '/tmp/backup_mysql_forever_%s.log'%(d)
print("Running task. Log file: %s"%(logfile))
with open(logfile,'w') as f:
# NOTE(review): py_bin is assigned but never used in this task.
py_bin = sys.executable
# Step 1:
# Make sure work dir and backup dir exist
if not os.path.exists(work_dir):
# Quit if working dir does not exist
return
if not os.path.exists(backup_dir):
# Make backup dir if it does not exist
subprocess.call(['mkdir','-p',backup_dir])
subprocess.call(['chown',username+':'+username,backup_dir])
time.sleep(5)
# Step 2:
# Back up SQL database to file
backup_script = os.path.join(mysql_dir,'dump_database.sh')
sql_file = 'wikidb_%s.sql'%(d)
backup_target = os.path.join(backup_dir,sql_file)
backup_cmd = [backup_script,backup_target]
run_cmd(f,backup_cmd)
time.sleep(5)
if __name__=="__main__":
# Run a forever loop
# The '{{ ... }}' placeholders are filled in by the Jinja template
# renderer (apply_templates.py) before this script is installed.
time.sleep(10)
while True:
task('{{ pod_install_dir }}','{{ username }}')
fudge = 30
one_week = 7*24*60*60 - fudge
time.sleep(one_week)

View File

@@ -1,11 +0,0 @@
[Unit]
Description=do the mysql backup task with a forever loop

[Service]
Restart=always
ExecStart=/usr/bin/python {{ pod_install_dir }}/scripts/backup_mysql.py
# Bug fix: systemd does not pass Exec lines through a shell, so the bare
# pipe was handed to pgrep as literal arguments; wrap it in `sh -c`.
ExecStop=/bin/sh -c '/usr/bin/pgrep -f backup_mysql | /usr/bin/xargs /bin/kill'

[Install]
WantedBy=default.target

View File

@@ -1,76 +0,0 @@
#!/usr/bin/env python3
import subprocess
import os, sys
import time
from datetime import datetime
from executioner import execute
"""
Forever Loop of MediaWiki Files Backup Task
Run a forever loop to back up MediaWiki files.
"""
# Run one MediaWiki files backup pass: ensure dirs exist, then invoke
# utils-mw/backup_wikifiles.sh, logging its output to /tmp.
def task(pod_dir,username):
work_dir = os.path.join(pod_dir,'scripts')
mw_dir = os.path.join(pod_dir,'utils-mw')
backup_dir = os.path.join('/backups','mediawiki')
def run_cmd(f,cmd):
"""This runs a command and writes the output to a log file."""
# NOTE(review): the log text below says "mysql" but this is the
# mediawiki-files task — runtime string left unchanged here.
f.write("About to run the following command for backup mysql task:\n")
f.write(" $ " + " ".join(cmd))
f.write("\n")
f.write("Command Output:")
for loo in execute(cmd):
f.write(loo)
f.write("\n")
f.write("Done.")
# Timestamped log file, e.g. /tmp/backup_mw_forever_2019-11-14_0100.log
d = datetime.now().strftime('%Y-%m-%d_%H%M')
logfile = '/tmp/backup_mw_forever_%s.log'%(d)
print("Running task. Log file: %s"%(logfile))
with open(logfile,'w') as f:
# NOTE(review): py_bin is assigned but never used in this task.
py_bin = sys.executable
# Step 1:
# Make sure work dir and backup dir exist
if not os.path.exists(work_dir):
# Quit if working dir does not exist
return
if not os.path.exists(backup_dir):
# Make backup dir if it does not exist
subprocess.call(['mkdir','-p',backup_dir])
subprocess.call(['chown',username+':'+username,backup_dir])
time.sleep(5)
# Step 2:
# Back up wiki files to tar file
backup_script = os.path.join(mw_dir,'backup_wikifiles.sh')
tar_file = 'wikifiles_%s.tar.gz'%(d)
backup_target = os.path.join(backup_dir,tar_file)
backup_cmd = [backup_script,backup_target]
run_cmd(f,backup_cmd)
time.sleep(5)
if __name__=="__main__":
# Run a forever loop
# The '{{ ... }}' placeholders are filled in by the Jinja template
# renderer (apply_templates.py) before this script is installed.
time.sleep(5)
while True:
task('{{ pod_install_dir }}','{{ username }}')
fudge = 30
one_week = 7*24*60*60 - fudge
time.sleep(one_week)

View File

@@ -1,11 +0,0 @@
[Unit]
Description=do the mediawiki files backup task with a forever loop

[Service]
Restart=always
ExecStart=/usr/bin/python {{ pod_install_dir }}/scripts/backup_wikifiles.py
# Bug fix: systemd does not pass Exec lines through a shell, so the bare
# pipe was handed to pgrep as literal arguments; wrap it in `sh -c`.
ExecStop=/bin/sh -c '/usr/bin/pgrep -f backup_wikifiles | /usr/bin/xargs /bin/kill'

[Install]
WantedBy=default.target

View File

@@ -1,74 +0,0 @@
#!/usr/bin/env python3
import subprocess
import os, sys
import time
from datetime import datetime
from executioner import execute

"""
Forever Loop of Data Bot (Gitea) Task
Run a forever loop to scrape gitea for
commit counts, commit the new commit counts,
and check them all in.
"""


def task(pod_dir):
    """Run one databot pass: scrape gitea, assemble counts, pull data.

    pod_dir: installation directory of the pod-charlesreid1 repo.
    Output of each step is appended to a dated log file in /tmp.
    """
    work_dir = os.path.join(pod_dir,'utils-gitea')

    def run_cmd(f,cmd):
        """
        This runs a databot command
        and writes the output to a log file.
        """
        f.write("About to run the following command for databot forever task:\n")
        f.write(" $ " + " ".join(cmd))
        f.write("\n")
        f.write("Command Output:")
        for loo in execute(cmd):
            f.write(loo)
        f.write("\n")
        f.write("Done.")

    # Bug fix: the format string was '%Y-m-%d' (missing '%' before 'm'),
    # which produced literal 'm' in the log filename.
    d = datetime.now().strftime('%Y-%m-%d')
    with open('/tmp/databot_forever_%s.log'%(d),'w') as f:
        py_bin = sys.executable
        # Step 1:
        # scrape gitea as sudo
        # NOTE(review): the target is a .sh script but it is invoked with
        # the python interpreter — confirm this is intentional upstream.
        scrape_script = os.path.join(work_dir,'scrape_gitea_as_sudo.sh')
        scrape_cmd = ['sudo',
                      py_bin,scrape_script]
        # Bug fix: run_cmd takes (f, cmd); the log handle was omitted in
        # all three calls, raising TypeError on the first pass.
        run_cmd(f,scrape_cmd)
        time.sleep(5)
        # Step 2:
        # assemble gitea counts as user
        assemble_script = os.path.join(work_dir,'assemble_gitea_counts.sh')
        assemble_cmd = ['sudo','-H','-u','charles',
                        py_bin,assemble_script]
        run_cmd(f,assemble_cmd)
        time.sleep(5)
        # Step 3:
        # gitea pull data as user
        # NOTE(review): this runs the raw .j2 template, not a rendered
        # script — confirm against the deployed layout.
        pull_script = 'git_pull_data.py.j2'
        pull_cmd = ['sudo','-H','-u','charles',
                    py_bin,pull_script]
        run_cmd(f,pull_cmd)


if __name__=="__main__":
    # Run a forever loop
    # The '{{ pod_install_dir }}' placeholder is filled in by the Jinja
    # template renderer before this script is installed.
    time.sleep(5)
    while True:
        task('{{ pod_install_dir }}')
        time.sleep(5)

View File

@@ -1,10 +0,0 @@
[Unit]
Description=do the databot task with a forever loop

[Service]
Restart=always
ExecStart=/usr/bin/python {{ pod_install_dir }}/scripts/databot_forever.py
# Bug fix: systemd does not pass Exec lines through a shell, so the bare
# pipe was handed to pgrep as literal arguments; wrap it in `sh -c`.
ExecStop=/bin/sh -c '/usr/bin/pgrep -f databot_forever | /usr/bin/xargs /bin/kill'

[Install]
WantedBy=default.target

View File

@@ -1,21 +0,0 @@
#!/usr/bin/env python3
import subprocess
import os


def execute(cmd):
    """
    A function to run a command and return the
    lines of output as they are generated,
    allowing the calling function to "stream"
    the output of the command to print() or etc.

    On nonzero exit, the captured stderr text is yielded last and
    subprocess.CalledProcessError is raised.
    """
    import tempfile

    # Bug fix: stderr used to be a pipe that was only read after stdout
    # reached EOF; a child writing more than a pipe buffer of stderr
    # would block forever. Spooling stderr to a temp file avoids the
    # deadlock while preserving the yield-err-then-raise behavior.
    with tempfile.TemporaryFile(mode='w+') as errf:
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=errf,
                             universal_newlines=True)
        # Stream stdout line by line as it is produced.
        for stdout_line in iter(p.stdout.readline, ""):
            yield stdout_line
        p.stdout.close()
        return_code = p.wait()
        errf.seek(0)
        err = errf.read()
    if return_code:
        yield err
        raise subprocess.CalledProcessError(return_code, cmd)

View File

@@ -4,7 +4,17 @@
# master branch to the /www/${DOMAIN}
# directory structure
DOMAIN="{{ server_name_default }}"
if [[ "$#" -eq "0" ]]; then
# default value
DOMAIN="charlesreid1.com"
elif [[ "$#" -eq "1" ]]; then
# user-provided value
DOMAIN="$1"
else
# huh?
echo "git_clone_data.sh takes 0 or 1 input arguments, you provided $#"
exit 1;
fi
REPOURL="https://git.charlesreid1.com/data/charlesreid1-data.git"

View File

@@ -1,78 +0,0 @@
#!/usr/bin/env python3
import subprocess
import os
from executioner import execute
"""
/www Initial Setup
This script sets up the initial /www
directory structure for charlesreid1.com
content. (Or, charlesreid1.XYZ, whatever.)
"""
# The '{{ ... }}' placeholders are filled in by the Jinja template
# renderer (apply_templates.py) before this script is installed.
SERVER_NAME_DEFAULT = '{{ server_name_default }}'
USERNAME = '{{ username }}'
# Set repo urls
### # Use git.charlesreid1.com to clone charlesreid1.com content
### repourl = "https://git.charlesreid1.com/charlesreid1/charlesreid1.com.git"
#
# Use github.com to clone charlesreid1.com content
repourl = "https://github.com/charlesreid1-docker/charlesreid1.com.git"
# Set directory locations
# Non-standard layout: the .git dir lives in git.htdocs, separate from
# the htdocs working tree (via git clone --separate-git-dir).
root = '/www'
basedir = os.path.join(root,SERVER_NAME_DEFAULT)
htdocsdir = os.path.join(basedir,'htdocs')
dotgitdir = os.path.join(basedir,'git.htdocs')
# Start by making sure the base directory exists
subprocess.call(['mkdir','-p',basedir])
# Run the clone command, but only if there is no dot git directory yet
# /www/SERVER_NAME_DEFAULT/htdocs
if not os.path.exists(dotgitdir):
if os.path.exists(htdocsdir):
# an htdocs dir with no dot git dir?
# this must be some kind of mistake.
subprocess.call(['rm','-fr',htdocsdir])
# Clone htdocs folder for SERVER_NAME_DEFAULT
clonecmd = ['git','clone',
'--separate-git-dir=%s'%(dotgitdir),
'-b','gh-pages',
repourl, htdocsdir]
print("About to clone /www content for %s using command:\n"%(SERVER_NAME_DEFAULT))
print(" $ " + " ".join(clonecmd))
print("\n")
print("Command Output:")
for loo in execute(clonecmd):
print(loo)
print("\n")
print("Done.")
# Step 2: chown everybody
# Construct chown command
chowncmd = ['chown','-R',
USERNAME+":"+USERNAME,
basedir]
print("About to chown /www directory using command:\n")
print(" $ " + " ".join(chowncmd))
print("\n")
print("Command Output:")
for loo in execute(chowncmd):
print(loo)
print("\n")
print("Done.")

View File

@@ -28,7 +28,7 @@ mkdir -p /www/${DOMAIN}
# /www/<domain>/htdocs does not exist
if [ ! -d "/www/${DOMAIN}/htdocs" ]; then
echo "Cloning repo for ${DOMAIN} to /www"
echo "Cloning repo for ${DOMAIN} to /wwww"
git -C /www/${DOMAIN} \
clone \

View File

@@ -1,67 +0,0 @@
#!/usr/bin/env python3
import subprocess
import os
from executioner import execute
"""
Pull /www
This script git pulls the /www directory
for updating charlesreid1.com content.
"""
# The '{{ ... }}' placeholders are filled in by the Jinja template
# renderer (apply_templates.py) before this script is installed.
SERVER_NAME_DEFAULT = '{{ server_name_default }}'
USERNAME = '{{ username }}'
# Set directory locations
# Non-standard layout: the .git dir lives in git.htdocs, separate from
# the htdocs working tree (matches git_clone_www.py).
root = '/www'
basedir = os.path.join(root,SERVER_NAME_DEFAULT)
htdocsdir = os.path.join(basedir,'htdocs')
dotgitdir = os.path.join(basedir,'git.htdocs')
# Step 1: git pull
# Run the pull command, but only if
# the htdocs dir already exists
# /www/<domain>/htdocs
if os.path.exists(htdocsdir):
# Git pull, respecting the non-standard
# layout of the .git directory
pullcmd = ['git',
'-C',basedir,
'--git-dir=%s'%(dotgitdir),
'--work-tree=%s'%(htdocsdir),
'pull','origin','gh-pages']
print("About to pull /www content for %s using command:\n"%(SERVER_NAME_DEFAULT))
print(" $ " + " ".join(pullcmd))
print("\n")
print("Command Output:")
for loo in execute(pullcmd):
print(loo)
print("\n")
print("Done.")
# Step 2: chown everybody
# Construct chown command
chowncmd = ['chown','-R',
USERNAME+":"+USERNAME,
basedir]
print("About to chown /www directory using command:\n")
print(" $ " + " ".join(chowncmd))
print("\n")
print("Command Output:")
for loo in execute(chowncmd):
print(loo)
print("\n")
print("Done.")

View File

@@ -1,12 +0,0 @@
because of the way we're writing to output files,
the output can't really be streamed: each line
of output from the command is written as it happens,
but it goes to an open stream that is not
closed (or flushed) until the command
is finished.
what if you just want everything to frikin
work like a unix command line utility?
the logging, the flags, the robustness,
the simplicity, the lightweight footprint

33
utils-backup/daily_gitea.sh Executable file
View File

@@ -0,0 +1,33 @@
#!/bin/bash
#
# Just make a daily gitea files backup.
# Trace each command as it runs.
set -x
stamp="`date +"%Y-%m-%d"`"
# Backup helper shipped with the pod-charlesreid1 repo; for gitea the
# backup target is the whole dated directory.
backup_tool="${HOME}/codes/docker/pod-charlesreid1/utils-gitea/backup_gitea.sh"
backup_dir="/junkinthetrunk/backups/daily/gitea_${stamp}"
backup_target="${backup_dir}"
log_dir="${HOME}/.logs/backups/daily"
log_target="${log_dir}/gitea_${stamp}.log"
mkdir -p ${backup_dir}
mkdir -p ${log_dir}
# Truncate the log, then append a header and the command output.
cat /dev/null > ${log_target}
echo "================================" >> ${log_target}
echo "=== Gitea Files Backup =========" >> ${log_target}
echo "================================" >> ${log_target}
echo "" >> ${log_target}
echo "Backup Utility: ${backup_tool}" >> ${log_target}
echo "Backup Target: ${backup_target}" >> ${log_target}
echo "Log Target: ${log_target}" >> ${log_target}
echo "" >> ${log_target}
echo "Command: ${backup_tool} ${backup_target} >> ${log_target} 2>&1 " >> ${log_target}
echo "" >> ${log_target}
# Run the backup, capturing stdout and stderr into the log.
${backup_tool} ${backup_target} >> ${log_target} 2>&1
echo "Done" >> ${log_target}

34
utils-backup/daily_mysql.sh Executable file
View File

@@ -0,0 +1,34 @@
#!/bin/bash
#
# Just make a daily MySQL backup.
stamp=$(date +"%Y-%m-%d")
# Backup helper shipped with the pod-charlesreid1 repo.
backup_tool="${HOME}/codes/docker/pod-charlesreid1/utils-mysql/dump_database.sh"
backup_dir="/junkinthetrunk/backups/daily/wikidb_${stamp}"
backup_target="${backup_dir}/wikidb_${stamp}.sql"
log_dir="${HOME}/.logs/backups/daily"
log_target="${log_dir}/wikidb_${stamp}.log"
mkdir -p ${backup_dir}
mkdir -p ${log_dir}
# Truncate the log once, then append.
cat /dev/null > ${log_target}
# Bug fix: every `tee ${log_target}` re-truncated the log, so only the
# final "Done" line survived; use `tee -a` to append after the truncate.
echo "=======================================" | tee -a ${log_target}
echo "=== MediaWiki Database Backup =========" | tee -a ${log_target}
echo "=======================================" | tee -a ${log_target}
echo "" | tee -a ${log_target}
echo "Backup Utility: ${backup_tool}" | tee -a ${log_target}
echo "Backup Target: ${backup_target}" | tee -a ${log_target}
echo "Log Target: ${log_target}" | tee -a ${log_target}
echo "" | tee -a ${log_target}
set -x
${backup_tool} ${backup_target} >> ${log_target} 2>&1
set +x
echo "Done" | tee -a ${log_target}
cat ${log_target}

33
utils-backup/daily_wikifiles.sh Executable file
View File

@@ -0,0 +1,33 @@
#!/bin/bash
#
# Just make a daily MediaWiki files backup.
stamp="`date +"%Y-%m-%d"`"
# Backup helper shipped with the pod-charlesreid1 repo.
backup_tool="${HOME}/codes/docker/pod-charlesreid1/utils-mw/backup_wikifiles.sh"
backup_dir="/junkinthetrunk/backups/daily/wikifiles_${stamp}"
backup_target="${backup_dir}/wikifiles_${stamp}.tar.gz"
log_dir="${HOME}/.logs/backups/daily"
log_target="${log_dir}/wikifiles_${stamp}.log"
mkdir -p ${backup_dir}
mkdir -p ${log_dir}
# Truncate the log once, then append.
cat /dev/null > ${log_target}
# Bug fix: every `tee ${log_target}` re-truncated the log, so only the
# final "Done" line survived; use `tee -a` to append after the truncate.
echo "====================================" | tee -a ${log_target}
echo "=== MediaWiki Files Backup =========" | tee -a ${log_target}
echo "====================================" | tee -a ${log_target}
echo "" | tee -a ${log_target}
echo "Backup Utility: ${backup_tool}" | tee -a ${log_target}
echo "Backup Target: ${backup_target}" | tee -a ${log_target}
echo "Log Target: ${log_target}" | tee -a ${log_target}
echo "" | tee -a ${log_target}
set -x
${backup_tool} ${backup_target} >> ${log_target} 2>&1
set +x
echo "Done" | tee -a ${log_target}
cat ${log_target}

View File

@@ -1,83 +0,0 @@
#!/bin/bash
#
# Run the gitea dump command and send the dump file
# to the specified backup directory.
#
# Backup directory:
# /home/user/backups/gitea
BACKUP_DIR="$HOME/backups/gitea"
CONTAINER_NAME="pod-charlesreid1_stormy_gitea_1"
# Print usage and exit nonzero (also used for wrong argument counts).
function usage {
set +x
echo ""
echo "gitea_dump.sh script:"
echo ""
echo "Run the gitea dump command inside the gitea docker container,"
echo "and copy the resulting zip file to the specified directory."
echo "The resulting gitea dump zip file will be timestamped."
echo ""
echo " ./gitea_dump.sh"
echo ""
echo "Example:"
echo ""
echo " ./gitea_dump.sh"
# NOTE(review): the example shows a date+time stamp, but STAMP below is
# date-only (%Y-%m-%d) — confirm which format is intended.
echo " (creates ${BACKUP_DIR}/gitea-dump_20200101_000000.zip)"
echo ""
exit 1;
}
# Refuse to run as root; docker cp/exec below assume the invoking user.
if [ "$(id -u)" == "0" ]; then
echo ""
echo ""
echo "This script should NOT be run as root!"
echo ""
echo ""
exit 1;
fi
# Zero arguments: perform the dump. Any arguments: show usage.
if [ "$#" == "0" ]; then
STAMP="`date +"%Y-%m-%d"`"
TARGET="gitea-dump_${STAMP}.zip"
echo ""
echo "pod-charlesreid1: gitea_dump.sh"
echo "-------------------------------"
echo ""
echo "Backup target: ${BACKUP_DIR}/${TARGET}"
echo ""
mkdir -p $BACKUP_DIR
# If this script is being run from a cron job,
# don't use -i flag with docker
CRON="$( pstree -s $$ | /bin/grep -c cron )"
DOCKERX=""
if [[ "$CRON" -eq 1 ]];
then
DOCKERX="docker exec -t"
else
DOCKERX="docker exec -it"
fi
echo "Step 1: Run gitea dump command inside docker machine"
set -x
${DOCKERX} --user git ${CONTAINER_NAME} /bin/bash -c 'cd /app/gitea && /app/gitea/gitea dump --file gitea-dump.zip --skip-repository'
set +x
echo "Step 2: Copy gitea dump file out of docker machine"
set -x
docker cp ${CONTAINER_NAME}:/app/gitea/gitea-dump.zip ${BACKUP_DIR}/${TARGET}
set +x
echo "Step 3: Clean up gitea dump file"
set -x
${DOCKERX} ${CONTAINER_NAME} /bin/bash -c "rm -f /app/gitea/gitea-dump.zip"
set +x
echo "Done."
else
usage
fi

33
utils-backup/monthly_gitea.sh Executable file
View File

@@ -0,0 +1,33 @@
#!/bin/bash
#
# Just make a monthly gitea files backup.
# (Comment fix: this backs up gitea, not MediaWiki files.)
# Trace each command as it runs.
set -x
stamp="`date +"%Y-%m-%d"`"
# Backup helper shipped with the pod-charlesreid1 repo; for gitea the
# backup target is the whole dated directory.
backup_tool="${HOME}/codes/docker/pod-charlesreid1/utils-gitea/backup_gitea.sh"
backup_dir="/junkinthetrunk/backups/monthly/gitea_${stamp}"
backup_target="${backup_dir}"
log_dir="${HOME}/.logs/backups/monthly"
log_target="${log_dir}/gitea_${stamp}.log"
mkdir -p ${backup_dir}
mkdir -p ${log_dir}
# Truncate the log, then append a header and the command output.
cat /dev/null > ${log_target}
echo "================================" >> ${log_target}
echo "=== Gitea Files Backup =========" >> ${log_target}
echo "================================" >> ${log_target}
echo "" >> ${log_target}
echo "Backup Utility: ${backup_tool}" >> ${log_target}
echo "Backup Target: ${backup_target}" >> ${log_target}
echo "Log Target: ${log_target}" >> ${log_target}
echo "" >> ${log_target}
echo "Command: ${backup_tool} ${backup_target} >> ${log_target} 2>&1 " >> ${log_target}
echo "" >> ${log_target}
${backup_tool} ${backup_target} >> ${log_target} 2>&1
echo "Done" >> ${log_target}

View File

@@ -1,73 +0,0 @@
#!/bin/bash
#
# Run the mysql dump command to back up wikidb table, and send the
# resulting SQL file to the specified backup directory.
#
# Backup directory:
# /home/user/backups/mysql
BACKUP_DIR="$HOME/backups/mysql"
CONTAINER_NAME="pod-charlesreid1_stormy_mysql_1"
# Print usage and exit nonzero (also used for wrong argument counts).
function usage {
set +x
echo ""
echo "wikidb_dump.sh script:"
echo ""
echo "Run the mysql dump command on the wikidb table in the container,"
echo "and copy the resulting SQL file to the specified directory."
echo "The resulting mysql dump SQL file will be timestamped."
echo ""
echo " ./wikidb_dump.sh"
echo ""
echo "Example:"
echo ""
echo " ./wikidb_dump.sh"
# NOTE(review): the example shows a date+time stamp, but STAMP below is
# date-only (%Y-%m-%d) — confirm which format is intended.
echo " (creates ${BACKUP_DIR}/wikidb_20200101_000000.sql)"
echo ""
exit 1;
}
# Refuse to run as root; docker exec below assumes the invoking user.
if [ "$(id -u)" == "0" ]; then
echo ""
echo ""
echo "This script should NOT be run as root!"
echo ""
echo ""
exit 1;
fi
# Zero arguments: perform the dump. Any arguments: show usage.
if [ "$#" == "0" ]; then
STAMP="`date +"%Y-%m-%d"`"
TARGET="wikidb_${STAMP}.sql"
echo ""
echo "pod-charlesreid1: wikidb_dump.sh"
echo "--------------------------------"
echo ""
echo "Backup target: ${BACKUP_DIR}/${TARGET}"
echo ""
mkdir -p $BACKUP_DIR
# If this script is being run from a cron job,
# don't use -i flag with docker
CRON="$( pstree -s $$ | /bin/grep -c cron )"
DOCKERX=""
if [[ "$CRON" -eq 1 ]];
then
DOCKERX="docker exec -t"
else
DOCKERX="docker exec -it"
fi
echo "Running mysqldump"
set -x
${DOCKERX} ${CONTAINER_NAME} sh -c 'exec mysqldump wikidb --databases -uroot -p"$MYSQL_ROOT_PASSWORD"' > ${BACKUP_DIR}/${TARGET}
set +x
echo "Done."
else
usage
fi

View File

@@ -1,85 +0,0 @@
#!/bin/bash
#
# wikifiles_dump.sh: create a tar file containing wiki files
# (/var/www/html/images) from the mediawiki docker container
# and copy it to the backup directory below.
#
# Backup directory:
#     /home/user/backups/mediawiki
BACKUP_DIR="$HOME/backups/mediawiki"
CONTAINER_NAME="pod-charlesreid1_stormy_mw_1"
STAMP="`date +"%Y-%m-%d"`"

# Print usage information and exit nonzero.
function usage {
    set +x
    echo ""
    echo "wikifiles_dump.sh script:"
    echo ""
    echo "Create a tar file containing wiki files"
    echo "from the mediawiki docker container."
    echo "The resulting tar file will be timestamped."
    echo ""
    echo " ./wikifiles_dump.sh"
    echo ""
    echo "Example:"
    echo ""
    echo " ./wikifiles_dump.sh"
    echo " (creates ${BACKUP_DIR}/wikifiles_2020-01-01.tar.gz)"
    echo ""
    exit 1;
}

# Backup files should be owned by (and readable as) a regular user.
if [ "$(id -u)" == "0" ]; then
    echo ""
    echo ""
    echo "This script should NOT be run as root!"
    echo ""
    echo ""
    exit 1;
fi

if [ "$#" == "0" ]; then
    TARGET="wikifiles_${STAMP}.tar.gz"
    echo ""
    echo "pod-charlesreid1: wikifiles_dump.sh"
    echo "-----------------------------------"
    echo ""
    echo "Backup target: ${BACKUP_DIR}/${TARGET}"
    echo ""
    mkdir -p "$BACKUP_DIR"

    # If this script is being run from a cron job there is no TTY,
    # so don't use the -i flag with docker.
    CRON="$( pstree -s $$ | /bin/grep -c cron )"
    if [[ "$CRON" -eq 1 ]];
    then
        DOCKERX="docker exec -t"
    else
        DOCKERX="docker exec -it"
    fi

    echo "Step 1: Compress wiki files inside container"
    set -x
    ${DOCKERX} ${CONTAINER_NAME} /bin/tar czf /tmp/${TARGET} /var/www/html/images
    set +x

    echo "Step 2: Copy tar.gz file out of container"
    # Fix: removed a stray `mkdir -p $(dirname "$1")` here -- this
    # branch only runs when the script gets zero arguments, so $1 was
    # always empty; ${BACKUP_DIR} is already created above.
    set -x
    docker cp ${CONTAINER_NAME}:/tmp/${TARGET} ${BACKUP_DIR}/${TARGET}
    set +x

    echo "Step 3: Clean up tar.gz file"
    set -x
    ${DOCKERX} ${CONTAINER_NAME} /bin/rm -f /tmp/${TARGET}
    set +x
    echo "Done."
else
    usage
fi

View File

@@ -0,0 +1,66 @@
# Dump Gitea Backup
Running the dump command creates two zip files.
The first zip file is created by gitea via `gitea dump`.
The second zip file is a directory in gitea containing user avatars
(not backed up using the above `gitea dump` command).
### The gitea dump command
When you run `gitea dump`, gitea will create a single zip file archive
of the entire contents of the gitea site, in the current directory
(where the `gitea dump` command was run from).
### The gitea dump directory structure
The built-in `gitea dump` functionality will create a zip
that contains the following directory structure:
```
gitea-repo.zip
gitea-db.sql
custom/
log/
```
When the `gitea-repo.zip` folder is unzipped, it generates a `repositories/` folder
containing the contents of every git repo in the gitea site.
In a real gitea server, here is where these should go:
The `repositories/` dir should be at:
```
<gitea-base-dir>/repositories
```
The `custom/` dir should be at:
```
<gitea-base-dir>/bin/custom
```
The database file should be at:
```
<gitea-base-dir>/data/gitea-db.sql
```
The log should be at:
```
<gitea-base-dir>/log
```
If you are running gitea using docker,
`<gitea-base-dir>` will be `/data/gitea/`.
### The avatars directory
Not much to it, just create a zip file from the
`avatars/` directory and move that zip file
out of the container.

View File

@@ -0,0 +1,34 @@
# Quick Start
We provide a backup and restore script.
The backup script takes a directory as an argument,
and places two backup zip files at the specified location:
```
./backup_gitea.sh <target-dir>
```
Example:
```
$ ./backup_gitea.sh /path/to/backup/target/
$ ls /path/to/backup/target/
gitea-dump-000000.zip
gitea-avatars.zip
```
The restore script will take two zip files as inputs,
the dump zip and the avatars zip:
```
./restore_gitea.sh <gitea-dump-zip> <gitea-avatars-zip>
```
Example using some bash brace-expansion magic:
```
$ ./restore_gitea.sh /path/to/backup/target/gitea-{dump-00000,avatars}.zip
```

17
utils-gitea/README.md Normal file
View File

@@ -0,0 +1,17 @@
# Gitea Dump/Restore Scripts
Fortunately, gitea provides a `gitea dump` command to create a backup.
Unfortunately, gitea does not provide a `gitea restore` command to restore from a backup.
## Gitea Dump Command
See [GiteaDumpCommand.md](GiteaDumpCommand.md)
## Dump Gitea Backup
See [DumpGiteaBackup.md](DumpGiteaBackup.md)
## Restore Gitea Backup
See [RestoreGiteaBackup.md](RestoreGiteaBackup.md)

View File

@@ -0,0 +1,11 @@
# Restore Gitea Backup
The restore script takes two separate arguments.
The first is the zip file created by the `gitea dump` command.
The second is the zip file containing user avatars.
Not much more to it than that.

View File

@@ -0,0 +1,38 @@
#!/bin/bash
#
# Commit freshly-scraped gitea commit-count data as a regular user.
#
# This companion script exists because ssh and sudo do not play
# nicely together: the sudo cron script has already extracted
# stats from the gitea repo (which lives in a docker volume and
# hence requires sudo to access) and fixed ownership to
# charles:charles. This script then runs WITHOUT sudo to check
# the new data in over ssh.

# Refuse to run as root: the git push below must use the regular
# user's ssh keys.
if [ "$(id -u)" == "0" ]; then
echo ""
echo ""
echo "This script should be run as a regular user."
echo ""
echo ""
exit 1;
fi

# DATDIR: where the sudo scrape script left commit_counts.csv.
# CLONEDIR: fresh clone of the data repo to commit into.
WORKDIR="/tmp/gitea-temp"
DATDIR="/tmp/gitea-temp/charlesreid1-data"
CLONEDIR="/tmp/gitea-temp/charlesreid1-data2"

# Start from a clean clone each run.
rm -rf ${CLONEDIR}
git clone ssh://git@gitdatabot:222/data/charlesreid1-data.git ${CLONEDIR}

# Copy in the new data and push it (the subshell keeps the cd local).
(
cp ${DATDIR}/commit_counts.csv ${CLONEDIR}/.
cd ${CLONEDIR}
git config user.name "databot"
git config user.email "databot@charlesreid1.com"
git add commit_counts.csv
git commit commit_counts.csv -m '[scrape_gitea_as_sudo.sh] updating gitea commit count data'
git push origin master
)

86
utils-gitea/backup_gitea.sh Executable file
View File

@@ -0,0 +1,86 @@
#!/bin/bash
#
# backup_gitea.sh: back up a gitea site running in a docker
# container. Runs `gitea dump` inside the container, archives the
# avatars directory, copies both into the target directory $1,
# then tars up $1 (as <target-dir>.tar) and removes it.
set -x

# Print usage information and exit nonzero.
function usage {
    set +x
    echo ""
    echo "backup_gitea.sh script:"
    echo "Run a gitea dump from the gitea container,"
    echo "and back up the gitea avatars."
    echo "Gitea backups are dumped to gitea-dump-*.zip"
    echo "and gitea-avatars.zip and copied to the target directory."
    echo ""
    echo " ./backup_gitea.sh <target-dir>"
    echo ""
    echo "Example:"
    echo ""
    echo " ./backup_gitea.sh /path/to/backups/"
    echo ""
    echo "creates the files:"
    echo ""
    echo " /path/to/backups/gitea-dump-*.zip"
    echo " /path/to/backups/gitea-avatars.zip"
    echo ""
    echo ""
    exit 1;
}

if [[ "$#" -gt 0 ]];
then
    echo ""
    echo "Backup Gitea:"
    echo "----------------"
    echo ""
    NAME="podcharlesreid1_stormy_gitea_1"

    # If this script is being run from a cron job there is no TTY,
    # so don't use the -i flag with docker.
    CRON="$( pstree -s $$ | /bin/grep -c cron )"
    if [[ "$CRON" -eq 1 ]];
    then
        DOCKER="docker exec -t"
    else
        DOCKER="docker exec -it"
    fi

    echo "Step 1: Creating backup target"
    # -p so a /backup left over from an interrupted run is not fatal
    ${DOCKER} $NAME /bin/bash -c 'mkdir -p /backup'

    echo "Step 2: Creating backup zip files:"
    echo " Step 2A: gitea dump zip"
    ${DOCKER} $NAME /bin/bash -c '/app/gitea/gitea dump'
    echo " Step 2B: gitea avatars zip"
    ${DOCKER} $NAME /bin/bash -c 'cd /data/gitea/ && tar czf /backup/gitea-avatars.tar.gz avatars'

    echo "Step 3: Moving gitea dump to /backup directory"
    ${DOCKER} $NAME /bin/bash -c 'mv /tmp/gitea-dump-*/* /backup/.'

    echo "Step 4: Copying backup directory (with zip files) to backup location $1"
    # Fix: docker cp cannot expand a wildcard inside the container,
    # so the old `$NAME:/backup/*` form never matched anything.
    # The `/backup/.` form copies the directory CONTENTS into $1.
    mkdir -p "$1"
    docker cp $NAME:/backup/. "$1"

    echo "Step 5: Archiving backup location"
    # Fix: the prefix was previously computed from the undefined
    # variable $V, which produced an archive literally named ".tar".
    # Strip any trailing slash from $1 to form the archive name.
    TAR_PREFIX="$(echo "$1" | sed 's+/$++g')"
    tar -cvf "${TAR_PREFIX}.tar" "$1"
    rm -fr "$1"

    echo "Step 6: Cleaning up container"
    ${DOCKER} $NAME /bin/bash -c 'rm -rf /backup'
    ${DOCKER} $NAME /bin/bash -c 'rm -rf /tmp/gitea-dump-*'
    echo " ~ ~ ~ ~ PEACE OUT ~ ~ ~ ~"
else
    usage
fi

82
utils-gitea/restore_gitea.sh Executable file
View File

@@ -0,0 +1,82 @@
#!/bin/bash
#
# restore_gitea.sh: restore a gitea site from the two zip files
# produced by the backup script (a `gitea dump` zip and an
# avatars zip). Both files are copied into the running gitea
# container, unpacked under /restore, and restored in place.
# NOTE: this overwrites the container's repositories, database,
# and avatars -- it is destructive by design.
set -x

# Print usage information and exit nonzero.
function usage {
echo ""
echo "restore_gitea.sh script:"
echo "Restore a gitea site from a .zip dump file and a .zip avatars file."
echo ""
echo " ./restore_gitea.sh <dump-zip-file> <avatars-zip-file>"
echo ""
echo "Example:"
echo ""
echo " ./restore_gitea.sh /path/to/gitea-dump.zip /path/to/gitea-avatars.zip"
echo ""
echo ""
exit 1;
}

echo ""
echo "Restore Gitea:"
echo "----------------"
echo ""
NAME="podcharlesreid1_stormy_gitea_1"

# Require exactly two arguments: the dump zip and the avatars zip.
if [[ "$#" -eq 2 ]];
then
EXEC="docker exec -it $NAME"
CP="docker cp"

# Stage everything under /restore inside the container.
echo "- Copying files into container"
${EXEC} /bin/bash -c 'mkdir /restore'
${CP} $1 $NAME:/restore/gitea-dump.zip
${CP} $2 $NAME:/restore/gitea-avatars.zip
echo "- Unpacking files inside container"
${EXEC} /bin/bash -c 'unzip -qq /restore/gitea-dump.zip -d /restore'
${EXEC} /bin/bash -c 'unzip -qq /restore/gitea-avatars.zip -d /restore'
echo " - Unpacking repositories inside container"
# The dump zip itself contains gitea-repo.zip holding every repo.
${EXEC} /bin/bash -c 'unzip -qq /restore/gitea-repo.zip -d /restore'
echo " - Restoring 1/5: repositories"
${EXEC} /bin/bash -c 'rm -rf /data/git/repositories && cp -r /restore/repositories /data/git/repositories'
# We are actually just gonna skip this whole step,
# since everything here should be in d-gitea repo
echo " - Restoring 2/5: (skipping custom files)"
#echo " - Restoring 2/5: custom files"
#echo " - Moving old app.ini"
#${EXEC} /bin/bash -c 'mv /data/gitea/conf/app.ini /data/gitea/conf/app.ini.old'
#echo " - Restoring custom files"
#${EXEC} /bin/bash -c 'rm -rf /data/gitea && cp -r /restore/custom /data/gitea'
echo " - Restoring 3/5: sqlite database"
# The dump SQL uses true/false literals, which sqlite3 rejects;
# rewrite them to 1/0 before piping into the database.
# NOTE(review): this sed also rewrites "true"/"false" occurring
# inside string data -- confirm acceptable for this dataset.
${EXEC} /bin/bash -c 'cat /restore/gitea-db.sql | sed "s/false/0/g" | sed "s/true/1/g" | sqlite3 /data/gitea/gitea.db'
echo " - Restoring 4/5: avatars"
${EXEC} /bin/bash -c 'rm -rf /data/gitea/avatars && cp -r /restore/avatars /data/gitea/avatars'
echo " - Restoring 5/5: repairing paths in .git/hooks"
###################
# NOTE: This is entirely case-dependent.
# If backup/restore happened from same gitea dir structure,
# this section should be removed entirely.
#
# Example below swaps out /www/gitea with /data/gitea
#
#${EXEC} /bin/bash -c 'find /data/git/repositories -type f -exec sed -i -e "s%/www/gitea/bin/custom/conf%/data/gitea/conf%g" {} \;'
#${EXEC} /bin/bash -c 'find /data/git/repositories -type f -exec sed -i -e "s%/www/gitea/bin/gitea%/app/data/gitea%g" {} \;'
#
###################
# Remove the staging directory from the container.
echo " - Cleaning up"
${EXEC} /bin/bash -c 'rm -rf /restore'
else
usage
fi

View File

@@ -0,0 +1,96 @@
#!/bin/bash
#
# scrape_gitea_as_sudo.sh (run as root, e.g. from a root cron job)
#
# Scrapes repository logs from the docker volume holding gitea
# and assembles a per-day commit count for use in visualizing
# git commits. The result is left at ${GITDIR}/commit_counts.csv,
# owned by charles:charles.
#
# This script deliberately does NOT clone or push the
# https://git.charlesreid1.com/data/charlesreid1-data repo:
# ssh keys and sudo do not work well together, so a separate
# non-root companion script picks up the csv and commits it.

# Must be root to read the docker volume under /var/lib/docker.
if [ "$(id -u)" != "0" ]; then
echo ""
echo ""
echo "This script should be run as root."
echo ""
echo ""
exit 1;
fi

# Scratch area (also used as a throwaway git work tree below)
# and the output directory for the csv.
WORKDIR="/tmp/gitea-temp"
GITDIR="/tmp/gitea-temp/charlesreid1-data"

# Start from clean scratch directories each run.
rm -rf ${WORKDIR}
mkdir -p ${WORKDIR}
sudo chown -R charles:charles ${WORKDIR}
rm -rf ${GITDIR}
mkdir -p ${GITDIR}

# Step 1 (cloning the data repo) is intentionally skipped here;
# the non-root companion script does the clone/commit/push,
# since ssh-over-sudo cannot handle keys or passphrases.
### sudo -H -u charles git clone ssh://git@gitdatabot:222/data/charlesreid1-data.git ${GITDIR}

# Step 2: extract commit dates
# Walk every bare repo in the gitea docker volume and collect the
# date (second field of "%H %ai") of each commit whose author
# matches "harles".
for dir in `find /var/lib/docker/volumes/podcharlesreid1_stormy_gitea_data/_data/git/repositories -mindepth 2 -maxdepth 2 -type d`; do
git --git-dir=$dir --work-tree=${WORKDIR} \
log \
--all --author="harles" --oneline --pretty="%H %ai" \
| cut -f 2 -d " " >> ${GITDIR}/commit_dates
done

# Step 3: bin commit dates
# Count commits per date with an inline python Counter, then sort
# the resulting "date,count" rows (awk 'NF' drops blank lines).
words=$( cat ${GITDIR}/commit_dates )
echo "date,commits" > ${GITDIR}/commit_counts.csv
echo $words | sort | python -c 'import sys;
from collections import Counter; c=Counter(sys.stdin.read().strip().split(" "));
print("\n".join(("%s,%s"%(k, c[k]) for k in c.keys())));' | sort | awk 'NF' >> ${GITDIR}/commit_counts.csv
rm -f ${GITDIR}/commit_dates

# Hand ownership to the regular user so the non-root companion
# script can commit the file.
chown charles:charles ${GITDIR}/commit_counts.csv

# leave commits to the user.
###
### (
### cd ${GITDIR}
### sudo -H -u charles git config user.name "databot"
### sudo -H -u charles git config user.email "databot@charlesreid1.com"
### sudo -H -u charles git add commit_counts.csv
### sudo -H -u charles git commit commit_counts.csv -m '[scrape_gitea_as_sudo.sh] updating gitea commit count data'
### sudo -H -u charles git push origin master
### )

60
utils-mw/backup_wikifiles.sh Executable file
View File

@@ -0,0 +1,60 @@
#!/bin/bash
#
# backup_wikifiles.sh: create a tar file containing wiki files
# (/var/www/html/images) from the stormy_mw container and copy
# it to the path given as $1.

# Print usage information and exit nonzero.
function usage {
    echo ""
    echo "backup_wikifiles.sh script:"
    echo "Create a tar file containing wiki files"
    echo "from the stormy_mw container"
    echo ""
    echo " ./backup_wikifiles.sh <tar-file>"
    echo ""
    echo "Example:"
    echo ""
    echo " ./backup_wikifiles.sh /path/to/wikifiles.tar.gz"
    echo ""
    echo ""
    exit 1;
}

if [[ "$#" -gt 0 ]];
then
    echo ""
    echo "Backup MediaWiki Files:"
    echo "------------------------"
    echo ""
    NAME="podcharlesreid1_stormy_mw_1"
    TAR="wikifiles.tar.gz"

    # If this script is being run from a cron job there is no TTY,
    # so don't use the -i flag with docker.
    CRON="$( pstree -s $$ | /bin/grep -c cron )"
    if [[ "$CRON" -eq 1 ]];
    then
        DOCKER="docker exec -t"
    else
        DOCKER="docker exec -it"
    fi
    DOCKERCP="docker cp"

    set -x
    # zip to temp dir inside container
    ${DOCKER} ${NAME} tar czf /tmp/${TAR} /var/www/html/images
    # copy from container to target $1
    # (fix: quote "$1" so target paths containing spaces work)
    mkdir -p "$(dirname "$1")"
    ${DOCKERCP} ${NAME}:/tmp/${TAR} "$1"
    # clean up container (-f so a missing file is not an error)
    ${DOCKER} ${NAME} rm -f /tmp/${TAR}
    set +x
else
    usage
fi

View File

@@ -2,6 +2,7 @@
#
# Restore wiki files from a tar file
# into the stormy_mw container.
set -x
function usage {
echo ""
@@ -27,14 +28,11 @@ function usage {
if [[ "$#" -eq 1 ]];
then
NAME="pod-charlesreid1_stormy_mw_1"
NAME="podcharlesreid1_stormy_mw_1"
TAR=$(basename "$1")
set -x
docker cp $1 ${NAME}:/tmp/${TAR}
docker exec -it ${NAME} mv /var/www/html/images /var/www/html/images.old
docker exec -it ${NAME} tar -xf /tmp/${TAR} -C / && rm -f /tmp/${TAR}
docker exec -it ${NAME} chown -R www-data:www-data /var/www/html/images
set +x
docker exec -it ${NAME} tar -xf /tmp/${TAR} -C /var/www/html/ && rm -f /tmp/${TAR}
else
usage

View File

@@ -23,7 +23,7 @@ function usage {
if [[ "$#" -eq 0 ]];
then
NAME="pod-charlesreid1_stormy_mw_1"
NAME="podcharlesreid1_stormy_mw_1"
docker exec -it ${NAME} php /var/www/html/maintenance/update.php
else

View File

@@ -20,14 +20,14 @@ function usage {
exit 1;
}
CONTAINER_NAME="pod-charlesreid1_stormy_mysql_1"
NAME="podcharlesreid1_stormy_mysql_1"
if [[ "$#" -gt 0 ]];
then
TARGET="$1"
mkdir -p $(dirname $TARGET)
docker exec -i ${CONTAINER_NAME} sh -c 'exec mysqldump wikidb --databases -uroot -p"$MYSQL_ROOT_PASSWORD"' > $TARGET
docker exec -i ${NAME} sh -c 'exec mysqldump wikidb --databases -uroot -p"$MYSQL_ROOT_PASSWORD"' > $TARGET
else
usage

View File

@@ -6,15 +6,14 @@
# Note that this expects the .sql dump
# to create its own databases.
# Use the --databases flag with mysqldump.
set -x
function usage {
echo ""
echo "restore_database.sh script:"
echo ""
echo "Restores a database from an SQL dump."
echo "Restores the database into the "
echo "stormy_msyql container."
echo ""
echo "Obtains MySQL password from"
echo "MYSQL_ROOT_PASSWORD env var"
echo "inside mysql container."
@@ -41,32 +40,13 @@ function usage {
# a complete and utterpain in the ass
# because of all these one-off
# "whoopsie we don't do that" problems.
CONTAINER_NAME="pod-charlesreid1_stormy_mysql_1"
TARGET=$(basename $1)
TARGET_DIR=$(dirname $1)
#
if [[ "$#" -eq 1 ]];
then
# Step 1: Copy the sql dump into the container
set -x
docker cp $1 ${CONTAINER_NAME}:/tmp/${TARGET}
set +x
# Step 2: Run sqldump inside the container
set -x
docker exec -i ${CONTAINER_NAME} sh -c "/usr/bin/mysql --defaults-file=/root/.mysql.rootpw.cnf < /tmp/${TARGET}"
set +x
# Step 3: Clean up sql dump from inside container
set -x
docker exec -i ${CONTAINER_NAME} sh -c "/bin/rm -fr /tmp/${TARGET}.sql"
set +x
set +x
docker exec -i podcharlesreid1_stormy_mysql_1 \
mysql -uroot -p$MYSQL_ROOT_PASSWORD \
< $1
else
usage
fi