ghost to hugo migration

This commit is contained in:
CaptainArk 2019-01-06 16:29:44 +01:00
parent d6e810a425
commit a9dd223bd6
26 changed files with 3138 additions and 0 deletions

.gitmodules (new file)

@@ -0,0 +1,3 @@
[submodule "themes/hugo-theme-casper"]
path = themes/hugo-theme-casper
url = https://github.com/captainark/hugo-theme-casper.git

archetypes/default.md (new file)

@@ -0,0 +1,6 @@
---
title: "{{ replace .Name "-" " " | title }}"
date: {{ .Date }}
draft: true
share: false
---

config.toml (new file)

@@ -0,0 +1,69 @@
baseURL = "https://www.captainark.net/"
languageCode = "en-us"
title = "Sysadmining. All day. Every day."
theme = "hugo-theme-casper"
copyright = "© 2015 - 2019"
summaryLength = "35"
Paginate = "5"
issoUrl = "https://www.captainark.net/comments"
[params]
description = "Yet Another Blog about Linux and Networking"
cover = "images/cover.jpg"
author = "Antoine Joubert"
authorlocation = "Angers, France"
authorwebsite = "https://www.captainark.net"
authorbio = "Geek | Gamer | TV Shows Aficionado"
authoravatar = "images/author.jpg"
logo = "images/logo.png"
hideHUGOSupport = false
hjsVersion = "9.13.1"
hjsStyle = "solarized-light"
customHeaderPartial = "partialheader.html"
customFooterPartial = "partialfooter.html"
[[menu.main]]
identifier = "home"
name = "Home"
pre = "<h3>This site</h3>"
url = "/"
weight = 500
[[menu.main]]
identifier = "about"
name = "About"
url = "/about"
weight = 490
[[menu.main]]
identifier = "resume"
name = "Resume"
url = "/resume"
weight = 480
[[menu.main]]
identifier = "gitea"
pre = "<h3>Other services</h3>"
name = "Gitea"
url = "https://git.captainark.net"
weight = 470
[[menu.main]]
identifier = "chevereto"
name = "Chevereto"
url = "https://pics.captainark.net"
weight = 460
[[menu.main]]
identifier = "privatebin"
name = "Privatebin"
url = "https://paste.captainark.net"
weight = 450
[[menu.main]]
identifier = "rocketchat"
name = "Rocket.Chat"
url = "https://chat.captainark.net"
weight = 440
[permalinks]
post = "/:year/:month/:day/:slug/"
[sitemap]
ChangeFreq = ""
Filename = "sitemap.xml"
Priority = "-1"

content/about.md (new file)

@@ -0,0 +1,9 @@
---
title: "About"
date: 2019-01-06T12:20:50+01:00
draft: false
type: "page"
share: false
---
My blog.

(File diff suppressed because it is too large)

@@ -0,0 +1,254 @@
---
title: "Flexget init script"
date: 2015-05-25T00:00:00+01:00
draft: false
share: false
---
I've been using [Flexget](http://flexget.com/) for the past two years or so as a download automator.
I wrote an [init script](http://flexget.com/wiki/Daemon/Startup#InsservscriptDebiancompatible) for it a while back that is also compatible with Debian Jessie / systemd, so I figured I'd share it here.
## The script
All of the following should be done as the root user.
First, create a `/etc/default/flexget` file with the following content:
```bash
# Configuration file for /etc/init.d/flexget
# User to run flexget as.
# Daemon will not start if left empty.
FGUSER=""
# Full path to the flexget config.yml file to use.
# Defaults to FGUSER $HOME/.flexget/config.yml
CONFIG=""
# Path to the directory where flexget should log. Do not add trailing slash.
# Defaults to the FGUSER $HOME/.flexget directory
LOG=""
# Log verbosity
# Available options : none critical error warning info verbose debug trace
# Defaults to info
LEVEL=""
```
Please note that the `FGUSER` variable needs to be defined for the daemon to start. It can be set to your current user, or you can run flexget as its own dedicated user.
You can create a flexget user with the following command:
```bash
useradd -m -d /var/lib/flexget -r -s /bin/false flexget
```
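With that dedicated user, a filled-in `/etc/default/flexget` would look like this (a sketch; the empty variables fall back to the defaults documented in the file above):
```bash
FGUSER="flexget"   # daemon will run as this user
CONFIG=""          # default: $HOME/.flexget/config.yml
LOG=""             # default: the $HOME/.flexget directory
LEVEL=""           # default: info
```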
Then, create the `/etc/init.d/flexget` file:
```bash
#!/bin/bash
### BEGIN INIT INFO
# Provides: flexget
# Required-Start: $network $remote_fs
# Required-Stop: $network $remote_fs
# Should-Start:
# Should-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Flexget
# Description: FlexGet is a multipurpose automation tool
# for content like torrents, nzbs, podcasts,
# comics, series, movies, etc.
### END INIT INFO
# Author: Antoine Joubert, 19/01/2014
NAME="flexget"
DAEMON="/usr/local/bin/flexget"
SETTINGS="/etc/default/$NAME"
DESC="Flexget"
PIDFILE="/var/run/$NAME.pid"
set -e
. /lib/lsb/init-functions
unset FGUSER CONFIG LOG LEVEL
# Exit if flexget is not installed
if [ ! -x "$DAEMON" ]; then
    log_action_msg "$DESC: Could not find flexget executable. Exiting."
    exit 2
fi

# Read the configuration variables
if [ -r /etc/default/$NAME ]; then
    . /etc/default/$NAME
else
    log_action_msg "$DESC: /etc/default/$NAME not found. Exiting."
    exit 2
fi

# Exit if FGUSER has not been set in /etc/default/flexget
if [ -z "$FGUSER" ]; then
    log_action_msg "$DESC: FGUSER not set in /etc/default/$NAME. Exiting."
    exit 2
fi

# Check whether flexget is already running
run_check() {
    if [ -e "$PIDFILE" ]; then
        status_of_proc -p "$PIDFILE" "$DAEMON" "$NAME" > /dev/null && RETVAL=0 || RETVAL="$?"
    else
        RETVAL="2"
    fi
}

end_log() {
    if [ "$RETVAL" -eq 0 ]; then
        log_end_msg 0
        return 0
    else
        log_end_msg 1
        exit 1
    fi
}

# Build the daemon options from the config file, log file and log level
conf_check() {
    if [ -n "$CONFIG" ]; then
        OPTIONS="-c $CONFIG"
    fi
    if [ -n "$LOG" ]; then
        OPTIONS="$OPTIONS -l $LOG/flexget.log"
        if [ ! -d "$LOG" ]; then
            mkdir -p -m 750 "$LOG"
            chown "$FGUSER" "$LOG"
        fi
    fi
    if [ -n "$LEVEL" ]; then
        OPTIONS="$OPTIONS -L $LEVEL"
    fi
}

start_flexget() {
    run_check
    if [ "$RETVAL" = 0 ]; then
        log_action_msg "$DESC: Already running with PID $(cat $PIDFILE). Aborting."
        exit 2
    else
        conf_check
        log_daemon_msg "$DESC: Starting the daemon."
        start-stop-daemon --start --background --quiet --pidfile "$PIDFILE" --make-pidfile \
            --chuid "$FGUSER" --user "$FGUSER" --exec "$DAEMON" -- $OPTIONS daemon start
        RETVAL=$?
        end_log
    fi
}

stop_flexget() {
    run_check
    if [ "$RETVAL" = 0 ]; then
        log_daemon_msg "$DESC: Stopping the daemon."
        start-stop-daemon --stop --quiet --chuid "$FGUSER" --pidfile "$PIDFILE" --retry 30
        RETVAL=$?
        [ -e "$PIDFILE" ] && rm -f "$PIDFILE"
        end_log
    else
        log_action_msg "$DESC: Not currently running. Aborting."
        exit 2
    fi
}

status_flexget() {
    run_check
    if [ "$RETVAL" = 0 ]; then
        log_action_msg "$DESC: Currently running with PID $(cat $PIDFILE)."
    else
        log_action_msg "$DESC: Not currently running."
    fi
    exit $RETVAL
}

case "$1" in
    start)
        start_flexget
        ;;
    stop)
        stop_flexget
        ;;
    restart)
        stop_flexget && sleep 2 && start_flexget
        ;;
    status)
        status_flexget
        ;;
    *)
        echo "Usage: $0 {start|stop|restart|status}"
        ;;
esac

exit 0
```
Then, make the script executable:
```bash
chmod +x /etc/init.d/flexget
```
Next, generate the symlinks needed for the service to start on boot:
*Debian Jessie*
```bash
systemctl enable flexget
```
*Debian Wheezy*
```bash
insserv flexget
```
To start, stop, or check whether the daemon is running:
*Debian Jessie*
```bash
systemctl start flexget
systemctl stop flexget
systemctl status flexget
```
*Debian Wheezy / Jessie*
```bash
service flexget start
service flexget stop
service flexget status
```
*Debian Wheezy*
```bash
/etc/init.d/flexget start
/etc/init.d/flexget stop
/etc/init.d/flexget status
```
## Conclusion
That's all! If you are using this script, please let me know in the comments section below!

@@ -0,0 +1,83 @@
---
title: "Debian updates with Ansible"
date: 2016-01-31T01:00:00+01:00
draft: false
share: false
---
I've recently bought an [HP ProLiant MicroServer Gen8](http://www8.hp.com/us/en/products/proliant-servers/product-detail.html?oid=5379860) to play around with LXC and try new stuff.
I went from 4 Debian machines to keep up to date to 7, so it became quite time-consuming to manually SSH into each of them whenever an update became available.
I ended up looking at [Ansible](http://www.ansible.com/) to speed up the process and, within an hour, I had a working playbook that updates the Debian packages, pip packages and git repos installed on all of my servers with a single command.
I figured I'd share the playbook I use to update the Debian packages!
## The playbook
I modified [this gist](https://gist.github.com/maethor/380676f6b1cec8cc7439) to use only apt-get instead of both apt-get and aptitude.
```yaml
- hosts: all
tasks:
- name: update cache
apt: update_cache=yes
- name: list packages to upgrade (1/2)
shell: apt-get upgrade -s -V | awk '/=>/{print $1}'
register: updates
changed_when: False
- name: list packages to upgrade (2/2)
debug: msg="{{ updates.stdout_lines | count }} packages to upgrade ({{ updates.stdout_lines | join(', ') }})"
when: (updates.stdout_lines)
- name: upgrade packages
apt: upgrade=dist
when: (updates.stdout_lines)
- name: check what the new version is
shell: lsb_release -r | awk '{print $2}'
changed_when: False
register: new_release
- name: notify distribution version upgrade
debug: msg="Debian has been upgraded from {{ ansible_lsb.release }} to {{ new_release.stdout }}"
when: ansible_lsb.release != new_release.stdout
- name: /wheezy/ install the debian-goodies package if it is missing
apt: name=debian-goodies state=present
when: ansible_distribution_release == 'wheezy'
- name: /jessie/ install the needrestart package if it is missing
apt: name=needrestart state=present default_release=jessie-backports
when: ansible_distribution_release == 'jessie'
- name: /wheezy/ list services to restart (1/3)
shell: checkrestart | awk '/^service/{print $2}'
register: wheezy_services
changed_when: False
when: ansible_distribution_release == 'wheezy'
- name: /jessie/ list services to restart (1/3)
shell: needrestart -blrl | awk '/^NEEDRESTART-SVC/{print $2}'
register: jessie_services
changed_when: False
when: ansible_distribution_release != 'wheezy'
- name: merge services list (2/3)
set_fact:
services: "{{ wheezy_services if ansible_distribution_release == 'wheezy' else jessie_services }}"
- name: list services to restart (3/3)
debug: msg="{{ services.stdout_lines | count }} services to restart ({{ services.stdout_lines | join (', ') }})"
when: (services.stdout_lines)
- name: cache cleanup
shell: apt-get autoclean
```
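To run it against every host in your inventory (a sketch; `update.yml` and `hosts` are placeholder names, and the tasks need root privileges, e.g. via `--become` or a root remote user):
```bash
ansible-playbook -i hosts update.yml --become
```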
## Conclusion
That's all! Please leave a comment if you've found this playbook helpful!

@@ -0,0 +1,183 @@
---
title: "Private Git repo"
date: 2016-01-31T00:00:00+01:00
draft: false
share: false
---
I've decided to migrate this blog to [Pelican](http://blog.getpelican.com/). I've been playing around with it over the weekend, and it turns out to be way easier to manage than [Jekyll](https://jekyllrb.com/). Themes are much easier to install and configure, so the site ends up looking better as well!
Since I'm basically recreating this blog from scratch, I've decided to delete the old git repo that was hosting it and to create a new one.
Setting up your own private git repo is pretty easy to achieve and is already well-documented on the [Git](https://git-scm.com/book/en/v2/Git-on-the-Server-Setting-Up-the-Server) website.
Still, every time I want to create a new repo, I've had time to forget how to do it and I end up looking for that page again, so I figured I'd write a few lines on the subject.
In this tutorial, I'll configure a git repo on a remote server running Debian 8 (Jessie). The repo will be remotely accessible over SSH by two users: me and the www-data user on my webserver.
## SSH Keys
If you don't have one already, you'll need an SSH key to connect to the git repo.
On your computer, in a shell, as your usual user:
```bash
ssh-keygen -t rsa -b 3072
Generating public/private rsa key pair.
Enter file in which to save the key (/home/user/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/user/.ssh/id_rsa.
Your public key has been saved in /home/user/.ssh/id_rsa.pub.
The key fingerprint is:
[Redacted]
```
For security reasons, configuring a passphrase is recommended. On Mac OS X and most desktop environments on Linux, you can store this passphrase for the duration of your session using the `ssh-add` command, so you won't have to type it every time you want to connect to a host.
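For example (assuming the default key path from the `ssh-keygen` run above):
```bash
ssh-add ~/.ssh/id_rsa
```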
On the server, we also have to create an SSH key for the user that runs our webserver (you'll need to have sudo installed):
```bash
sudo -H -u www-data ssh-keygen -t rsa -b 3072
Generating public/private rsa key pair.
Enter file in which to save the key (/var/www/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /var/www/.ssh/id_rsa.
Your public key has been saved in /var/www/.ssh/id_rsa.pub.
The key fingerprint is:
[Redacted]
```
If you decide to configure a passphrase for that SSH key, you'll have to type it every time you want to pull from your repo.
## Server management
All of the commands in this section have to be run as root.
First things first, we have to install the git package on the server that will be hosting our git repos:
```bash
apt update && apt install git -y
```
Then, we have to create a user named git:
```bash
useradd -s /usr/bin/git-shell -m -r git
```
This will create a system user (UID < 1000) with a /home/git home directory. If you want to host your git repos somewhere else on your filesystem, you should add `-d /home/directory/for/git` to the previous command.
This user will use the git-shell shell, which limits remote connections to git commands (much like the rssh shell can limit a user to scp or rsync commands).
We have to configure our system to allow the use of this shell:
```bash
echo '/usr/bin/git-shell' >> /etc/shells
```
From this point on, you should get the following output if you try to SSH into your server as that user:
```bash
ssh git@git.captainark.net
fatal: Interactive git shell is not enabled.
hint: ~/git-shell-commands should exist and have read and execute access.
Connection to git@git.captainark.net closed.
```
We now need to create the .ssh/authorized_keys file for the git user, with the correct permissions:
```bash
sudo -H -u git mkdir /home/git/.ssh && chmod 700 /home/git/.ssh
sudo -H -u git touch /home/git/.ssh/authorized_keys && chmod 600 /home/git/.ssh/authorized_keys
```
You can now copy/paste the content of the two `$HOME/.ssh/id_rsa.pub` files we created earlier with `ssh-keygen` into `/home/git/.ssh/authorized_keys`.
The last thing we have to do is create our first git repo. In this example, my project will be called 'captainarkdotnet' as it will be hosting this blog:
```bash
sudo -H -u git mkdir /home/git/captainarkdotnet.git
cd /home/git/captainarkdotnet.git
sudo -H -u git git init --bare
```
The last command should give you the following output:
```bash
Initialized empty Git repository in /home/git/captainarkdotnet.git/.git/
```
We're done with the server configuration. Let's now actually push stuff to our repo!
### Initial push
The files for my blog are stored in ~/Documents/projects/captainarkdotnet on my computer. Before doing anything else, we first have to make sure that we're in that folder:
```bash
cd ~/Documents/projects/captainarkdotnet
```
Let's now push the content of that folder to our repo:
```bash
git init
git add .
git commit -m 'initial commit'
git remote add origin git@git.captainark.net:captainarkdotnet.git
git push origin master
```
Please note that you'll need to replace **git.captainark.net** with the FQDN or IP of your git server, and **captainarkdotnet.git** with the name of the git project on your server.
If everything went well, the last command should give you the following output:
```bash
Counting objects: 69, done.
Delta compression using up to 4 threads.
Compressing objects: 100% (64/64), done.
Writing objects: 100% (69/69), 1.01 MiB | 0 bytes/s, done.
Total 69 (delta 15), reused 0 (delta 0)
To git@git.captainark.net:captainarkdotnet.git
* [new branch] master -> master
```
That's it, we've now pushed our first commit to our server!
## First pull
Alright, time to pull the files we've just pushed onto our webserver. I personally store my web content in `/var/www`; if you don't, you'll have to adjust the path accordingly:
```bash
cd /var/www
sudo -H -u www-data git clone git@git.captainark.net:captainarkdotnet.git
```
SSH will ask you to type 'yes' since it's the first time the www-data user connects to the server. If everything goes well, you should see the following output:
```bash
Cloning into 'captainarkdotnet'...
remote: Counting objects: 70, done.
remote: Compressing objects: 100% (65/65), done.
remote: Total 70 (delta 16), reused 0 (delta 0)
Receiving objects: 100% (70/70), 1.01 MiB | 0 bytes/s, done.
Resolving deltas: 100% (16/16), done.
Checking connectivity... done.
```
## Conclusion
That's it! We now have a working private git repo! I won't go into detail on the git commands in this tutorial, but here's a quick overview of the ones I use the most:
- `git add .` recursively adds all files from the directory to the repo;
- `git commit -a -m 'This is a comment'` commits the current state of your local repo with the 'This is a comment' comment;
- `git push` pushes your commits to the distant repo;
- `git pull` pulls the latest version of the distant repo locally;
- `git branch -av` shows all available branches for the repo;
- `git checkout -b testing remotes/origin/testing` creates a local 'testing' branch based on the remote 'remotes/origin/testing' branch;
- once a branch has been copied locally, you can switch to it with the `git checkout {branch}` command.
For more information on a git command, use `man git-{command}`!
If you've found this tutorial in any way helpful, please feel free to leave a comment!

@@ -0,0 +1,192 @@
---
title: "My tmux configuration"
date: 2016-02-02T00:00:00+01:00
draft: false
share: false
---
[tmux](https://tmux.github.io/) is a terminal multiplexer. It lets you have multiple shells running in a single terminal emulator window, and it keeps those shells running in the background should you need to close your terminal emulator.
I've played around with the configuration quite a bit to find settings that suit my needs. Here's what it ended up looking like:
![tmux_fullsize](/images/tmux.png)
This screenshot was taken on Mac OS X, using the Terminal app and this [Solarized theme](https://github.com/tomislav/osx-terminal.app-colors-solarized).
I figured I'd share my tmux configuration here!
## Installing tmux
tmux is available on Debian. I suggest using the [jessie backports](https://packages.debian.org/jessie-backports/tmux) version:
`apt -t jessie-backports install tmux`
tmux is also available on Mac OS X using [brew](http://brew.sh/):
`brew install tmux`
## tmux.conf
I used screen before tmux, so I configured the prefix key to C-a instead of C-b. tmux has the advantage of being *much* simpler to configure than screen.
If you want to use this configuration, simply copy the following into ~/.tmux.conf. This file is read by default when tmux starts.
If you simply want to try it out, copy it to a file somewhere else and have tmux load it with the -f parameter (`tmux -f ~/tmux-test.conf`).
```
# use utf8
set -g utf8
set-option -g status-utf8 on
set-window-option -g utf8 on
# do not wait on esc key
set-option -g escape-time 0
# completely disable automatic rename
set-window-option -g automatic-rename off
# basic settings
set -g default-terminal "screen-256color"
set -g aggressive-resize off
set-window-option -g xterm-keys on
#set-window-option -g mode-mouse off
# command history
set -g history-limit 10000
# messages
set -g message-bg default
set -g message-fg red
# no visual activity
set -g visual-activity off
set -g visual-bell off
# status bar
set-option -g status-justify centre
set-option -g status-bg default
set-option -g status-fg blue
set-option -g status-interval 5
set-option -g status-left-length 30
set-option -g status-left '#[fg=red][ #[fg=white]#H #[fg=red]]#[default]'
set-option -g status-right '#[fg=red][ #[fg=white]%R %d/%m #[fg=red]]#[default]'
# modes
set-option -g mode-bg default
set-option -g mode-fg blue
# inactive window format
set-window-option -g window-status-format '#I:#W#F'
set-window-option -g monitor-activity on
#set-window-option -g monitor-content on # not available in tmux 2.0
# activity in a window
set-window-option -g window-status-activity-attr dim
set-window-option -g window-status-activity-bg default
set-window-option -g window-status-activity-fg yellow
# content in a window # not available in tmux 2.0
#set-window-option -g window-status-content-attr dim
#set-window-option -g window-status-content-bg default
#set-window-option -g window-status-content-fg red
# active window format
set-window-option -g window-status-current-fg white
set-window-option -g window-status-current-bg default
set-window-option -g window-status-current-format '#[fg=red](#[default]#I:#W#F#[fg=red])#[default]'
# reload tmux configuration
unbind r
bind r source-file ~/.tmux.conf \; display "Configuration reloaded!"
# Screen-like keybinds
unbind C-b
set -g prefix ^A
set -g prefix2 ^Q
bind a send-prefix
bind q send-prefix
unbind c
bind c new-window
unbind ^C
bind ^C new-window
unbind n
bind n next-window
unbind ^N
bind ^N next-window
unbind A
bind A command-prompt "rename-window %%"
unbind p
bind p previous-window
unbind ^P
bind ^P previous-window
unbind a
bind a last-window
unbind ^A
bind ^A last-window
unbind [
bind Escape copy-mode
unbind w
bind w list-windows
unbind k
bind k confirm-before "kill-window"
unbind l
bind l refresh-client
unbind '"'
bind '"' choose-window
```
## Aliases
I also use two functions with tmux (in ~/.bash_aliases).
The first one creates a new "mytmux" tmux session if one doesn't exist yet, opens 10 windows, and selects the first one.
```bash
mytmux() {
    tmux has-session -t mytmux
    if [ $? != 0 ]; then
        tmux new-session -s mytmux -n $(hostname) -d
        tmux new-window -t mytmux:1 -n $(hostname)
        tmux new-window -t mytmux:2 -n $(hostname)
        tmux new-window -t mytmux:3 -n $(hostname)
        tmux new-window -t mytmux:4 -n $(hostname)
        tmux new-window -t mytmux:5 -n $(hostname)
        tmux new-window -t mytmux:6 -n $(hostname)
        tmux new-window -t mytmux:7 -n $(hostname)
        tmux new-window -t mytmux:8 -n $(hostname)
        tmux new-window -t mytmux:9 -n $(hostname)
        tmux select-window -t mytmux:0
    fi
    tmux attach -t mytmux
}
```
The second one changes the tmux window name whenever I SSH to a remote host, and switches the window name back to the name of my computer when I log out from the host.
```bash
if [ -n "$TMUX" ]; then
ssh() {
if [ $# -le 2 ]; then
tmux rename-window "${@: -1}"
command ssh "$@"
tmux rename-window "$(hostname)"
else
command ssh "$@"
fi
}
fi
```
## Conclusion
That's all! As always, please do leave a comment if you've found something useful in this article!

@@ -0,0 +1,52 @@
---
title: "MySQL backup script"
date: 2016-03-13T00:00:00+01:00
draft: false
share: false
---
I wrote a MySQL database backup script a while back. I know there are more than enough of them floating around the internet already, but hey, I figured I'd share it here anyway.
## The script
For the script to work, you'll need to edit a few variables to match your configuration.
- `BACKUPDIR` is the path of the directory where you want your backups to be stored.
- `BACKUPUSR` is the user that will connect to MySQL to dump the databases. It should have access to all your databases without needing a password.
- `EXCLUDELIST` is a list of databases that should not be backed up. Leaving it as is is probably fine.
```bash
#!/bin/bash
BACKUPDIR="/home/user/backup"
BACKUPUSR="user"
EXCLUDELIST="^Databases$|^information_schema$|^mysql$|^performance_schema$"
sqlbk() {
    for each in $(mysqlshow | awk '/[[:alnum:]]/{print $2}'); do
        # skip databases matching the exclude list
        if [[ ! $each =~ $EXCLUDELIST ]]; then
            mysqldump $each | bzip2 > ${BACKUPDIR}/${each}.sql.bz2
            chown ${BACKUPUSR}: ${BACKUPDIR}/${each}.sql.bz2 && chmod 600 ${BACKUPDIR}/${each}.sql.bz2
        fi
    done
}
[[ -e /etc/init.d/mysql ]] && sqlbk
```
I personally have this script running once a week, in my user's personal crontab (editable using the `crontab -e` command):
```
## WEEKLY DATABASE BACKUP
@weekly /home/user/bin/backupdb
```
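Restoring one of these dumps is the reverse pipeline (a sketch; `mydatabase` is a placeholder for an existing, empty database):
```bash
bunzip2 -c /home/user/backup/mydatabase.sql.bz2 | mysql mydatabase
```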
## Conclusion
You've probably noticed that the script erases the previous backup when a new one is made.
I don't need to keep multiple versions of the same database backup on my servers because they are all saved remotely on a daily basis using [Rsnapshot](http://rsnapshot.org/). I'll probably write an article on the subject in the future.
As usual, feedback is always appreciated!

@@ -0,0 +1,361 @@
---
title: "WebDAV with nginx"
date: 2016-03-26T00:00:00+01:00
draft: false
share: false
---
This website has been hosted on an [Online.net](https://www.online.net) dedicated server since its creation. I've been one of their customers for the past 3 years now, and I still don't have anything bad to say about them.
They recently upgraded their personal range, and I took the opportunity to move from a single server running all of my services to 2 servers running LXC containers that host my services.
It took me 2 days to migrate everything, but it was worth it. If I decide to switch servers again, I'll have to migrate the containers instead of the services themselves. Considering they are stored on a separate BTRFS volume, it shouldn't take me more than a few hours at most.
During the migration, I realized that I needed to make files hosted on one server accessible to the other. I could have gone with CIFS or NFS, but I wanted encryption built in instead of having to rely on a VPN for that. Since I figured it was a good opportunity to learn something new, I ended up going with WebDAV.
In this tutorial, I'll explain how I've configured a read-only WebDAV share using [nginx](https://www.nginx.com/) and [Let's Encrypt](https://letsencrypt.org/) SSL certificates between two Debian Jessie containers.
## Server configuration
### Installing the required packages
First things first, we need to install the packages we'll need for this configuration:
```bash
apt update
apt -t jessie-backports install nginx letsencrypt
apt install apache2-utils
```
### Getting our first certificate from letsencrypt
#### letsencrypt configuration
Let's create a configuration file for letsencrypt:
```bash
mkdir /etc/letsencrypt
echo 'rsa-key-size = 3072
renew-by-default
text = True
agree-tos = True
renew-by-default = True
authenticator = webroot
email = admin@example.com
webroot-path = /var/www/letsencrypt/' > /etc/letsencrypt/cli.ini
```
*Please replace admin@example.com with your actual e-mail address.*
We also need to create the directory structure where letsencrypt's ACME challenge temporary files will be stored:
```
mkdir -p /var/www/letsencrypt/.well-known
```
#### nginx configuration
We now need to configure nginx by adding the following to the `/etc/nginx/sites-available/default` file, anywhere in the `server{}` block that is configured to listen on port 80:
```
location /.well-known/acme-challenge {
root /var/www/letsencrypt;
}
```
Let's make sure that we haven't done anything wrong:
```bash
nginx -t
```
The command should give you the following output:
```
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
```
If that's the case, you can safely reload the nginx daemon:
```
nginx -s reload
```
#### Certificate request
Now that letsencrypt and nginx are properly configured, we can request our certificate from letsencrypt:
```bash
letsencrypt --config /etc/letsencrypt/cli.ini certonly -w /var/www/letsencrypt -d www.example.com
```
*Please replace www.example.com with your server's FQDN, and note that the letsencrypt servers need to be able to resolve that name to your server's IP.*
If everything goes well, your certificates will be generated and stored in the /etc/letsencrypt folder.
### WebDAV configuration
Now that we've obtained our certificate from letsencrypt, we can begin configuring nginx.
First, we need to comment out two SSL directives from the default nginx configuration:
```
sed -i '/ssl_/ s/^/#/' /etc/nginx/nginx.conf
```
Let's now create a `/etc/nginx/conf.d/ssl.conf` file with the following content:
```
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
ssl_certificate /etc/letsencrypt/live/www.example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/www.example.com/privkey.pem;
ssl_trusted_certificate /etc/letsencrypt/live/www.example.com/fullchain.pem;
ssl_dhparam /etc/nginx/ssl/dhparam.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS';
ssl_prefer_server_ciphers on;
add_header Strict-Transport-Security max-age=15768000;
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
ssl_stapling on;
ssl_stapling_verify on;
resolver 127.0.0.1 valid=300s;
resolver_timeout 5s;
```
*This configuration will work if you're using a single certificate on your server. If not, you'll have to remove the `ssl_certificate`, `ssl_certificate_key` and `ssl_trusted_certificate` directives from this file and move them to the correct `server{}` block.*
We now need to generate a `dhparam.pem` file:
```bash
mkdir /etc/nginx/ssl && chmod 700 /etc/nginx/ssl
openssl dhparam -out /etc/nginx/ssl/dhparam.pem 3072
chmod 600 /etc/nginx/ssl/dhparam.pem
```
Let's now generate an HTTP basic authentication file. This example creates a user named example:
```
mkdir /etc/nginx/auth
htpasswd -c /etc/nginx/auth/webdav example
New password:
Re-type new password:
Adding password for user example
```
This file has to be readable by the user running your webserver. For security reasons, we'll make it readable by that user only:
```
chown -R www-data:nogroup /etc/nginx/auth
chmod 700 /etc/nginx/auth
chmod 400 /etc/nginx/auth/webdav
```
Let's now modify our `/etc/nginx/sites-available/default` file with the following content:
```
server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;
    server_name "";
    return 444;
}

server {
    listen 443 default_server ssl http2;
    listen [::]:443 default_server ipv6only=on ssl http2;
    server_name "";
    return 444;
}
```
We now have to create a `/etc/nginx/sites-available/example` file that will contain our actual WebDAV configuration. This example makes a `data` folder stored in `/var/www/` accessible:
```
server {
    listen 80;
    listen [::]:80;
    server_name www.example.com;
    return 301 https://$server_name$request_uri;
}

server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name www.example.com;
    root /var/www;

    location / {
        index index.html;
    }

    location /.well-known/acme-challenge {
        root /var/www/letsencrypt;
    }

    location /data {
        client_body_temp_path /tmp;
        dav_methods PUT DELETE MKCOL COPY MOVE;
        dav_ext_methods PROPFIND OPTIONS;
        create_full_put_path on;
        dav_access user:r group:r;
        auth_basic "Restricted access";
        auth_basic_user_file auth/webdav;
        limit_except GET {
            allow <YOUR IP HERE>;
            deny all;
        }
    }
}
```
The last thing we have to do is create a symlink so that nginx will load our configuration:
```
ln -s /etc/nginx/sites-available/example /etc/nginx/sites-enabled/example
```
As before, let's make sure our configuration is correct, then reload the daemon:
```
nginx -t
nginx -s reload
```
That's it for the server-side WebDAV configuration!
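Before configuring the client, you can sanity-check the share with curl (a sketch; `somefile.txt` is a placeholder for a file that exists under `/var/www/data`, and `example` is the basic auth user created earlier; GET requests are allowed from any IP in the configuration above):
```bash
# Should prompt for the basic auth password, then return the file:
curl -u example https://www.example.com/data/somefile.txt
```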
### nginx monitoring
If you're using monit, you can easily monitor the nginx daemon by copying the following into `/etc/monit/conf.d/nginx`:
```
check process nginx
    with pidfile "/run/nginx.pid"
    start program = "/bin/systemctl start nginx"
    stop program = "/bin/systemctl stop nginx"
    alert monit@example.com
```
### Certificates auto-renewal
This goes beyond the scope of the article, but since letsencrypt certificates are only valid for 3 months, you'll need to renew them regularly. You can do so manually, or you can set up a cron job that does it for you.
I personally use the following script:
```
#!/bin/bash

PRG="/usr/bin/letsencrypt"
CONFIG="/etc/letsencrypt/cli.ini"
MAILDEST="admin@example.com"
GLOBAL=0

# www.example.com
$PRG --config $CONFIG certonly -w /var/www/letsencrypt -d www.example.com
[[ $? != 0 ]] && GLOBAL=$(( $GLOBAL + 1 ))

if [[ $GLOBAL == 0 ]]; then
    /usr/sbin/nginx -s reload
else
    echo "Something went wrong while renewing the certificates on $(hostname -f)
Manual action needed." | mail -s "Letsencrypt error on $(hostname -f)" $MAILDEST
fi
```
You can add multiple domains to the script. As long as you add all 3 lines for each domain, it will not automatically reload nginx if one or more certificates could not be renewed, and it will send an e-mail to the address configured in the `MAILDEST` variable.
You can schedule this script in the root user's crontab using the `crontab -e` command:
```
## LETSENCRYPT CERTIFICATE AUTORENEWAL
30 03 01 */2 * /root/bin/tlsrenew
```
This will run the script every two months, on the first day of the month, at 3:30 AM.
## Client configuration
### Installing the required packages
A single package is required to mount a webdav volume on Debian :
```
apt update && apt install davfs2
```
### Mounting the share manually
If, like me, you want to mount your WebDAV share in an LXC container, you'll first need to make sure that the following line is present in its configuration file:
```
lxc.cgroup.devices.allow = c 10:229 rwm
```
You'll also need to create the `/dev/fuse` node in the container:
```
mknod /dev/fuse c 10 229
```
In any case, we have to edit the `/etc/davfs2/secrets` file to add the mount point, username and password that will be used to mount the share:
```
echo '/data webdav notanactualpassword' >> /etc/davfs2/secrets
```
Once that's done, we can mount our share with the following command:
```
mount -t davfs https://www.example.com/data /data -o ro,dir_mode=750,file_mode=640,uid=root,gid=root
```
You might need to edit the parameters depending on which users you want to make the share available to.
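For instance, to make the mount readable by the www-data user instead of root (a sketch; same share as above, only the ownership options change):
```
mount -t davfs https://www.example.com/data /data -o ro,dir_mode=750,file_mode=640,uid=www-data,gid=www-data
```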
### Mounting the share on boot
A davfs volume can be mounted via the `/etc/fstab` file, but I decided to use monit instead, so that the volume is remounted automatically should my WebDAV server reboot.
In order to do so, I first created a `davfs.txt` file in the `/var/www/data` folder on my WebDAV server:
```
touch /var/www/data/davfs.txt
```
I then created the following `/root/bin/mount_davfs` script:
```
#!/bin/bash
mknod /dev/fuse c 10 229
mount -t davfs https://www.example.com/data /data -o ro,dir_mode=750,file_mode=640,uid=root,gid=root
```
The last thing I did was create a `/etc/monit/conf.d/davfs` file with the following content:
```
check file davfs with path /data/davfs.txt
    alert monit@example.com
    if does not exist then exec "/root/bin/mount_davfs"
```
That way, if monit notices that the `/data/davfs.txt` file has become inaccessible for some reason, it will try remounting the share.
## Conclusion
That's all! Hopefully this has been useful to someone. Please do comment below if you have any questions or if this has been helpful!

@@ -0,0 +1,301 @@
---
title: "Installing Ghost"
date: 2015-11-19T00:00:00+01:00
draft: false
share: false
---
I haven't published an article on here for over a year and a half... While this was mostly due to a lack of motivation, another reason was that I didn't enjoy the blogging system I was using.
As lightweight as [Pelican](https://blog.getpelican.com/) is, I found it cumbersome to use on a regular basis. Every time I wanted to publish or update an article, I had to:
- edit local markdown files;
- regenerate the website files;
- start a webserver locally to proofread the article;
- commit and push the files to my git repo;
- pull the files on the webserver.
I hadn't had a look at the CMS landscape for a while, and I started searching for one with a web editor that supports markdown. I also wanted to avoid anything that runs on PHP if possible.
I quickly discovered [Ghost](https://ghost.org/) and decided to give it a shot. I was convinced within a few hours and decided to migrate this blog.
So, to celebrate my move to Ghost, I figured I'd write an article on how I've installed it on my server.
All commands in this article have to be run as the `root` user on a Debian server.
# Installing nodejs
Unlike most CMSes (WordPress, for example), Ghost is not a set of files that you upload to a webserver, but a daemon that runs on [nodejs](https://nodejs.org/en/).
Here's the officially recommended way of installing the current LTS version of nodejs on Debian:
```bash
curl -sL https://deb.nodesource.com/setup_8.x | bash -
apt-get install -y nodejs
```
If, like me, you don't want to run a bash script downloaded from the internet on your server, here are the commands you have to run to install it manually.
Since the nodejs repo uses HTTPS, we'll first need to install the package required to use such sources:
```bash
apt install apt-transport-https
```
We'll then have to add the nodejs repository's public key to the system:
```bash
curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -
```
Now we have to add the nodejs repository to our sources list:
```bash
echo 'deb https://deb.nodesource.com/node_8.x stretch main' > /etc/apt/sources.list.d/nodesource.list
```
We can now install nodejs:
```bash
apt update
apt install nodejs
```
# System configuration
Before installing Ghost, some system configuration is required.
First, let's create a new `ghost` system user that'll be used to run the Ghost daemon:
```bash
useradd -s /bin/false -r -d /opt/ghost -m ghost
```
Ghost needs an empty folder for the automated installation script to work. For that purpose, let's create a subfolder in the `ghost` user's home folder:
```bash
sudo -Hu ghost mkdir /opt/ghost/app
```
# Database
Ghost requires a MySQL/MariaDB database to store its data (technically, you could use a SQLite database, but please don't).
I personally have all my databases stored on a single LXC container running MariaDB. However, if you need to, you can install MariaDB locally this way:
```bash
apt install mariadb-server mariadb-common
```
We now have to declare a `ghost` user and database in the MariaDB shell:
```mysql
create database ghost;
create user `ghost`@`%` identified by 'password';
grant all privileges on ghost.* to 'ghost'@`%`;
```
You can change the `%` to `localhost` in the `create user` command if you've installed MariaDB locally. Please also remember to replace `'password'` with an actual password.
Once that's done, we're ready to install Ghost!
# Installing Ghost
## Ghost CLI
To install Ghost, we first have to install the Ghost CLI:
```bash
npm i -g ghost-cli
```
The Ghost CLI is a tool that lets you install, upgrade and manage your Ghost installation easily. Its usage is thoroughly documented on the official website [here](https://docs.ghost.org/v1/docs/ghost-cli).
## Installing Ghost
Let's install Ghost:
```bash
cd /opt/ghost/app
sudo -Hu ghost ghost install --no-setup-nginx --no-setup-systemd --no-setup-linux-user --no-setup-mysql
```
The command will ask you for the following information:
- the URL of your website;
- the hostname or IP of the server hosting your MariaDB installation;
- the username to use to connect to the database (`ghost`);
- the password you've configured for the database user;
- the database name (`ghost`).
Once the script has finished running, you've successfully installed Ghost! However, the daemon won't start yet, since we haven't configured systemd.
Since it contains a password, let's fix the permissions on our installation's configuration file to make sure it's not world-readable:
```bash
chmod 600 /opt/ghost/app/config.production.json
```
As you can see from the `ghost install` command, it can install and configure pretty much all of its dependencies on its own. However, since I'm a [_sysadmin_](https://xkcd.com/705/), that's not how I roll.
## Systemd configuration
As I wrote earlier, Ghost runs as a daemon. For us to be able to start it, we now need to declare a systemd unit file.
Let's create the file:
```bash
vim /etc/systemd/system/ghost.service
```
And add the following content to it:
```text
[Unit]
Description=Ghost systemd service
Documentation=https://docs.ghost.org
[Service]
Type=simple
WorkingDirectory=/opt/ghost/app
User=ghost
Group=ghost
Environment="NODE_ENV=production"
ExecStart=/usr/bin/ghost run
[Install]
WantedBy=multi-user.target
```
We can now reload systemd and start Ghost:
```
systemctl daemon-reload
systemctl start ghost.service
```
The daemon should now be running:
```
pgrep -alf ghost
14184 ghost run
```
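Optionally, you can also enable the unit so that systemd starts Ghost at boot (assuming the unit file created above):
```bash
systemctl enable ghost.service
```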
# Nginx
With its default configuration, Ghost runs as a webserver on localhost, on a non-standard HTTP port (TCP 2368). For your website to be publicly browsable, you'll need to configure a webserver as a reverse proxy in front of your Ghost installation. We'll use nginx for that purpose.
If you already have nginx running on a different server from your Ghost installation, you can use it for that purpose. For this to work, you'll need to change the host IP in Ghost's `config.production.json` configuration file to your Ghost server's public IP and restart Ghost. If you do so, make sure to limit direct access to your Ghost installation to the IP of your reverse proxy using iptables.
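If you go down that road, the relevant part of `config.production.json` would look something like this (a sketch of the structure only; by default Ghost binds to 127.0.0.1, and `0.0.0.0` listens on all interfaces):
```json
"server": {
  "host": "0.0.0.0",
  "port": 2368
}
```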
If you need to, you can install nginx locally this way:
```bash
apt install nginx
```
I won't go into detail on how to configure and secure an nginx installation here, as it is beyond the scope of this article.
Here is my nginx configuration for this website:
```
location / {
    proxy_pass http://127.0.0.1:2368;
    include proxy.conf;
    add_header Front-End-Https on;
    proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
    proxy_set_header Authorization "";
    proxy_set_header Accept-Encoding "";
    proxy_redirect off;
}

location /ghost/ {
    proxy_pass http://127.0.0.1:2368/ghost/;
    allow 192.0.2.100;
    deny all;
    include proxy.conf;
    add_header Front-End-Https on;
    proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
    proxy_set_header Accept-Encoding "";
    proxy_redirect off;
}
```
As you can see, I've declared two location blocks:
- `/` is publicly visible by anyone;
- `/ghost` (Ghost's administration interface) is only accessible from 192.0.2.100 (my public IP address).
I'd rather have left Ghost's administration interface accessible from anywhere. However, since there is currently no way to replace `/ghost` with another subfolder and two-factor authentication is not available, I've decided against it.
# Monit
As I mentioned in previous articles, I have monit running on all of my servers to make sure my services are running and to restart them should they crash.
I've created a configuration file for Ghost:
```bash
vim /etc/monit/conf.d/ghost
```
With the following content:
```text
check process ghost
    matching "ghost run"
    start program = "/bin/systemctl start ghost"
    stop program = "/bin/systemctl stop ghost"
    if changed pid then alert
    if changed ppid then alert
```
Let's reload monit:
```bash
monit reload
```
Ghost should now appear in your `monit summary`.
# Logging
Ghost writes its logs through `syslog`. If you don't want those messages to end up in `/var/log/syslog`, you'll have to configure your `syslog` daemon. For me, that's `syslog-ng`.
## Syslog-ng
Let's create a dedicated folder for the Ghost daemon's log files:
```bash
mkdir /var/log/ghost
chown root:adm /var/log/ghost
```
Then, we need to create a configuration file:
```bash
vim /etc/syslog-ng/conf.d/ghost.conf
```
And add the following content to it:
```
filter f_ghost { program ("ghost"); };
destination d_ghost { file ("/var/log/ghost/ghost.log"); };
log { source(s_src); filter (f_ghost); destination (d_ghost); flags(final); };
```
We can now reload `syslog-ng`:
```
service syslog-ng reload
```
Once that's done, Ghost should start logging to `/var/log/ghost/ghost.log`. Accessing a page on your site will create a new log entry, so that's enough to make sure it's working properly.
## Logrotate
As always with logs, let's configure logrotate to make sure we don't end up with huge files.
Let's create a new logrotate configuration file:
```bash
vim /etc/logrotate.d/ghost
```
And add the following content to it:
```text
/var/log/ghost/ghost.log {
rotate 8
weekly
notifempty
missingok
create 640 root adm
compress
copytruncate
}
```
There's no need to reload anything here. This new configuration file will be read by logrotate automatically the next time its cron job runs.
# Conclusion
This blog uses a previous version of Ghost's default theme, [Casper](https://github.com/TryGhost/Casper).
I've modified it a bit, and I really enjoy how it looks now! You can get the theme with my modifications from my [GitHub](https://github.com/captainark/Casper)! Credits to [this article](http://www.brycematheson.io/fixing-ghosts-default-casper-theme/) for some of the changes, and thanks [@Aguay](https://mastodon.fun/@aguay) for the help!
You've also probably noticed that I now use a private installation of [NodeBB](https://nodebb.org/) for the comments section. I'll probably write an article on how I've installed it and integrated it with my Ghost installation in the near future. In the meantime, please feel free to make use of it!

@@ -0,0 +1,145 @@
---
title: "DNS zone versioning"
date: 2018-04-14T00:00:00+01:00
draft: false
share: false
---
I've been using [PowerDNS](https://doc.powerdns.com/md/) with a SQL backend as a hidden master DNS server for a few years now.
I've been wanting to write a quick shell script to version my DNS zones for a while, and since I finally took the time to do so today, I figured I'd share it here.
The script uses the PowerDNS API to list the configured zones. It then exports each of them to a file in an AXFR-like format, commits the changes, and finally pushes them to a git repository.
# Configuration
## PowerDNS
For the script to work, we have to activate PowerDNS' API.
To do so, let's create a `/etc/powerdns/pdns.d/api.conf` file with the following content:
```
api=yes
api-key=mysupersecretapikey
webserver=yes
webserver-address=10.0.0.10
webserver-allow-from=10.0.0.0/8
webserver-port=8081
```
You should change *mysupersecretapikey* to an actual secret.
You should also adapt the `webserver-address` and `webserver-allow-from` settings to reflect your network configuration.
Once the file is created, we have to restart pdns:
```
systemctl restart pdns.service
```
**N.B.:** As with all my other articles, I'm assuming here that you're running Debian. The path of the configuration file you have to create or edit might differ if you're running another distribution or have installed PowerDNS from source.
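You can check that the API is answering with a quick curl (same key and address as configured above; this is also the call the script below relies on). It should return a JSON array with one object per configured zone:
```
curl -sH 'X-API-Key: mysupersecretapikey' http://10.0.0.10:8081/api/v1/servers/localhost/zones
```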
## jq
[jq](https://stedolan.github.io/jq/) is required for the script to work, so let's install it!
```
apt install jq
```
## Git
We now have to create a git repository to host our zone files.
To do so, you can follow my [previous tutorial](https://www.captainark.net/2016/01/31/private-git-repo/) on the subject if you want.
I personally migrated my git repos to a self-hosted [Gogs](https://gogs.io/) installation a while back.
If you don't care about your zones' content being public (it technically already is), you could create a GitHub repo for this (or use any other available git hosting).
Once you've created your repo, you should clone it on the machine that will run the script. For me, the path to the repo will be `/home/captainark/backup/dnsexport`.
```
apt install git
mkdir ~/backup && cd ~/backup
git clone ssh://git@git.captainark.net/captainark/dnsexport.git
```
You should also create a `~/.gitconfig` for the user that will run the script, with the following parameters configured:
```
[user]
    email = captainark@captainark.net
    name = CaptainArk
[push]
    default = simple
```
Also, make sure your user can push to the remote server before running the script. The following should work:
```
cd ~/backup/dnsexport
echo '# DNSEXPORT' > README.md
git add README.md
git commit README.md -m 'adding README'
git push
```
# Script
Once we've finished configuring PowerDNS and Git, we can run the script.
You can copy the following to `~/bin/dnsexport`:
```bash
#!/bin/bash

ApiKey="mysupersecretapikey"
PdnsUrl="10.0.0.10:8081"
PdnsServerName="localhost"
PdnsZoneUrl="http://${PdnsUrl}/api/v1/servers/${PdnsServerName}/zones"
ZoneList=$(/usr/bin/curl -sH "X-API-Key: ${ApiKey}" ${PdnsZoneUrl} | jq -r '.[].id')
ExportFolder="/home/captainark/backup/dnsexport"

updateremote() {
    cd $ExportFolder
    git add db.${Zone%.}
    git commit -m "Automated commit due to modification on ${Zone%.} at $(date -Iseconds)"
    git push
    cd -
}

for Zone in ${ZoneList}; do
    ZoneFile="${ExportFolder}/db.${Zone%.}"
    CurrentShaSum=$(/usr/bin/sha256sum ${ZoneFile})
    /usr/bin/curl -o ${ZoneFile} -sH "X-API-Key: ${ApiKey}" ${PdnsZoneUrl}/${Zone}/export
    NewShaSum=$(/usr/bin/sha256sum ${ZoneFile})
    [[ ${NewShaSum% *} != ${CurrentShaSum% *} ]] && updateremote
done
```
It's nothing fancy, but it does the job.
You'll have to adapt the `ApiKey`, `PdnsUrl` and `ExportFolder` variables to your configuration.
Once that's done, let's fix the permissions on the script:
```
chmod 700 ~/bin/dnsexport
```
You should run the script manually once to make sure everything is working OK. If it is, you should see a new commit on the repo for each zone you have configured in PowerDNS.
Once the script has run once without issue, you can schedule it to run regularly. I have it running every 10 minutes in my user's crontab:
```
crontab -e
# DNSEXPORT
*/10 * * * * /home/captainark/bin/dnsexport
```
# Conclusion
That's all!
As always, if you've found this article useful, please feel free to make use of the comments section below!
Hopefully it won't take as long before I write another article next time!

@@ -0,0 +1,153 @@
---
title: "Self-hosted report-uri"
date: 2018-11-27T00:00:00+01:00
draft: false
share: false
---
I've been playing with the security headers for this website for the past few days, most notably with the `Content-Security-Policy` and `Expect-CT` headers.
After spending a few hours on this, I'm pretty happy with the results!
![Screenshot-2018-11-27-at-21.52.58](/images/mozilla_observatory.png)
Source : [Observatory by Mozilla](https://observatory.mozilla.org/)
This website runs on a [Ghost](https://ghost.org/) installation that I keep up-to-date. Since an update might mean that the site will try to load new external resources, the `Content-Security-Policy` header might need updating as well.
This header has a `report-uri` directive that makes web browsers send JSON-formatted messages about the policy violations they encounter.
There's a great website ([Report URI](https://report-uri.com/)) that you can use to handle these reports. It allows up to 10,000 reports per month with a free account, which should be enough for a low- to mid-traffic website once you've set up your initial policy.
However, since I'm all about self-hosting *all of the things*, I figured I would configure my own report-uri using a PHP script.
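For context, the nginx side of such a policy might look like this (a sketch; the source list is illustrative, the `report-uri` directive is the part this article is about):
```
add_header Content-Security-Policy "default-src 'self'; report-uri https://report-uri.captainark.net";
```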
## The script
This script is heavily inspired by the ones available [here](https://github.com/LastBreach/csp-report-to-syslog) and [here](https://mathiasbynens.be/notes/csp-reports).
The script checks that the content sent by the web browser is a correctly formatted JSON message. It then removes the backslashes from the message, opens a connection to the local syslog daemon, and sends the message.
```php
<?php
// Send a `204 No Content` status code.
http_response_code(204);

// Collect the data from the POST request.
$data = file_get_contents('php://input');

if ($data = json_decode($data)) {
    // Remove slashes from the JSON-formatted data.
    $data = json_encode($data, JSON_UNESCAPED_SLASHES);

    // Set the options for the syslog daemon.
    openlog('report-uri', LOG_NDELAY, LOG_USER);

    // Send a warning with the CSP report.
    syslog(LOG_WARNING, $data);
}
?>
```
## Nginx
I won't go into too much detail regarding the nginx configuration here, as I've written on this subject before.
Since I now have a wildcard Let's Encrypt certificate on captainark.net, I've decided to use a dedicated vhost for my report-uri. However, a subfolder would work just as well. Just make sure the script is stored in a folder that nginx can access.
I've also decided to call the script `index.php`. You can call it whatever you want, but your `report-uri` directive will have to match the full URL of the script (if I had named the script `report.php`, my `report-uri` would have been `https://report-uri.captainark.net/report.php` instead of `https://report-uri.captainark.net`).
An nginx location configured as follows should do the trick:
```
location / {
    index index.php;

    location ~ \.php$ {
        try_files $uri =404;
        fastcgi_split_path_info ^(.+?\.php)(/.*)$;
        fastcgi_pass unix:/run/php/php7.0-fpm.sock;
        fastcgi_index index.php;
        include fastcgi.conf;
        fastcgi_hide_header X-Powered-By;
    }
}
```
I've omitted the security headers I usually configure in all locations here, because they are outside the scope of this article (HSTS, X-Frame-Options, etc.).
Once you've configured nginx, you can run `nginx -t` to check that the syntax is correct, and `nginx -s reload` to reload the configuration.
## Syslog-ng
Now that our reports are being sent to syslog-ng, we need to log them as properly formatted JSON messages, in a dedicated file.
I've created a `/etc/syslog-ng/conf.d/report-uri.conf` configuration file for that:
```
filter f_report-uri { program ("report-uri"); };
destination d_report-uri { file ("/var/log/report-uri/report-uri.json" template("{\"@timestamp\": \"${ISODATE}\", \"host\": \"${HOST}\", \"message\": ${MSG} }\n")); };
log { source(s_src); filter (f_report-uri); destination (d_report-uri); flags(final); };
```
We'll also need to create the folder for the logs:
```
mkdir -m 0750 /var/log/report-uri
chown root:adm /var/log/report-uri
```
You can then reload syslog-ng with `systemctl reload syslog-ng.service`.
Policy violation messages should now start appearing in the `/var/log/report-uri/report-uri.json` file.
If you want to test that it's working, you can create a `csp.json` file with the following content:
```json
{"csp-report":{"document-uri":"https://www.captainark.net/foo/bar","referrer":"https://www.google.com/","violated-directive":"default-src self","original-policy":"default-src self; report-uri https://report-uri.captainark.net","blocked-uri":"http://jscryptocurrency.cx"}}
```
You can now `POST` it to your report-uri:
```
curl -XPOST https://report-uri.captainark.net -d @csp.json
```
The message should be added to your `report-uri.json` log file, and you should be able to prettify it with `jq`:
```json
tail -n1 /var/log/report-uri/report-uri.json | jq
{
"@timestamp": "2018-11-27T22:57:06+01:00",
"host": "webserver",
"message": {
"csp-report": {
"document-uri": "https://www.captainark.net/foo/bar",
"referrer": "https://www.google.com/",
"violated-directive": "default-src self",
"original-policy": "default-src self; report-uri https://report-uri.captainark.net",
"blocked-uri": "http://jscryptocurrency.cx"
}
}
}
```
## Logrotate
It's always a good idea to configure log rotation when you add a new log file. To do so, let's create the `/etc/logrotate.d/report-uri` file with the following content:
```
/var/log/report-uri/report-uri.json {
rotate 8
weekly
notifempty
missingok
create 640 root adm
compress
copytruncate
}
```
## Conclusion
This configuration works as a report-uri for the `Content-Security-Policy` header as well as the newer `Expect-CT` header, and for any future header that uses a report-uri directive (as long as the generated messages are JSON-formatted).
Having a log file instead of the clean web interface of [Report URI](https://report-uri.com/) is not for everybody, but it is more than enough for my use case (this site gets about 10 clicks a day when I'm not playing with it, so... yeah).
Since the log messages are formatted in JSON, they should be pretty easy to integrate into [Elasticsearch](https://www.elastic.co/) or [Graylog](https://www.graylog.org/). If I ever decide to set up one of those solutions, I should then be able to configure cool-looking dashboards in Grafana as well.
As always, if you've found this article useful in any way, please let me know in the comments here, on [Twitter](https://twitter.com/captainark) or on the [Fediverse](https://social.captainark.net/users/captainark) if you're a real cool kid!


@ -0,0 +1,46 @@
---
title: "Debian repos over HTTPS"
date: 2018-12-03T00:00:00+01:00
draft: false
share: false
---
I've been using [deb.debian.org](https://deb.debian.org/) as the main Debian repository on my servers pretty much since it became available.
I've recently realized that the service is available over HTTPS, and since I'm all about encrypting *all of the things*, I figured I'd configure it on my servers.
This is going to be a very short post on how to do the same.
## Required package
`apt` can't use repositories served over HTTPS without an additional package being installed first.
```
apt install apt-transport-https
```
## The sources.list file
Once the package has been installed, you can edit your `/etc/apt/sources.list` file with the following content:
```
deb https://deb.debian.org/debian stable main contrib non-free
deb https://deb.debian.org/debian-security stable/updates main contrib non-free
deb https://deb.debian.org/debian stable-updates main contrib non-free
deb https://deb.debian.org/debian stretch-backports main contrib non-free
```
Or, if you'd rather follow the `stretch` release instead of `stable` (to avoid an unplanned upgrade to `buster` once it becomes the new `stable`):
```
deb https://deb.debian.org/debian stretch main contrib non-free
deb https://deb.debian.org/debian-security stretch/updates main contrib non-free
deb https://deb.debian.org/debian stretch-updates main contrib non-free
deb https://deb.debian.org/debian stretch-backports main contrib non-free
```
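In both cases, once the file has been saved, a quick update will confirm that `apt` can fetch the package indexes over HTTPS:
```
apt update
```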
## Conclusion
That's all! As promised, this was a short post.
As always, feel free to contact me here, on [Twitter](https://twitter.com/captainark) or on the [Fediverse](https://social.captainark.net/users/1)!

85
content/resume.md Normal file

@ -0,0 +1,85 @@
---
title: "Resume"
date: 2019-01-06T12:20:53+01:00
draft: false
type: "page"
share: false
---
## Profile
Hi! I'm Antoine. I'm a 31-year-old systems and network administrator, specialized in Linux and network management. I am not currently looking for a new opportunity.
If you find my profile interesting or if you have any questions, please [send me an email](mailto:contact@captainark.net)!
## Skills Summary
- Expertise in Linux and Cisco IOS routing, firewalling, QoS and VLAN configuration, for both IPv4 and IPv6;
- Knowledge of dynamic routing protocols (BGP, OSPF, EIGRP) and of VPN software (OpenVPN);
- Experience with a DDoS mitigation system (Arbor TMS);
- Expertise in standard network and systems analysis and troubleshooting tools (tcpdump, dig, atop, wireshark, traceroute);
- Knowledge of monitoring software (nagios, shinken, cacti, smokeping, observium, ELK);
- Experience with Linux server and desktop installation, configuration, administration and troubleshooting (on both Debian- and RedHat-based distributions);
- Familiarity with the most common network protocols (HTTP, DNS, DHCP, SMTP, POP, IMAP, CIFS) and their associated daemons (nginx, apache, bind, powerdns, dhcpd, dnsmasq, postfix, dovecot, samba);
- Ability to write and debug bash, batch and powershell scripts;
- Experience with clustering and high-availability technologies (heartbeat, ipvsadm, VRRP, HSRP, SLB);
- Knowledge of virtualization technologies (VMWare Workstation, KVM, Xen, Proxmox, LXC);
- Experience with IT asset management and incident management software (GLPI, OCS Inventory, FusionInventory);
- Familiarity with the Windows desktop (8, 7 and XP) and server (2012, 2008, 2003) families, and with Mac OS X.
## Work Experience
**NAMESHIELD (Angers, France)**
*Network Architect, from 09/2015 to present*
- Technical project management:
* WiFi deployment, using Cisco WLC and Aironet devices as well as FreeRADIUS for EAP-TLS/802.1X user authentication;
* VLAN deployment;
* L2 VPN setup using OpenVPN to securely propagate private VLANs over the internet.
*Systems and Network Administrator, from 10/2013 to 08/2015*
- Technical project management, notably:
* Definition, configuration and maintenance of a highly-available networking architecture for WAN, site-to-site and road-warrior VPN access;
* Setup of a DDoS mitigation system and its associated procedures;
* IPv6 deployment, on both the LAN and data-center-hosted machines;
* Setup of a centralized logging solution and of its associated statistics-generating scripts;
- Linux systems, VoIP phones, and Cisco switch and router configuration, deployment, administration and troubleshooting;
- Daily monitoring, production control and incident management;
- User support on Linux and Windows systems.
**INIT SYS - Alphalink Group (Pornic, France)**
*Systems and Network Administration Technician, from 10/2012 to 08/2013*
- Linux systems and services installation and configuration, on both physical and virtual machines;
- Documentation of newly installed systems and of their role within the existing infrastructure;
- Server and network monitoring and optimization;
- Systems and network maintenance operations outside of business hours;
- Automation of repetitive tasks through scripting;
- Level 3 customer and internal support.
*Technical Support Engineer, from 02/2012 to 10/2012*
- Level 1 customer support for both French and international customers and suppliers, over the telephone and by e-mail;
- Troubleshooting customers' and internal users' networking and system issues using standard tools and, when needed, through research and reproduction;
- Contacting and following up with the appropriate internal services when needed to resolve the issue.
**CHALLANS City Hall (Challans, France)**
*Internship, from 09/2011 to 11/2011*
- Installation and configuration of a highly-available front-end reverse proxy;
- Documentation of the installation and configuration process;
- Level 1 user support.
## Education
**Technical Support Engineer - Six-month training course**
*IMIE, Rezé (France) from 02/2011 to 09/2011*
**Bachelor's Degree in French Civil Law**
*Universities of Nantes and Poitiers (France) from 2006 to 2010*


@ -0,0 +1,44 @@
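<!-- Social links rendered as Fork Awesome icon stacks (presumably the theme's custom footer partial) -->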
<center>
<a class="fa-icons" href="mailto:contact@captainark.net">
<span class="fa-stack fa-lg">
<i class="fa fa-circle fa-stack-2x"></i>
<i class="fa fa-envelope fa-stack-1x fa-inverse"></i>
</span>
</a>
<a class="fa-icons" href="https://twitter.com/captainark">
<span class="fa-stack fa-lg">
<i class="fa fa-circle fa-stack-2x"></i>
<i class="fa fa-twitter fa-stack-1x fa-inverse"></i>
</span>
</a>
<a class="fa-icons" href="https://social.captainark.net/users/captainark">
<span class="fa-stack fa-lg">
<i class="fa fa-circle fa-stack-2x"></i>
<i class="fa fa-mastodon-alt fa-stack-1x fa-inverse"></i>
</span>
</a>
<a class="fa-icons" href="https://github.com/captainark">
<span class="fa-stack fa-lg">
<i class="fa fa-circle fa-stack-2x"></i>
<i class="fa fa-github fa-stack-1x fa-inverse"></i>
</span>
</a>
<a class="fa-icons" href="https://www.last.fm/user/captainark">
<span class="fa-stack fa-lg">
<i class="fa fa-circle fa-stack-2x"></i>
<i class="fa fa-lastfm fa-stack-1x fa-inverse"></i>
</span>
</a>
<a class="fa-icons" href="https://steamcommunity.com/id/captainark">
<span class="fa-stack fa-lg">
<i class="fa fa-circle fa-stack-2x"></i>
<i class="fa fa-steam fa-stack-1x fa-inverse"></i>
</span>
</a>
<a class="fa-icons" href="https://www.twitch.tv/captainark">
<span class="fa-stack fa-lg">
<i class="fa fa-circle fa-stack-2x"></i>
<i class="fa fa-twitch fa-stack-1x fa-inverse"></i>
</span>
</a>
</center>


@ -0,0 +1,16 @@
<!-- Fathom - simple website analytics - https://github.com/usefathom/fathom -->
<script>
(function(f, a, t, h, o, m){
a[h]=a[h]||function(){
(a[h].q=a[h].q||[]).push(arguments)
};
o=f.createElement('script'),
m=f.getElementsByTagName('script')[0];
o.async=1; o.src=t; o.id='fathom-script';
m.parentNode.insertBefore(o,m)
})(document, window, '//stats.captainark.net/tracker.js', 'fathom');
fathom('set', 'siteId', 'GEWGL');
fathom('trackPageview');
</script>
<!-- / Fathom -->
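<!-- Fork Awesome stylesheet, used by the social icon stacks in the other partial -->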
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/fork-awesome@1.1.5/css/fork-awesome.min.css" integrity="sha256-P64qV9gULPHiZTdrS1nM59toStkgjM0dsf5mK/UwBV4=" crossorigin="anonymous">

1
static/favicon.ico Normal file

@ -0,0 +1 @@
Found. Redirecting to /favicon.png

BIN static/favicon.png Normal file (binary file not shown; 3.0 KiB)
BIN static/images/author.jpg Normal file (binary file not shown; 21 KiB)
BIN static/images/cover.jpg Normal file (binary file not shown; 4.6 MiB)
BIN static/images/logo.png Normal file (binary file not shown; 4.8 KiB)
BIN (filename not shown) (binary file not shown; 54 KiB)
BIN static/images/tmux.png Normal file (binary file not shown; 187 KiB)

@ -0,0 +1 @@
Subproject commit bcaa0d55ee4ce949ac3e88494c10671cc11332b9