A while back I started a small project which involved deploying a web service. The solution I landed on used Docker Compose, Caddy, SQLite and Litestream. That project is a little stale now, but I wanted to make a note of how it all looked, in case I want to use the same setup again elsewhere.
The workflow involved coding and building the app separately, with that process pushing a new image to a registry. The image version was generated by the build, based on the build datetime and the commit hash of the app source repo.
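For illustration, a tag in that format could be produced at build time along these lines (a sketch only; the actual build script isn't part of this project):

# tag the image with the build datetime (UTC) and short commit hash, e.g. snapshot-YYMMDDHHMMSS-HASH
TAG="snapshot-$(date -u +%y%m%d%H%M%S)-$(git rev-parse --short HEAD)"
docker build -t "registry.example.com/app:$TAG" .
docker push "registry.example.com/app:$TAG"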
Regardless of the specifics, there was now a new app image to be deployed. Deploying it simply involved updating the app/image value in the compose.yaml file in the deployment project, committing the change, and pushing the commit to the deployment server.
The deployment project should also have an origin
repository you can push
to, to ensure continuity should the deployment server fail.
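Concretely, that meant the deployment project had two remotes, and a deploy looked roughly like this (the deploy remote name and host are made up for the example):

# one-time setup: add the deployment server as a second remote
git remote add deploy deploy@deploy.example.com:example.git

# after editing compose.yaml to point at the new image tag:
git commit -am "Deploy app snapshot-YYMMDDHHMMSS-HASH"
git push origin master   # keep the fallback copy up to date
git push deploy master   # triggers the post-receive hook on the deployment server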
deployment project layout
example-deployment/
    .git-hooks/
        post-receive
    caddy/
        config/.gitkeep      # simply to ensure the caddy/config/ directory exists on checkout
        data/.gitkeep        # likewise for the caddy/data/ directory
        Caddyfile
        coming-soon.html
    litestream/
        litestream.sh
    app/
        data/.gitkeep        # simply to ensure the app/data/ directory exists on checkout
    compose.yaml
    .env                     # not committed to source control
example-deployment/.git-hooks/post-receive
#!/bin/bash

target_branch="master"
working_tree="$HOME/example"
compose_file="$working_tree/compose.yaml"
aws_region=...
registry=...
registry_username=AWS

while read oldrev newrev refname
do
    branch=$(git rev-parse --symbolic --abbrev-ref $refname)
    if [ -n "$branch" ] && [ "$target_branch" == "$branch" ]; then
        echo "===================="
        echo "Starting deployment"

        git --work-tree=$working_tree checkout $target_branch -f

        # potentially tag deployment
        #NOW=$(date +"%Y%m%d-%H%M")
        #TAG="deployment/$NOW"
        #git tag $TAG $target_branch

        # login to the docker container registry, on AWS in this case
        aws ecr get-login-password --region $aws_region | docker login --username $registry_username --password-stdin $registry

        docker-compose -f $compose_file pull --quiet
        docker-compose -f $compose_file up --detach --remove-orphans

        echo "Deployment completed"
        echo "===================="
    fi
done
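The hook lives in the repository under .git-hooks/, but git only runs hooks from the hooks/ directory of the repository being pushed to, so it needs wiring up on the server. I haven't shown that setup above; a plausible version, assuming a bare repository at ~/example.git and the working tree at ~/example, looks like this:

# on the deployment server (one-time; paths are assumptions)
git init --bare ~/example.git      # the repository that deploys are pushed to
mkdir -p ~/example                 # the working tree the hook checks out into

# from the local checkout, install the hook into the bare repository
scp .git-hooks/post-receive deploy@deploy.example.com:example.git/hooks/post-receive
ssh deploy@deploy.example.com chmod +x example.git/hooks/post-receive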
example-deployment/caddy/Caddyfile
{
    email webmaster@example.com
    # The staging Let's Encrypt CA; do not use this for production!
    # acme_ca https://acme-staging-v02.api.letsencrypt.org/directory
}

# this block is for serving up static files, namely the coming-soon.html file
example.com {
    root * /usr/share/caddy/
    try_files {path} =404
    file_server
}

# this block is for proxying to the app server
#example.com {
#    reverse_proxy app:8080
#}

www.example.com {
    redir https://example.com{uri}
}
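Going live is then a matter of swapping which example.com block is commented out, committing and pushing as usual. One thing to note: since the Caddyfile is bind-mounted, docker-compose up won't recreate the caddy container just because that file changed, so the new config has to be applied by hand, for example:

# reload the mounted Caddyfile inside the running container
docker-compose -f ~/example/compose.yaml exec caddy caddy reload --config /etc/caddy/Caddyfile
# (restarting the caddy service would also do the job)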
example-deployment/caddy/coming-soon.html
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8">
    <title>Example - Coming Soon</title>
  </head>
  <body>
    Coming Soon
  </body>
</html>
example-deployment/litestream/litestream.sh
#!/bin/sh

SOURCE="/data/app.db"
DESTINATION="s3://example-litestream/app.db"

# if the local db doesn't exist then try to restore from the remote replica
if [ ! -f "$SOURCE" ]; then
    litestream restore -v -if-replica-exists -o "$SOURCE" "$DESTINATION"
fi

# database is now presumably ready, so flag to compose that this container is alive
touch /tmp/litestream-restore-complete

# start the replication process
exec litestream replicate "$SOURCE" "$DESTINATION"
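Both commands here take the replica URL directly, so no Litestream config file is needed in the container. To sanity-check that replication is actually working, the same URL form can (as far as I recall) be queried from inside the running container, something like:

# list what Litestream has stored in the S3 replica (run on the deployment server)
docker-compose -f ~/example/compose.yaml exec litestream litestream snapshots s3://example-litestream/app.db
docker-compose -f ~/example/compose.yaml exec litestream litestream generations s3://example-litestream/app.db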
example-deployment/compose.yaml
services:
  caddy:
    container_name: "caddy"
    restart: "unless-stopped"
    image: "caddy:2.4.5-alpine"
    ports:
      - "80:80"
      - "443:443"
    networks:
      - "public"
      - "private"
    volumes:
      - "./caddy/Caddyfile:/etc/caddy/Caddyfile"
      - "./caddy/config:/config"
      - "./caddy/data:/data"
      - "./caddy/coming-soon.html:/usr/share/caddy/index.html"
    logging:
      driver: "awslogs"
      options:
        awslogs-region: "$AWS_DEFAULT_REGION"
        awslogs-group: "example-log-group"
        awslogs-stream: "caddy-log-stream"

  litestream:
    container_name: "litestream"
    restart: "unless-stopped"
    image: "litestream/litestream:0.3.6"
    entrypoint: "/tmp/litestream.sh"
    environment:
      AWS_ACCESS_KEY_ID:
      AWS_SECRET_ACCESS_KEY:
      AWS_DEFAULT_REGION:
    volumes:
      - "./litestream/litestream.sh:/tmp/litestream.sh"
      - "./app/data:/data"
    healthcheck:
      test: "[ -f /tmp/litestream-restore-complete ]"
      interval: "1s"
      timeout: "5s"
      retries: 5
    logging:
      driver: "awslogs"
      options:
        awslogs-region: "$AWS_DEFAULT_REGION"
        awslogs-group: "example-log-group"
        awslogs-stream: "litestream-log-stream"

  app:
    container_name: "app"
    restart: "unless-stopped"
    image: registry.example.com/app:snapshot-YYMMDDHHMMSS-HASH # version generated by the build
    depends_on:
      litestream:
        condition: "service_healthy"
    expose:
      - "8080"
    networks:
      - "private"
    volumes:
      - "./app/data:/opt/app/data"
    logging:
      driver: "awslogs"
      options:
        awslogs-region: "$AWS_DEFAULT_REGION"
        awslogs-group: "example-log-group"
        awslogs-stream: "app-log-stream"

networks:
  public:
    external: true
  private: {}
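One gotcha: the public network is marked external, so compose expects it to already exist rather than creating it. It needs creating once on the deployment server before the first docker-compose up:

# one-time setup on the deployment server
docker network create public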
example-deployment/.env
AWS_DEFAULT_REGION=...
AWS_SECRET_ACCESS_KEY=...
AWS_ACCESS_KEY_ID=...