GitHub Actions: multiarch build, new -debug version, much lighter layers

pull/52/head
Ricardo Pardini 2020-10-08 01:57:49 +02:00
parent 6b4a26e23c
commit ec4df7b4b7
5 changed files with 231 additions and 12 deletions


@@ -6,3 +6,5 @@ LICENSE
README.md
docker_mirror_cache
docker_mirror_certs
.github
Makefile


@@ -0,0 +1,99 @@
name: master-latest
on:
  push:
    branches: [ master ]
jobs:
  multiarch-to-dockerhub-latest:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      # Set up QEMU for arm64
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
        with:
          platforms: arm64
      # We use buildx instead of regular build so we can take advantage of the Docker layer cache via the GitHub Actions cache
      # Also, buildx offers multi-arch builds
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
      # Set up the GitHub Actions cache.
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildxarch-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildxarch-
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
      # The arm64 build is of course much slower due to QEMU, so build and push amd64 **first**.
      # Due to the way manifests work, the gap between this and the complete push below
      # could result in pull failures or inconsistencies for arm64; such is life.
      # This is further duplicated by building both release and debug builds.
      - name: Build and push amd64 Release Docker Image to DockerHub
        uses: docker/build-push-action@v2
        with:
          build-args: DEBUG_BUILD=0,BASE_IMAGE_SUFFIX=""
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/amd64
          tags: rpardini/docker-registry-proxy:latest
          push: true
          cache-from: type=local,src=/tmp/.buildx-cache/release
          # first run does not export the cache
      - name: Build and push amd64 Debug Docker Image to DockerHub
        uses: docker/build-push-action@v2
        with:
          build-args: DEBUG_BUILD=1,BASE_IMAGE_SUFFIX="-debug"
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/amd64
          tags: rpardini/docker-registry-proxy:latest-debug
          push: true
          cache-from: type=local,src=/tmp/.buildx-cache/debug
          # first run does not export the cache
      # Do it all again with both archs. The amd64 build will be a huge cache hit;
      # this will update the manifest to have both arches.
      - name: Build and push multiarch Release Docker Image to DockerHub
        uses: docker/build-push-action@v2
        with:
          build-args: DEBUG_BUILD=0,BASE_IMAGE_SUFFIX=""
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/arm64,linux/amd64
          tags: rpardini/docker-registry-proxy:latest
          push: true
          cache-from: type=local,src=/tmp/.buildx-cache/release
          cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release
      - name: Build and push multiarch Debug Docker Image to DockerHub
        uses: docker/build-push-action@v2
        with:
          build-args: DEBUG_BUILD=1,BASE_IMAGE_SUFFIX="-debug"
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/arm64,linux/amd64
          tags: rpardini/docker-registry-proxy:latest-debug
          push: true
          cache-from: type=local,src=/tmp/.buildx-cache/debug
          cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/debug
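For reference, once the multiarch pushes above succeed, buildx's imagetools subcommand can confirm that the :latest and :latest-debug tags really point at manifest lists covering both architectures (a quick sketch, assuming a local Docker installation with buildx available):

# should list linux/amd64 and linux/arm64 among the referenced platforms
docker buildx imagetools inspect rpardini/docker-registry-proxy:latest
docker buildx imagetools inspect rpardini/docker-registry-proxy:latest-debug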

.github/workflows/tags.yaml

@@ -0,0 +1,105 @@
name: tags
on:
  push:
    tags:
      - '*'
jobs:
  multiarch-to-dockerhub-tag:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      # Parse the ref to get the clean tag name.
      - id: get_version
        uses: battila7/get-version-action@v2
      - run: echo Current tag ${{ steps.get_version.outputs.version }}
      # Set up QEMU for arm64
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
        with:
          platforms: arm64
      # We use buildx instead of regular build so we can take advantage of the Docker layer cache via the GitHub Actions cache
      # Also, buildx offers multi-arch builds
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
      # Set up the GitHub Actions cache.
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildxarch-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildxarch-
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
      # The arm64 build is of course much slower due to QEMU, so build and push amd64 **first**.
      # Due to the way manifests work, the gap between this and the complete push below
      # could result in pull failures or inconsistencies for arm64; such is life.
      # This is further duplicated by building both release and debug builds.
      - name: Build and push amd64 Release Docker Image to DockerHub
        uses: docker/build-push-action@v2
        with:
          build-args: DEBUG_BUILD=0,BASE_IMAGE_SUFFIX=""
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/amd64
          tags: rpardini/docker-registry-proxy:${{ steps.get_version.outputs.version }}
          push: true
          cache-from: type=local,src=/tmp/.buildx-cache/release
          # first run does not export the cache
      - name: Build and push amd64 Debug Docker Image to DockerHub
        uses: docker/build-push-action@v2
        with:
          build-args: DEBUG_BUILD=1,BASE_IMAGE_SUFFIX="-debug"
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/amd64
          tags: rpardini/docker-registry-proxy:${{ steps.get_version.outputs.version }}-debug
          push: true
          cache-from: type=local,src=/tmp/.buildx-cache/debug
          # first run does not export the cache
      # Do it all again with both archs. The amd64 build will be a huge cache hit;
      # this will update the manifest to have both arches.
      - name: Build and push multiarch Release Docker Image to DockerHub
        uses: docker/build-push-action@v2
        with:
          build-args: DEBUG_BUILD=0,BASE_IMAGE_SUFFIX=""
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/arm64,linux/amd64
          tags: rpardini/docker-registry-proxy:${{ steps.get_version.outputs.version }}
          push: true
          cache-from: type=local,src=/tmp/.buildx-cache/release
          cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release
      - name: Build and push multiarch Debug Docker Image to DockerHub
        uses: docker/build-push-action@v2
        with:
          build-args: DEBUG_BUILD=1,BASE_IMAGE_SUFFIX="-debug"
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/arm64,linux/amd64
          tags: rpardini/docker-registry-proxy:${{ steps.get_version.outputs.version }}-debug
          push: true
          cache-from: type=local,src=/tmp/.buildx-cache/debug
          cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/debug
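The battila7/get-version-action step used above essentially turns the pushed ref into a clean tag name; a rough shell equivalent (the 0.4.0 value is just an illustrative tag) would be:

# on tag pushes, GITHUB_REF arrives as refs/tags/<tag>, e.g. refs/tags/0.4.0
VERSION="${GITHUB_REF#refs/tags/}"
echo "Current tag ${VERSION}"  # feeds the rpardini/docker-registry-proxy:<tag> and :<tag>-debug image tags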


@@ -1,22 +1,25 @@
# We start from my nginx fork which includes the proxy-connect module from tEngine
# Source is available at https://github.com/rpardini/nginx-proxy-connect-stable-alpine
# This is not multiarch yet.
ARG BASE_IMAGE="rpardini/nginx-proxy-connect-stable-alpine:nginx-1.18.0-alpine-3.12"
FROM ${BASE_IMAGE}
# This is already multi-arch!
ARG BASE_IMAGE="rpardini/nginx-proxy-connect-stable-alpine:nginx-1.18.0-alpine-3.12.0"
# Could be "-debug"
ARG BASE_IMAGE_SUFFIX=""
FROM ${BASE_IMAGE}${BASE_IMAGE_SUFFIX}
# apk packages that will be present in the final image, both debug and release
RUN apk add --no-cache --update bash ca-certificates-bundle coreutils openssl
# If set to 1, enables building mitmproxy, which helps a lot in debugging, but is super heavy to build.
ARG DEBUG_BUILD="1"
ENV DO_DEBUG_BUILD="$DEBUG_BUILD"
# Add openssl, bash and ca-certificates, then clean apk cache -- yeah complain all you want.
# Also added deps for mitmproxy.
# Build mitmproxy via pip. This is heavy, takes minutes to build and creates a 90MB+ layer. Oh well.
RUN [[ "a$DO_DEBUG_BUILD" == "a1" ]] && { echo "Debug build ENABLED." \
&& apk add --update openssl bash ca-certificates su-exec coreutils git g++ libffi libffi-dev libstdc++ openssl openssl-dev python3 python3-dev py3-pip py3-wheel \
&& apk add --no-cache --update su-exec git g++ libffi libffi-dev libstdc++ openssl-dev python3 python3-dev py3-pip py3-wheel \
&& LDFLAGS=-L/lib pip install mitmproxy==4.0.4 \
&& apk del --purge git g++ libffi-dev openssl-dev python3-dev \
&& rm -rf /var/cache/apk/* \
&& apk del --purge git g++ libffi-dev openssl-dev python3-dev py3-pip py3-wheel \
&& rm -rf ~/.cache/pip \
; } || { echo "Debug build disabled." && apk add --update bash ca-certificates coreutils openssl && rm -rf /var/cache/apk/*; }
; } || { echo "Debug build disabled." ; }
# Required for mitmproxy
ENV LANG=en_US.UTF-8
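The two build arguments the workflows pass in can also be exercised locally to reproduce the release and debug variants; a sketch (the local image names are just examples):

# release image: skips mitmproxy, uses the plain nginx base image
docker build --build-arg DEBUG_BUILD=0 --build-arg BASE_IMAGE_SUFFIX="" -t docker-registry-proxy:local .
# debug image: pip-installs mitmproxy and switches to the -debug base image (much slower and heavier)
docker build --build-arg DEBUG_BUILD=1 --build-arg BASE_IMAGE_SUFFIX="-debug" -t docker-registry-proxy:local-debug .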


@@ -68,7 +68,7 @@ if [[ "a${VERIFY_SSL}" == "atrue" ]]; then
cat << EOD > /etc/nginx/docker.verify.ssl.conf
# We actually wanna be secure and avoid mitm attacks.
# Fitting, since this whole thing is a mitm...
# We'll accept any cert signed by a CA trusted by Mozilla (ca-certificates in alpine)
# We'll accept any cert signed by a CA trusted by Mozilla (ca-certificates-bundle in alpine)
proxy_ssl_verify on;
proxy_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt;
proxy_ssl_verify_depth 2;
@@ -88,9 +88,14 @@ CACHE_MAX_SIZE=${CACHE_MAX_SIZE:-32g}
echo "proxy_cache_path /docker_mirror_cache levels=1:2 max_size=$CACHE_MAX_SIZE inactive=60d keys_zone=cache:10m use_temp_path=off;" > /etc/nginx/conf.d/cache_max_size.conf
# normally use non-debug version of nginx
NGINX_BIN="nginx"
NGINX_BIN="/usr/sbin/nginx"
if [[ "a${DEBUG}" == "atrue" ]]; then
if [[ ! -f /usr/bin/mitmweb ]]; then
echo "To debug, you need the -debug version of this image, eg: :latest-debug"
exit 3
fi
# in debug mode, change caching layer to listen on 444, so that mitmproxy can sit in the middle.
echo " listen 444 ssl default_server;" > /etc/nginx/caching.layer.listen
@@ -105,10 +110,15 @@ if [[ "a${DEBUG}" == "atrue" ]]; then
fi
if [[ "a${DEBUG_NGINX}" == "atrue" ]]; then
if [[ ! -f /usr/sbin/nginx-debug ]]; then
echo "To debug, you need the -debug version of this image, eg: :latest-debug"
exit 4
fi
echo "Starting in DEBUG MODE (nginx)."
echo "error_log /var/log/nginx/error.log debug;" > /etc/nginx/error.log.debug.warn
# use debug binary
NGINX_BIN="nginx-debug"
NGINX_BIN="/usr/sbin/nginx-debug"
fi
echo "Testing nginx config..."