# We start from my nginx fork which includes the proxy-connect module from tEngine
# Source is available at https://github.com/rpardini/nginx-proxy-connect-stable-alpine
# This is already multi-arch!
ARG BASE_IMAGE="docker.io/rpardini/nginx-proxy-connect-stable-alpine:nginx-1.20.1-alpine-3.12.7"
# Could be "-debug"
ARG BASE_IMAGE_SUFFIX=""
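# Example (illustrative): build on top of the debug-flavoured base, assuming a matching
# "-debug" tag is published for the BASE_IMAGE above:
#   docker build --build-arg BASE_IMAGE_SUFFIX="-debug" -t docker-registry-proxy:base-debug .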
FROM ${BASE_IMAGE}${BASE_IMAGE_SUFFIX}

# Link image to original repository on GitHub
LABEL org.opencontainers.image.source https://github.com/rpardini/docker-registry-proxy

# apk packages that will be present in the final image both debug and release
RUN apk add --no-cache --update bash ca-certificates-bundle coreutils openssl

# If set to 1, enables building mitmproxy, which helps a lot in debugging, but is super heavy to build.
ARG DEBUG_BUILD="1"
ENV DO_DEBUG_BUILD="$DEBUG_BUILD"
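# Example (illustrative): skip the heavy mitmproxy build entirely:
#   docker build --build-arg DEBUG_BUILD=0 -t docker-registry-proxy:slim .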

# Build mitmproxy via pip. This is heavy, takes minutes to build and creates a 90MB+ layer. Oh well.
# MarkupSafe is pinned to 2.0.1: newer releases removed soft_unicode, which breaks the jinja2/flask
# dependency chain of mitmproxy 5.2 (see https://github.com/pallets/markupsafe/issues/282).
RUN [[ "a$DO_DEBUG_BUILD" == "a1" ]] && { echo "Debug build ENABLED." \
&& apk add --no-cache --update su-exec git g++ libffi libffi-dev libstdc++ openssl-dev python3 python3-dev py3-pip py3-wheel py3-six py3-idna py3-certifi py3-setuptools \
&& LDFLAGS=-L/lib pip install MarkupSafe==2.0.1 mitmproxy==5.2 \
&& apk del --purge git g++ libffi-dev openssl-dev python3-dev py3-pip py3-wheel \
&& rm -rf ~/.cache/pip \
; } || { echo "Debug build disabled." ; }

# Required for mitmproxy
ENV LANG=en_US.UTF-8

# Check the installed mitmproxy version, if built.
RUN [[ "a$DO_DEBUG_BUILD" == "a1" ]] && { mitmproxy --version && mitmweb --version ; } || { echo "Debug build disabled."; }

# Create the cache directory and CA directory
RUN mkdir -p /docker_mirror_cache /ca

# Expose it as a volume, so cache can be kept external to the Docker image
VOLUME /docker_mirror_cache

# Expose /ca as a volume. Users are supposed to volume mount this, so as to preserve it across restarts.
# Actually, it's required; if not, docker clients will reject the CA certificate when the proxy is run a second time.
VOLUME /ca
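# Example run-time mounts (host paths and image tag are illustrative), keeping both the cache
# and the generated CA outside the container so they survive restarts:
#   docker run -d -v /opt/registry-proxy/cache:/docker_mirror_cache \
#              -v /opt/registry-proxy/ca:/ca rpardini/docker-registry-proxy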

# Add our configuration
ADD nginx.conf /etc/nginx/nginx.conf
ADD nginx.manifest.common.conf /etc/nginx/nginx.manifest.common.conf
ADD nginx.manifest.stale.conf /etc/nginx/nginx.manifest.stale.conf

# Add our very hackish entrypoint and ca-building scripts, make them executable
ADD entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

# Clients should only use 3128, not anything else.
EXPOSE 3128
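# A sketch of client-side usage (hostname is illustrative): Docker daemons point their proxy
# settings at this port and trust the CA certificate generated under /ca, e.g.
#   HTTPS_PROXY=http://registry-proxy.example.com:3128/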

# In debug mode, 8081 exposes the mitmweb interface (for incoming requests from Docker clients)
EXPOSE 8081
# In debug-hub mode, 8082 exposes the mitmweb interface (for outgoing requests to DockerHub)
EXPOSE 8082

## Default envs.
# A space-delimited list of registries we should proxy and cache; this is in addition to the central DockerHub.
ENV REGISTRIES="k8s.gcr.io gcr.io quay.io"
# A space-delimited list of registry:user:password to inject authentication for
ENV AUTH_REGISTRIES="some.authenticated.registry:oneuser:onepassword another.registry:user:password"
# Should we verify upstream's certificates? Defaults to true.
ENV VERIFY_SSL="true"
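# Example run-time override (registry names and credentials are illustrative):
#   docker run ... -e REGISTRIES="ghcr.io registry.example.com" \
#       -e AUTH_REGISTRIES="registry.example.com:myuser:mypass" ...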
# Enable debugging mode; this inserts mitmproxy/mitmweb between the CONNECT proxy and the caching layer
ENV DEBUG="false"
# Enable debugging mode; this inserts mitmproxy/mitmweb between the caching layer and DockerHub's registry
ENV DEBUG_HUB="false"
# Enable nginx debugging mode; this uses the nginx-debug binary and enables debug logging, which is VERY verbose, hence the separate setting
ENV DEBUG_NGINX="false"
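# Example (illustrative): run with the mitmweb inspection UI for client-side traffic; this
# requires a debug build (DEBUG_BUILD=1, the default) and publishing port 8081:
#   docker run ... -e DEBUG=true -p 0.0.0.0:8081:8081 ...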

# Manifest caching tiers. Disabled by default, to mimic 0.4/0.5 behaviour.
# Setting it to true enables the processing of the ENVs below.
# Once enabled, it is valid for all registries, not only DockerHub.
# The envs *_REGEX represent a regex fragment; check entrypoint.sh to understand how they're used (nginx ~ location, PCRE syntax).
ENV ENABLE_MANIFEST_CACHE="false"
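# Example (illustrative): turn manifest caching on with the default tiers below:
#   docker run ... -e ENABLE_MANIFEST_CACHE=true ...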

# 'Primary' tier defaults to 10m cache for frequently used/abused tags.
# - People publishing to production via :latest (argh) will want to include that in the regex
# - Heavy pullers who are being ratelimited but don't mind getting outdated manifests should (also) increase the cache time here
ENV MANIFEST_CACHE_PRIMARY_REGEX="(stable|nightly|production|test)"
ENV MANIFEST_CACHE_PRIMARY_TIME="10m"

# 'Secondary' tier catches any tag with at least 3 digits or dots, in the hopes of matching most explicitly-versioned tags.
# It caches for 60d, which is also the cache time for the large binary blobs to which the manifests refer.
# That makes them effectively immutable. Make sure you're not affected; tighten this regex or widen the primary tier.
ENV MANIFEST_CACHE_SECONDARY_REGEX="(.*)(\d|\.)+(.*)(\d|\.)+(.*)(\d|\.)+"
ENV MANIFEST_CACHE_SECONDARY_TIME="60d"
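# For instance (illustrative tags): "1.21.3" or "v3.12.7-alpine" contain three or more digits/dots
# and fall into this secondary tier, while "stable" hits the primary tier above and "latest"
# falls through to the default tier below.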

# The default cache duration for manifests that don't match either the primary or secondary tiers above.
# In the default config, :latest and other frequently-used tags will get this value.
ENV MANIFEST_CACHE_DEFAULT_TIME="1h"

# Should we allow actions other than pull? Defaults to false.
ENV ALLOW_PUSH="false"

# If push is allowed, buffering requests can cause issues on slow upstreams.
# If you have trouble pushing, set this to false first, then fix the remaining timeouts.
# Defaults to true, so existing behaviour is unchanged.
ENV PROXY_REQUEST_BUFFERING="true"
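# Example (illustrative): enable pushing through the proxy and disable request buffering:
#   docker run ... -e ALLOW_PUSH=true -e PROXY_REQUEST_BUFFERING=false ...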

# Timeouts
# ngx_http_core_module
ENV SEND_TIMEOUT="60s"
ENV CLIENT_BODY_TIMEOUT="60s"
ENV CLIENT_HEADER_TIMEOUT="60s"
ENV KEEPALIVE_TIMEOUT="300s"
# ngx_http_proxy_module
ENV PROXY_READ_TIMEOUT="60s"
ENV PROXY_CONNECT_TIMEOUT="60s"
ENV PROXY_SEND_TIMEOUT="60s"
# ngx_http_proxy_connect_module - external module
ENV PROXY_CONNECT_READ_TIMEOUT="60s"
ENV PROXY_CONNECT_CONNECT_TIMEOUT="60s"
ENV PROXY_CONNECT_SEND_TIMEOUT="60s"
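# Example (illustrative): raise the proxy read timeouts for a slow upstream:
#   docker run ... -e PROXY_READ_TIMEOUT=300s -e PROXY_CONNECT_READ_TIMEOUT=300s ...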

# Did you want a shell? Sorry, the entrypoint never returns, because it runs nginx itself. Use 'docker exec' if you need to mess around internally.
ENTRYPOINT ["/entrypoint.sh"]