# docker-registry-proxy/nginx.conf

user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Use a debug-oriented logging format.
log_format debugging '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent '
'"HOST: $host" "UPSTREAM: $upstream_addr" '
'"UPSTREAM-STATUS: $upstream_status" '
'"SSL-PROTO: $ssl_protocol" '
'"CONNECT-HOST: $connect_host" "CONNECT-PORT: $connect_port" "CONNECT-ADDR: $connect_addr" '
'"PROXY-HOST: $proxy_host" "UPSTREAM-REDIRECT: $upstream_http_location" "CACHE-STATUS: $upstream_cache_status" '
'"AUTH: $http_authorization" ' ;
log_format tweaked '$upstream_cache_status [$time_local] "$uri" '
'$status $body_bytes_sent '
'"HOST:$host" '
'"PROXY-HOST:$proxy_host" "UPSTREAM:$upstream_addr" ';
keepalive_timeout 300;
gzip off;
# The cache directory. This can get huge. Better to use a Docker volume pointing here!
# Capped at 32 GB, which should be enough.
proxy_cache_path /docker_mirror_cache levels=1:2 max_size=32g inactive=60d keys_zone=cache:10m use_temp_path=off;
# Just in case you want to rewrite some hosts. The default maps every host to itself.
map $host $targetHost {
hostnames;
default $host;
}
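# As a purely hypothetical illustration (no rewrites ship by default), an entry here
# would send requests for one registry hostname to a different upstream, e.g.:
#
#   "registry.example.com" "internal-mirror.example.com";
#
# With only the default line above, $targetHost is always the original $host.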
# A map to enable authentication to some specific docker registries.
# This is auto-generated by the entrypoint.sh based on environment variables
map $host $dockerAuth {
hostnames;
include /etc/nginx/docker.auth.map;
default "";
}
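# A hedged sketch of what a generated docker.auth.map entry is assumed to look like:
# a registry hostname keyed to base64("user:password"), e.g.
#
#   "registry.example.com" "dXNlcjpwYXNzd29yZA==";
#
# (hostname and credentials are placeholders, not real values.)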
# Map to decide which hosts get directed to the caching layer.
# This is auto-generated from the list of cached registries, plus a few fixed hosts.
# By default we don't intercept, allowing non-registry traffic to flow through freely.
map $connect_host $interceptedHost {
hostnames;
include /etc/nginx/docker.intercept.map;
default "$connect_host:443";
}
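# A hedged sketch of what the generated docker.intercept.map is assumed to contain:
# registry hostnames pointed back at this server, so their CONNECT traffic terminates
# in the caching layer below instead of being tunneled straight through, e.g.
#
#   "registry-1.docker.io" "127.0.0.1:443";
#   "quay.io"              "127.0.0.1:443";
#
# Hosts not listed hit the default above and are tunneled untouched.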
map $dockerAuth $finalAuth {
"" "$http_authorization"; # if empty, keep the original passed-in from the client
default "Basic $dockerAuth"; # if not empty, add the Basic preamble to the auth
}
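# Worked example of the chain: with an empty $dockerAuth the client's own Authorization
# header (possibly none) is forwarded untouched; with the placeholder value above,
# upstream requests would carry "Authorization: Basic dXNlcjpwYXNzd29yZA==".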
# These maps parse the original Host and URI from a /forcecache redirect.
map $request_uri $realHost {
~/forcecacheinsecure/([^:/]+)/originalwas(/.+) $1;
~/forcecachesecure/([^:/]+)/originalwas(/.+) $1;
default "DID_NOT_MATCH_HOST";
}
map $request_uri $realPath {
~/forcecacheinsecure/([^:/]+)/originalwas(/.+) $2;
~/forcecachesecure/([^:/]+)/originalwas(/.+) $2;
default "DID_NOT_MATCH_PATH";
}
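# Illustration with a made-up blob URL: for a request URI such as
#   /forcecachesecure/storage.googleapis.com/originalwas/bucket/blobs/sha256:abcd
# $realHost becomes "storage.googleapis.com" and $realPath becomes "/bucket/blobs/sha256:abcd".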
# The proxy director layer. Listens on port 3128.
server {
listen 3128;
server_name _;
# Don't log the CONNECT proxy.
access_log off;
proxy_connect;
proxy_connect_address $interceptedHost;
proxy_max_temp_file_size 0;
# We need to resolve the real names of our proxied servers.
resolver 8.8.8.8 4.2.2.2 ipv6=off; # Avoid IPv6 addresses for now
# Forward proxy for non-CONNECT requests.
location / {
return 403 "The docker caching proxy is working!";
}
location /ca.crt {
alias /ca/ca.crt;
}
# @TODO: add a dynamic root path that generates instructions for usage on docker clients
}
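# A hedged usage sketch (the proxy hostname is a placeholder): a Docker host would
# fetch and trust the CA served above, then tunnel registry traffic through port 3128, e.g.
#
#   curl http://docker-proxy.example.com:3128/ca.crt -o docker_proxy_ca.crt   # then add it to the system trust store
#   export HTTPS_PROXY=http://docker-proxy.example.com:3128/                  # set in the Docker daemon's environment
#
# so that CONNECT requests for the intercepted registries land in the caching server below.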
# The caching layer
server {
# Listen on both 80 and 443, for all hostnames.
listen 80 default_server;
listen 443 ssl default_server;
server_name _;
# Do some tweaked logging.
access_log /var/log/nginx/access.log tweaked;
# Use the generated certificates, they contain names for all the proxied registries.
ssl_certificate /certs/fullchain.pem;
ssl_certificate_key /certs/web.key;
# We need to resolve the real names of our proxied servers.
resolver 8.8.8.8 4.2.2.2 ipv6=off; # Avoid IPv6 addresses for now
# Docker needs this. Don't ask.
chunked_transfer_encoding on;
# Block POST/PUT/DELETE. Don't use this proxy for pushing.
if ($request_method = POST) {
return 405 "POST method is not allowed";
}
if ($request_method = PUT) {
return 405 "PUT method is not allowed";
}
if ($request_method = DELETE) {
return 405 "DELETE method is not allowed";
}
proxy_read_timeout 900;
# Use cache locking, with a huge timeout, so that multiple Docker clients asking for the same blob at the same time
# will wait for the first to finish instead of doing multiple upstream requests.
proxy_cache_lock on;
proxy_cache_lock_timeout 120s;
# Cache all 200, 301, 302, and 307 (emitted by private registries) for 60 days.
proxy_cache_valid 200 301 302 307 60d;
# Some extra settings to maximize cache hits and efficiency
proxy_force_ranges on;
proxy_ignore_client_abort on;
proxy_cache_revalidate on;
# Hide/ignore headers from caching. S3 especially likes to send Expires headers in the past in some situations.
proxy_hide_header Set-Cookie;
proxy_ignore_headers X-Accel-Expires Expires Cache-Control Set-Cookie;
# Add the authentication info, if the map matched the target domain.
proxy_set_header Authorization $finalAuth;
# This comes from an include file generated by the entrypoint.
include /etc/nginx/docker.verify.ssl.conf;
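# A hedged guess at the sort of directives the generated docker.verify.ssl.conf may
# carry (the real file is written by the entrypoint and can differ):
#
#   proxy_ssl_verify on;
#   proxy_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt;
#   proxy_ssl_verify_depth 2;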
# Some debugging info
# add_header X-Docker-Caching-Proxy-Real-Host $realHost;
# add_header X-Docker-Caching-Proxy-Real-Path $realPath;
# add_header X-Docker-Caching-Proxy-Auth $finalAuth;
# Block API v1. We don't know how to handle these.
# The Docker client should start with v2 and fall back to v1 if something fails, for example if authentication to a protected v2 resource failed.
location /v1 {
return 405 "API v1 is invalid -- you probably need auth to get a v2 endpoint working against $host -- Check the docs";
}
# don't cache mutable entity /v2/<name>/manifests/<reference> (unless the reference is a digest)
location ~ ^/v2/[^\/]+/manifests/(?![A-Za-z0-9_+.-]+:) {
proxy_pass https://$targetHost;
}
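# Examples with a hypothetical single-segment image name: /v2/myimage/manifests/latest
# matches here and is proxied without caching, while /v2/myimage/manifests/sha256:<hex>
# is rejected by the negative lookahead and falls through to the caching "location /" below.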
# don't cache mutable entity /v2/<name>/tags/list
location ~ ^/v2/[^\/]+/tags/list {
proxy_pass https://$targetHost;
}
# don't cache mutable entity /v2/_catalog
location ~ ^/v2/_catalog$ {
proxy_pass https://$targetHost;
}
# Force caching of the first hit, which is always /v2/, even for 401 Unauthorized.
location = /v2/ {
proxy_pass https://$targetHost;
proxy_cache cache;
proxy_cache_valid 200 301 302 307 401 60d;
}
# cache everything else
location / {
proxy_pass https://$targetHost;
proxy_cache cache;
# Handling of redirects.
# Many registries (eg, quay.io, or k8s.gcr.io) emit a Location redirect
# pointing to something like cloudfront, or google storage.
# We hack into the response, extracting the host and URI parts and injecting them into a URL that points back to us.
# That gives us a chance to intercept and cache those, which are the actual multi-megabyte blobs we originally wanted to cache.
# We do it twice, once for http and once for https.
proxy_redirect ~^https://([^:/]+)(/.+)$ https://docker.caching.proxy.internal/forcecachesecure/$1/originalwas$2;
proxy_redirect ~^http://([^:/]+)(/.+)$ http://docker.caching.proxy.internal/forcecacheinsecure/$1/originalwas$2;
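# Illustration (bucket and blob names are made up): an upstream header such as
#   Location: https://storage.googleapis.com/some-bucket/sha256:abcd
# comes back to the client as
#   Location: https://docker.caching.proxy.internal/forcecachesecure/storage.googleapis.com/originalwas/some-bucket/sha256:abcd
# and the follow-up request is handled by the /forcecachesecure location below.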
}
# handling for the redirect case explained above, with https.
# The $realHost and $realPath variables come from a map defined at the top of this file.
location /forcecachesecure {
proxy_pass https://$realHost$realPath;
proxy_cache cache;
# Change the cache key, so that we can cache signed S3 requests and such. Only host and path are considered.
proxy_cache_key $proxy_host$uri;
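# Example with hypothetical signed URLs: requests for
#   /forcecachesecure/s3.example.com/originalwas/blob?X-Amz-Signature=aaa
#   /forcecachesecure/s3.example.com/originalwas/blob?X-Amz-Signature=bbb
# share a single cache entry, because $uri drops the query string that nginx's
# default proxy_cache_key ($scheme$proxy_host$request_uri) would have kept.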
}
# handling for the redirect case explained above, with http.
# The $realHost and $realPath variables come from a map defined at the top of this file.
location /forcecacheinsecure {
proxy_pass http://$realHost$realPath;
proxy_cache cache;
# Change the cache key, so that we can cache signed S3 requests and such. Only host and path are considered.
proxy_cache_key $proxy_host$uri;
}
}
}