# docker-registry-proxy/nginx.conf

user nginx;
worker_processes auto;
# error log config comes from external file created by entrypoint, to toggle debug on/off.
include /etc/nginx/error.log.debug.warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
map_hash_bucket_size 128;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Include nginx timeout configs
include /etc/nginx/nginx.timeouts.config.conf;
# Use a debug-oriented logging format.
log_format debugging escape=json
'{'
'"access_time":"$time_local",'
'"remote_addr":"$remote_addr",'
'"remote_user":"$remote_user",'
'"request":"$request",'
'"status":"$status",'
'"bytes_sent":"$body_bytes_sent",'
'"host":"$host",'
'"proxy_host":"$proxy_host",'
'"upstream":"$upstream_addr"'
'"upstream_status":"$upstream_status",'
'"ssl_protocol":"$ssl_protocol",'
'"connect_host":"$connect_host",'
'"connect_port":"$connect_port",'
'"connect_addr":"$connect_addr",'
'"upstream_http_location":"$upstream_http_location",'
'"upstream_cache_status":"$upstream_cache_status",'
'"http_authorization":"$http_authorization",'
'}';
log_format debug_proxy escape=json
'{'
'"access_time":"$time_local",'
'"remote_addr":"$remote_addr",'
'"remote_user":"$remote_user",'
'"request":"$request",'
'"status":"$status",'
'"bytes_sent":"$body_bytes_sent",'
'"host":"$host",'
'"proxy_host":"$proxy_host",'
'"upstream":"$upstream_addr"'
'"upstream_status":"$upstream_status",'
'"ssl_protocol":"$ssl_protocol",'
'"connect_host":"$connect_host",'
'"connect_port":"$connect_port",'
'"connect_addr":"$connect_addr",'
'"upstream_http_location":"$upstream_http_location",'
'"upstream_cache_status":"$upstream_cache_status",'
'"http_authorization":"$http_authorization",'
'}';
log_format tweaked escape=json
'{'
'"access_time":"$time_local",'
'"upstream_cache_status":"$upstream_cache_status",'
'"method":"$request_method",'
'"uri":"$uri",'
'"request_type":"$docker_proxy_request_type",'
'"status":"$status",'
'"bytes_sent":"$body_bytes_sent",'
'"upstream_response_time":"$upstream_response_time",'
'"host":"$host",'
'"proxy_host":"$proxy_host",'
'"upstream":"$upstream_addr"'
'}';
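# For reference, a single (entirely hypothetical) access-log line in the "tweaked" format
# would look roughly like this JSON object:
#   {"access_time":"10/Feb/2024:12:00:00 +0000","upstream_cache_status":"HIT","method":"GET",
#    "uri":"/v2/library/ubuntu/blobs/sha256:abc...","request_type":"blob-by-digest","status":"200",
#    "bytes_sent":"2529","upstream_response_time":"","host":"registry-1.docker.io","proxy_host":"","upstream":""}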
gzip off;
# Entrypoint generates the proxy_cache_path here, so it is configurable externally.
include /etc/nginx/conf.d/cache_max_size.conf;
# Just in case you want to rewrite some hosts. By default, each host maps to itself.
map $host $targetHost {
hostnames;
include /etc/nginx/docker.targetHost.map;
default $host;
}
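# For illustration only: docker.targetHost.map uses normal nginx map syntax, one
# "<host> <value>;" pair per line. A hypothetical rewrite sending one registry hostname
# to a different backend could look like:
#   registry.example.com  internal-mirror.example.com;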
# A map to enable authentication to some specific docker registries.
# This is auto-generated by the entrypoint.sh based on environment variables
map $host $dockerAuth {
hostnames;
include /etc/nginx/docker.auth.map;
default "";
}
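# For illustration only: entries in docker.auth.map map a registry hostname to a
# base64-encoded "user:password" string (the value used after the "Basic " preamble below).
# A hypothetical entry:
#   registry.example.com  dXNlcjpwYXNzd29yZA==;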
# @TODO: actually for auth.docker.io, if we want to support multiple authentications, we'll need to decide
# @TODO: based not only on the hostname, but also URI (/token) and query string (?scope)
# @TODO: I wonder if this would help gcr.io and quay.io with authentication also....
map $dockerAuth $finalAuth {
"" "$http_authorization"; # if empty, keep the original passed-in from the docker client.
default "Basic $dockerAuth"; # if not empty, add the Basic preamble to the auth
}
# Map to decide which hosts get directed to the caching portion.
# This is automatically generated from the list of cached registries, plus a few fixed hosts
# By default, we don't intercept, allowing free flow of non-registry traffic
map $connect_host $interceptedHost {
hostnames;
include /etc/nginx/docker.intercept.map;
default "$connect_addr"; # $connect_addr is 'IP address and port of the remote host, e.g. "192.168.1.5:12345". IP address is resolved from host name of CONNECT request line.'
}
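# For illustration only: docker.intercept.map is generated from the list of cached registries.
# Each entry points a registry hostname at this proxy's own caching listener, so CONNECT tunnels
# for those hosts terminate locally instead of going straight to the registry. A hypothetical
# entry (the port depends on what caching.layer.listen was generated with):
#   registry-1.docker.io  127.0.0.1:443;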
# These maps parse the original Host and URI from a /forcecache redirect.
map $request_uri $realHost {
~/forcecacheinsecure/([^:/]+)/originalwas(/.+) $1;
~/forcecachesecure/([^:/]+)/originalwas(/.+) $1;
default "DID_NOT_MATCH_HOST";
}
map $request_uri $realPath {
~/forcecacheinsecure/([^:/]+)/originalwas(/.+) $2;
~/forcecachesecure/([^:/]+)/originalwas(/.+) $2;
default "DID_NOT_MATCH_PATH";
}
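# A worked (hypothetical) example: for a request URI like
#   /forcecachesecure/some-bucket.s3.amazonaws.com/originalwas/v2/library/ubuntu/blobs/sha256:abc...
# $realHost becomes "some-bucket.s3.amazonaws.com" and
# $realPath becomes "/v2/library/ubuntu/blobs/sha256:abc...".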
# The proxy director layer, listens on 3128
server {
listen 3128;
listen [::]:3128;
server_name proxy_director_;
# don't log the CONNECT proxy.
#access_log /var/log/nginx/access.log debug_proxy;
access_log off;
set $docker_proxy_request_type "unknown-connect";
proxy_connect;
# Allow CONNECT tunnels to any destination port, not only the default 443/563, so that
# registries hosted on non-standard ports (e.g. registry.lab.example.com:5002) work through
# the proxy instead of getting "403 Forbidden" after CONNECT.
# See: https://github.com/chobits/ngx_http_proxy_connect_module#proxy_connect_allow
proxy_connect_allow all;
proxy_connect_address $interceptedHost;
proxy_max_temp_file_size 0;
# We need to resolve the real names of our proxied servers.
#resolver 8.8.8.8 4.2.2.2 ipv6=off; # Avoid ipv6 addresses for now
include /etc/nginx/resolvers.conf;
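# resolvers.conf is expected to hold one or more nginx resolver directives (presumably
# generated at startup like the other includes); a hypothetical generated line might be:
#   resolver 127.0.0.11 ipv6=off;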
# Forward proxy for non-CONNECT requests.
location / {
add_header "Content-type" "text/plain" always;
return 200 "docker-registry-proxy: The docker caching proxy is working!";
}
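# A quick (illustrative) way to check this from a client host, assuming the proxy is reachable
# as docker-registry-proxy on port 3128:
#   curl http://docker-registry-proxy:3128/
# should print the "docker caching proxy is working" message above.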
location /ca.crt {
alias /ca/ca.crt;
}
location /setup/systemd {
add_header "Content-type" "text/plain" always;
return 200 '
set -e
if [ ! -d /etc/systemd ]; then
echo "Not a systemd system"
exit 1
fi
mkdir -p /etc/systemd/system/docker.service.d
cat << EOD > /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTPS_PROXY=$scheme://$http_host/"
EOD
# Get the CA certificate from the proxy and make it a trusted root.
curl $scheme://$http_host/ca.crt > /usr/share/ca-certificates/docker_registry_proxy.crt
if fgrep -q "docker_registry_proxy.crt" /etc/ca-certificates.conf ; then
echo "certificate refreshed"
else
echo "docker_registry_proxy.crt" >> /etc/ca-certificates.conf
fi
update-ca-certificates --fresh
# Reload systemd
systemctl daemon-reload
# Restart dockerd
systemctl restart docker.service
echo "Docker configured with HTTPS_PROXY=$scheme://$http_host/"
';
} # end location /setup/systemd
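# The script above is meant to be fetched and executed on a docker host; an illustrative
# invocation (the hostname is an assumption, adjust to your deployment):
#   curl http://docker-registry-proxy:3128/setup/systemd | bash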
} # end server
# The caching layer
server {
# Listen on both 80 and 443, for all hostnames.
# Actually it could be 443 or 444, depending on debug; this is now generated by the entrypoint.
listen 80 default_server;
include /etc/nginx/caching.layer.listen;
server_name proxy_caching_;
# Do some tweaked logging.
access_log /var/log/nginx/access.log tweaked;
set $docker_proxy_request_type "unknown";
# Send upstream status as header
add_header X-Docker-Registry-Proxy-Cache-Upstream-Status "$upstream_cache_status";
# Use the generated certificates, they contain names for all the proxied registries.
ssl_certificate /certs/fullchain.pem;
ssl_certificate_key /certs/web.key;
# We need to resolve the real names of our proxied servers.
#resolver 8.8.8.8 4.2.2.2 ipv6=off; # Avoid ipv6 addresses for now
include /etc/nginx/resolvers.conf;
# Docker needs this. Don't ask.
chunked_transfer_encoding on;
# configuration of the different allowed methods
include "/etc/nginx/conf.d/allowed.methods.conf";
proxy_read_timeout 900;
# Request buffering
include /etc/nginx/proxy.request.buffering.conf;
# Use cache locking, with a huge timeout, so that multiple Docker clients asking for the same blob at the same time
# will wait for the first to finish instead of doing multiple upstream requests.
proxy_cache_lock on;
proxy_cache_lock_timeout 880s;
# Cache all 200, 206 for 60 days.
proxy_cache_valid 200 206 60d;
# Some extra settings to maximize cache hits and efficiency
proxy_force_ranges on;
proxy_ignore_client_abort on;
proxy_cache_revalidate on;
# Hide/ignore headers from caching. S3 especially likes to send Expires headers in the past in some situations.
proxy_hide_header Set-Cookie;
proxy_ignore_headers X-Accel-Expires Expires Cache-Control Set-Cookie;
# Add the authentication info, if the map matched the target domain.
proxy_set_header Authorization $finalAuth;
# Use SNI during the TLS handshake with the upstream.
proxy_ssl_server_name on;
# This comes from a include file generated by the entrypoint.
include /etc/nginx/docker.verify.ssl.conf;
# Block API v1. We don't know how to handle these.
# The docker client should start with v2 and fall back to v1 if something fails, for example, if authentication to a protected v2 resource failed.
location /v1 {
return 405 "docker-registry-proxy: docker is trying to use v1 API. Either the image does not exist upstream, or you need to configure docker-registry-proxy to authenticate against $host";
}
# For blob requests by digest, do cache, and handle redirects.
location ~ ^/v2/(.*)/blobs/sha256:(.*) {
set $docker_proxy_request_type "blob-by-digest";
include "/etc/nginx/nginx.manifest.common.conf";
}
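# An illustrative request that lands in the location above (image name and digest are made up):
#   GET /v2/library/ubuntu/blobs/sha256:9873176a8ff5ac33a5577636b2b2c74a9b4a4a5e472ff9c5a884e8d0b6a10d51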
# For manifest requests by digest, do cache, and handle redirects.
# These are some of the requests that DockerHub will throttle.
location ~ ^/v2/(.*)/manifests/sha256:(.*) {
set $docker_proxy_request_type "manifest-by-digest";
include "/etc/nginx/nginx.manifest.common.conf";
}
# Config for manifest URL caching is generated by the entrypoint based on ENVs.
# Go check it out, entrypoint.sh
include "/etc/nginx/nginx.manifest.caching.config.conf";
# Cache blob requests that are not by digest.
# Since these are mutable, we invalidate them immediately and keep them only in case the backend is down
location ~ ^/v2/(.*)/blobs/ {
set $docker_proxy_request_type "blob-mutable";
proxy_cache_valid 0s;
include "/etc/nginx/nginx.manifest.stale.conf";
}
location @handle_redirects {
# Store the current state of the world so we can reuse it in a minute.
# We need to capture these values now, because as soon as we invoke
# the proxy_* directives, these will disappear
set $original_uri $uri;
set $orig_loc $upstream_http_location;
# During this process, nginx will preserve the headers intended for the original destination.
# In most cases that's okay, but for some (e.g. Google storage), passing an Authorization
# header can cause problems. Also, that would leak the registry credentials
# into the (unrelated) storage system.
proxy_set_header Authorization "";
# nginx goes to fetch the value from the upstream Location header
proxy_pass $orig_loc;
proxy_cache cache;
# But we store the result with the cache key of the original request URI
# so that future clients don't need to follow the redirect too
proxy_cache_key $original_uri;
}
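# Illustrative flow (hostnames hypothetical): the registry answers a blob request with a 307
# pointing at e.g. https://storage.example.com/some/signed/url; nginx follows that Location
# here, strips the Authorization header, and stores the body under the original
# /v2/.../blobs/sha256:... cache key, so later clients get a cache hit without re-following
# the redirect.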
# By default, don't cache anything.
location / {
proxy_pass https://$targetHost;
proxy_cache off;
}
}
}