# nginx.conf — docker-registry-proxy
# A caching forward proxy for Docker registries: a CONNECT "director" on :3128
# plus a TLS-terminating caching layer. Several included files referenced below
# are generated at runtime by the container entrypoint.
# Run workers as the nginx user, one worker per available core.
user nginx;
worker_processes auto;

# error log config comes from external file created by entrypoint, to toggle debug on/off.
include /etc/nginx/error.log.debug.warn;

pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}
http {

# Hash bucket size for the map directives below; the generated host maps
# can contain many (and long) registry hostnames.
map_hash_bucket_size 128;

include /etc/nginx/mime.types;
default_type application/octet-stream;
# Use a debug-oriented logging format.
# FIX: the JSON skeleton was invalid — "upstream" was missing its trailing
# comma, and "http_authorization" carried a spurious trailing comma right
# before the closing brace. Both made every logged line unparseable JSON.
log_format debugging escape=json
    '{'
    '"access_time":"$time_local",'
    '"remote_addr":"$remote_addr",'
    '"remote_user":"$remote_user",'
    '"request":"$request",'
    '"status":"$status",'
    '"bytes_sent":"$body_bytes_sent",'
    '"host":"$host",'
    '"proxy_host":"$proxy_host",'
    '"upstream":"$upstream_addr",'
    '"upstream_status":"$upstream_status",'
    '"ssl_protocol":"$ssl_protocol",'
    '"connect_host":"$connect_host",'
    '"connect_port":"$connect_port",'
    '"connect_addr":"$connect_addr",'
    '"upstream_http_location":"$upstream_http_location",'
    '"upstream_cache_status":"$upstream_cache_status",'
    '"http_authorization":"$http_authorization"'
    '}';
# Debug log format for the CONNECT director server (see access_log there).
# FIX: same two JSON defects as the "debugging" format — missing comma after
# "upstream" and a trailing comma after "http_authorization" before '}'.
log_format debug_proxy escape=json
    '{'
    '"access_time":"$time_local",'
    '"remote_addr":"$remote_addr",'
    '"remote_user":"$remote_user",'
    '"request":"$request",'
    '"status":"$status",'
    '"bytes_sent":"$body_bytes_sent",'
    '"host":"$host",'
    '"proxy_host":"$proxy_host",'
    '"upstream":"$upstream_addr",'
    '"upstream_status":"$upstream_status",'
    '"ssl_protocol":"$ssl_protocol",'
    '"connect_host":"$connect_host",'
    '"connect_port":"$connect_port",'
    '"connect_addr":"$connect_addr",'
    '"upstream_http_location":"$upstream_http_location",'
    '"upstream_cache_status":"$upstream_cache_status",'
    '"http_authorization":"$http_authorization"'
    '}';
# Condensed JSON access-log format used by the caching layer.
log_format tweaked escape=json
    '{'
    '"access_time":"$time_local",'
    '"upstream_cache_status":"$upstream_cache_status",'
    '"method":"$request_method",'
    '"uri":"$uri",'
    '"request_type":"$docker_proxy_request_type",'
    '"status":"$status",'
    '"bytes_sent":"$body_bytes_sent",'
    '"upstream_response_time":"$upstream_response_time",'
    '"host":"$host",'
    '"proxy_host":"$proxy_host",'
    '"upstream":"$upstream_addr"'
    '}';

# Long keepalive for slow blob downloads; gzip is disabled since layer
# blobs are already compressed.
keepalive_timeout 300;
gzip off;

# Entrypoint generates the proxy_cache_path here, so it is configurable externally.
include /etc/nginx/conf.d/cache_max_size.conf;
# Just in case you want to rewrite some hosts. Default maps directly.
map $host $targetHost {
    hostnames;
    include /etc/nginx/docker.targetHost.map;
    default $host;
}

# A map to enable authentication to some specific docker registries.
# This is auto-generated by the entrypoint.sh based on environment variables.
map $host $dockerAuth {
    hostnames;
    include /etc/nginx/docker.auth.map;
    default "";
}

# @TODO: actually for auth.docker.io, if we want to support multiple authentications, we'll need to decide
# @TODO: based not only on the hostname, but also URI (/token) and query string (?scope)
# @TODO: I wonder if this would help gcr.io and quay.io with authentication also....

map $dockerAuth $finalAuth {
    "" "$http_authorization"; # if empty, keep the original passed-in from the docker client.
    default "Basic $dockerAuth"; # if not empty, add the Basic preamble to the auth
}
# Map to decide which hosts get directed to the caching portion.
# This is automatically generated from the list of cached registries, plus a few fixed hosts.
# By default, we don't intercept, allowing free flow of non-registry traffic.
map $connect_host $interceptedHost {
    hostnames;
    include /etc/nginx/docker.intercept.map;
    # $connect_addr is the 'IP address and port of the remote host, e.g.
    # "192.168.1.5:12345". IP address is resolved from host name of CONNECT
    # request line.'
    default "$connect_addr";
}

# These maps parse the original Host and URI from a /forcecache redirect.
map $request_uri $realHost {
    ~/forcecacheinsecure/([^:/]+)/originalwas(/.+) $1;
    ~/forcecachesecure/([^:/]+)/originalwas(/.+) $1;
    default "DID_NOT_MATCH_HOST";
}

map $request_uri $realPath {
    ~/forcecacheinsecure/([^:/]+)/originalwas(/.+) $2;
    ~/forcecachesecure/([^:/]+)/originalwas(/.+) $2;
    default "DID_NOT_MATCH_PATH";
}
# The proxy director layer, listens on 3128.
# CONNECT requests are steered, via $interceptedHost, either to the caching
# layer or straight through to the remote host.
server {

listen 3128;
server_name _;

# Don't log the CONNECT proxy; swap the two lines below to debug it.
#access_log /var/log/nginx/access.log debug_proxy;
access_log off;
set $docker_proxy_request_type "unknown-connect";

# NOTE(review): proxy_connect requires the third-party
# ngx_http_proxy_connect_module to be compiled in.
proxy_connect;
proxy_connect_address $interceptedHost;
proxy_max_temp_file_size 0;

# We need to resolve the real names of our proxied servers.
#resolver 8.8.8.8 4.2.2.2 ipv6=off; # Avoid ipv6 addresses for now
include /etc/nginx/resolvers.conf;

# Forward proxy for non-CONNECT requests: just a health banner.
location / {
    add_header "Content-type" "text/plain" always;
    return 200 "docker-registry-proxy: The docker caching proxy is working!";
}

# Serve the generated CA certificate for clients to download and trust.
location /ca.crt {
    alias /ca/ca.crt;
}

# Serve a shell script that configures a systemd-based docker host to use
# this proxy and trust its CA. $scheme/$http_host are expanded by nginx.
location /setup/systemd {
    add_header "Content-type" "text/plain" always;
    return 200 '
set -e

if [ ! -d /etc/systemd ]; then
    echo "Not a systemd system"
    exit 1
fi

mkdir -p /etc/systemd/system/docker.service.d
cat << EOD > /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTPS_PROXY=$scheme://$http_host/"
EOD

# Get the CA certificate from the proxy and make it a trusted root.
curl $scheme://$http_host/ca.crt > /usr/share/ca-certificates/docker_registry_proxy.crt
if fgrep -q "docker_registry_proxy.crt" /etc/ca-certificates.conf ; then
    echo "certificate refreshed"
else
    echo "docker_registry_proxy.crt" >> /etc/ca-certificates.conf
fi

update-ca-certificates --fresh

# Reload systemd
systemctl daemon-reload

# Restart dockerd
systemctl restart docker.service
echo "Docker configured with HTTPS_PROXY=$scheme://$http_host/"
';
} # end location /setup/systemd

} # end server
# The caching layer: terminates TLS for the intercepted registry hosts and
# serves cached registry responses.
server {

# Listen on both 80 and 443, for all hostnames.
# Actually could be 443 or 444, depending on debug; this is now generated by the entrypoint.
listen 80 default_server;
include /etc/nginx/caching.layer.listen;
server_name _;

# Do some tweaked logging.
access_log /var/log/nginx/access.log tweaked;
set $docker_proxy_request_type "unknown";

# Send upstream status as header.
add_header X-Docker-Registry-Proxy-Cache-Upstream-Status "$upstream_cache_status";

# Use the generated certificates; they contain names for all the proxied registries.
ssl_certificate /certs/fullchain.pem;
ssl_certificate_key /certs/web.key;

# We need to resolve the real names of our proxied servers.
#resolver 8.8.8.8 4.2.2.2 ipv6=off; # Avoid ipv6 addresses for now
include /etc/nginx/resolvers.conf;

# Docker needs this. Don't ask.
chunked_transfer_encoding on;

# Block POST/PUT/DELETE. Don't use this proxy for pushing.
if ($request_method = POST) {
    return 405 "POST method is not allowed";
}
if ($request_method = PUT) {
    return 405 "PUT method is not allowed";
}
if ($request_method = DELETE) {
    return 405 "DELETE method is not allowed";
}
# Give slow upstreams (and the cache lock below) plenty of time.
proxy_read_timeout 900;

# Use cache locking, with a huge timeout, so that multiple Docker clients asking
# for the same blob at the same time will wait for the first to finish instead
# of doing multiple upstream requests.
proxy_cache_lock on;
proxy_cache_lock_timeout 880s;

# Cache all 200, 206 for 60 days.
proxy_cache_valid 200 206 60d;

# Some extra settings to maximize cache hits and efficiency.
proxy_force_ranges on;
proxy_ignore_client_abort on;
proxy_cache_revalidate on;

# Hide/ignore headers from caching. S3 especially likes to send Expires
# headers in the past in some situations.
proxy_hide_header Set-Cookie;
proxy_ignore_headers X-Accel-Expires Expires Cache-Control Set-Cookie;

# Add the authentication info, if the map matched the target domain.
proxy_set_header Authorization $finalAuth;

# Use SNI during the TLS handshake with the upstream.
proxy_ssl_server_name on;

# This comes from a include file generated by the entrypoint.
include /etc/nginx/docker.verify.ssl.conf;
# Block API v1. We don't know how to handle these.
# Docker-client should start with v2 and fall back to v1 if something fails,
# for example if authentication failed to a protected v2 resource.
location /v1 {
    return 405 "docker-registry-proxy: docker is trying to use v1 API. Either the image does not exist upstream, or you need to configure docker-registry-proxy to authenticate against $host";
}
# For blob requests by digest, do cache, and treat redirects.
location ~ ^/v2/(.*)/blobs/sha256:(.*) {
    set $docker_proxy_request_type "blob-by-digest";
    add_header X-Docker-Registry-Proxy-Cache-Upstream-Status "$upstream_cache_status";
    add_header X-Docker-Registry-Proxy-Cache-Type "$docker_proxy_request_type";
    proxy_pass https://$targetHost;
    proxy_cache cache;
    proxy_cache_key $uri;
    # Intercept upstream redirects and follow them server-side.
    proxy_intercept_errors on;
    error_page 301 302 307 = @handle_redirects;
}

# For manifest requests by digest, do cache, and treat redirects.
# These are some of the requests that DockerHub will throttle.
location ~ ^/v2/(.*)/manifests/sha256:(.*) {
    set $docker_proxy_request_type "manifest-by-digest";
    add_header X-Docker-Registry-Proxy-Cache-Upstream-Status "$upstream_cache_status";
    add_header X-Docker-Registry-Proxy-Cache-Type "$docker_proxy_request_type";
    proxy_pass https://$targetHost;
    proxy_cache cache;
    proxy_cache_key $uri;
    proxy_intercept_errors on;
    error_page 301 302 307 = @handle_redirects;
}
# Cache manifest requests that are not by digest (e.g. tags).
# Since these are mutable, we invalidate them immediately and keep them only
# in case the backend is down.
# These are some of the requests that DockerHub will throttle.
location ~ ^/v2/(.*)/manifests/ {
    set $docker_proxy_request_type "manifest-mutable";
    add_header X-Docker-Registry-Proxy-Cache-Upstream-Status "$upstream_cache_status";
    add_header X-Docker-Registry-Proxy-Cache-Type "$docker_proxy_request_type";
    proxy_pass https://$targetHost;
    proxy_cache cache;
    proxy_cache_key $uri;
    proxy_intercept_errors on;
    # Serve stale entries when the upstream errors out or throttles (429).
    proxy_cache_use_stale error timeout http_500 http_502 http_504 http_429;
    # Immediately stale: every hit revalidates against the upstream.
    proxy_cache_valid 0s;
    error_page 301 302 307 = @handle_redirects;
}

# Cache blob requests that are not by digest.
# Since these are mutable, we invalidate them immediately and keep them only
# in case the backend is down.
location ~ ^/v2/(.*)/blobs/ {
    set $docker_proxy_request_type "blob-mutable";
    add_header X-Docker-Registry-Proxy-Cache-Upstream-Status "$upstream_cache_status";
    add_header X-Docker-Registry-Proxy-Cache-Type "$docker_proxy_request_type";
    proxy_pass https://$targetHost;
    proxy_cache cache;
    proxy_cache_key $uri;
    proxy_intercept_errors on;
    proxy_cache_use_stale error timeout http_500 http_502 http_504 http_429;
    proxy_cache_valid 0s;
    error_page 301 302 307 = @handle_redirects;
}
# Redirect handler: registries commonly redirect blob downloads to object
# storage. Follow the redirect server-side and cache the response under the
# ORIGINAL request URI, so future clients never need the redirect.
location @handle_redirects {
    # Capture these values now; they are reset as soon as the proxy_*
    # directives below are invoked.
    set $original_uri $uri;
    set $orig_loc $upstream_http_location;

    # During this process nginx preserves the headers intended for the
    # original destination. For some backends (eg: google storage) a
    # forwarded Authorization header causes problems — and it would leak the
    # registry credentials into the (unrelated) storage system.
    proxy_set_header Authorization "";

    # nginx goes to fetch the value from the upstream Location header.
    proxy_pass $orig_loc;
    proxy_cache cache;
    # Store the result with the cache key of the original request URI, so
    # that future clients don't need to follow the redirect too.
    proxy_cache_key $original_uri;
}
# By default, don't cache anything.
location / {
    proxy_pass https://$targetHost;
    proxy_cache off;
}

} # end caching server

} # end http