initial commit
parent 228777050c
commit 325dd23ae5
@@ -0,0 +1,5 @@
.idea
.git
.gitignore
LICENSE
README.md

@@ -0,0 +1 @@
.idea

@@ -0,0 +1,26 @@
# Use stable nginx on alpine for a light container
FROM nginx:stable-alpine

# Add openssl and clean the apk cache
RUN apk add --update openssl && rm -rf /var/cache/apk/*

# Generate a self-signed SSL certificate. It will be ignored by Docker clients due to insecure-registries.
RUN mkdir -p /etc/ssl && \
    cd /etc/ssl && \
    openssl genrsa -des3 -passout pass:x -out key.pem 2048 && \
    cp key.pem key.pem.orig && \
    openssl rsa -passin pass:x -in key.pem.orig -out key.pem && \
    openssl req -new -key key.pem -out cert.csr -subj "/C=BR/ST=BR/L=Nowhere/O=Fake Docker Mirror/OU=Docker/CN=docker.proxy" && \
    openssl x509 -req -days 3650 -in cert.csr -signkey key.pem -out cert.pem

# Create the cache directory
RUN mkdir -p /docker_mirror_cache

# Expose it as a volume, so the cache can be kept external to the Docker image
VOLUME /docker_mirror_cache

# Add our configuration
ADD nginx.conf /etc/nginx/nginx.conf

# Test that the configuration is OK
RUN nginx -t
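
A minimal usage sketch for the Dockerfile above (not part of this commit; image, container, and volume names are illustrative, and how clients are pointed at docker.proxy is an assumption):

# Build the proxy image and run it, keeping the cache on a named volume.
docker build -t docker-mirror-proxy .
docker run -d --name docker-mirror-proxy \
    -p 80:80 -p 443:443 \
    -v docker_mirror_cache:/docker_mirror_cache \
    docker-mirror-proxy

# On each client, make docker.proxy resolve to the proxy host (e.g. an /etc/hosts entry)
# and merge the following into /etc/docker/daemon.json, then restart the Docker daemon,
# so the self-signed certificate generated above is accepted:
#     { "insecure-registries": ["docker.proxy"] }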

@@ -0,0 +1,140 @@
user nginx;
worker_processes auto;

error_log /var/log/nginx/error.log debug;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Use a debug-oriented logging format.
    log_format tweaked '$remote_addr - $remote_user [$time_local] "$request" '
                       '$status $body_bytes_sent '
                       '"HOST: $host" "UPSTREAM: $upstream_addr" '
                       '"UPSTREAM-STATUS: $upstream_status" '
                       '"SSL-PROTO: $ssl_protocol" '
                       '"PROXY-HOST: $proxy_host" "UPSTREAM-REDIRECT: $upstream_http_location" "CACHE-STATUS: $upstream_cache_status"';

    access_log /var/log/nginx/access.log tweaked;
    keepalive_timeout 300;
    gzip off;

    # The cache directory. This can get huge. Better to use a Docker volume pointing here!
    # Set to 32gb which should be enough
    proxy_cache_path /docker_mirror_cache levels=1:2 max_size=32g inactive=60d keys_zone=cache:10m use_temp_path=off;

    # Just in case you want to rewrite some hosts. Default maps directly.
    map $host $targetHost {
        hostnames;
        default $host;
    }
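
    # Illustrative only, not part of the original config: to send one registry's
    # traffic to a different upstream, add an entry to the map above, for example
    #     registry-1.docker.io some-other-mirror.example.com;
    # (every other host keeps the default 1:1 mapping).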

    # These maps parse the original Host and URI from a /forcecache redirect.
    map $request_uri $realHost {
        ~/forcecacheinsecure/([^:/]+)/originalwas(/.+) $1;
        ~/forcecachesecure/([^:/]+)/originalwas(/.+) $1;
        default "DID_NOT_MATCH_HOST";
    }

    map $request_uri $realPath {
        ~/forcecacheinsecure/([^:/]+)/originalwas(/.+) $2;
        ~/forcecachesecure/([^:/]+)/originalwas(/.+) $2;
        default "DID_NOT_MATCH_PATH";
    }

    server {
        # Listen on both 80 and 443, for all hostnames.
        listen 80 default_server;
        listen 443 ssl default_server;
        server_name _;

        # Use a fake SSL certificate. This does not matter, since the Docker clients will be configured with insecure-registries.
        ssl_certificate /etc/ssl/cert.pem;
        ssl_certificate_key /etc/ssl/key.pem;

        # We need to resolve the real names of our proxied servers.
        resolver 8.8.8.8 4.2.2.2 ipv6=off; # Avoid ipv6 addresses for now

        # Docker needs this. Don't ask.
        chunked_transfer_encoding on;

        # Block POST/PUT/DELETE. Don't use this proxy for pushing.
        if ($request_method = POST) {
            return 405;
        }
        if ($request_method = PUT) {
            return 405;
        }
        if ($request_method = DELETE) {
            return 405;
        }

        proxy_read_timeout 900;

        # Use cache locking, with a huge timeout, so that multiple Docker clients asking for the same blob at the same time
        # will wait for the first to finish instead of doing multiple upstream requests.
        proxy_cache_lock on;
        proxy_cache_lock_timeout 120s;
        proxy_cache_valid 200 301 302 60d; # Cache all 200, 301, and 302 for 60 days.
        proxy_force_ranges on;
        proxy_ignore_client_abort on;
        proxy_cache_revalidate on;

        # don't cache mutable entity /v2/<name>/manifests/<reference> (unless the reference is a digest)
        location ~ ^/v2/[^\/]+/manifests/(?![A-Fa-f0-9_+.-]+:) {
            proxy_pass https://$targetHost;
            add_header X-Eh-Aqui $targetHost;
        }

        # don't cache mutable entity /v2/<name>/tags/list
        location ~ ^/v2/[^\/]+/tags/list {
            proxy_pass https://$targetHost;
        }

        # don't cache mutable entity /v2/_catalog
        location ~ ^/v2/_catalog$ {
            proxy_pass https://$targetHost;
        }

        # cache everything else
        location / {
            proxy_pass https://$targetHost;
            proxy_cache cache;

            # Handling of redirects.
            # Many registries (e.g. quay.io, or k8s.gcr.io) emit a Location redirect
            # pointing to something like cloudfront, or google storage.
            # We hack into the response, extracting the host and URI parts, injecting them into a URL that points back to us.
            # That gives us a chance to intercept and cache those, which are the actual multi-megabyte blobs we originally wanted to cache.
            # We do it twice, once for http and once for https.
            proxy_redirect ~^https://([^:/]+)(/.+)$ https://docker.proxy/forcecachesecure/$1/originalwas$2;
            proxy_redirect ~^http://([^:/]+)(/.+)$ http://docker.proxy/forcecacheinsecure/$1/originalwas$2;
        }

        # handling for the redirect case explained above, with https.
        # The $realHost and $realPath variables come from a map defined at the top of this file.
        location /forcecachesecure {
            proxy_pass https://$realHost$realPath;
            proxy_cache cache;
            add_header X-Docker-Caching-Proxy-Real-Proto https;
            add_header X-Docker-Caching-Proxy-Real-Host $realHost;
            add_header X-Docker-Caching-Proxy-Real-Path $realPath;
        }

        # handling for the redirect case explained above, with http.
        # The $realHost and $realPath variables come from a map defined at the top of this file.
        location /forcecacheinsecure {
            proxy_pass http://$realHost$realPath;
            proxy_cache cache;
            add_header X-Docker-Caching-Proxy-Real-Proto http;
            add_header X-Docker-Caching-Proxy-Real-Host $realHost;
            add_header X-Docker-Caching-Proxy-Real-Path $realPath;
        }
    }
}
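
To make the redirect handling above concrete, here is an illustrative trace (the CDN hostname, path, and digest are made up; only docker.proxy and the /forcecache* URL scheme come from the configuration):

# 1. A client requests a blob through the proxy; the upstream registry answers with a redirect:
#        Location: https://blobs.example-cdn.com/some/blob/sha256:0123abcd
# 2. proxy_redirect rewrites that header so the client comes back to this proxy:
#        Location: https://docker.proxy/forcecachesecure/blobs.example-cdn.com/originalwas/some/blob/sha256:0123abcd
# 3. On the follow-up request, the $realHost/$realPath maps take the URI apart again:
#        $realHost = blobs.example-cdn.com
#        $realPath = /some/blob/sha256:0123abcd
#    and the /forcecachesecure location proxies https://$realHost$realPath through the
#    cache, which is where the multi-megabyte blobs actually get stored.
# On a repeated pull, those requests should show up with "CACHE-STATUS: HIT" in
# /var/log/nginx/access.log, thanks to the tweaked log_format defined at the top.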