Merge branch 'master' into readme_conflict_resolved
commit
fcb6888985
|
@ -19,7 +19,7 @@ ENV DO_DEBUG_BUILD="$DEBUG_BUILD"
|
|||
# Build mitmproxy via pip. This is heavy, takes minutes do build and creates a 90mb+ layer. Oh well.
|
||||
RUN [[ "a$DO_DEBUG_BUILD" == "a1" ]] && { echo "Debug build ENABLED." \
|
||||
&& apk add --no-cache --update su-exec git g++ libffi libffi-dev libstdc++ openssl-dev python3 python3-dev py3-pip py3-wheel py3-six py3-idna py3-certifi py3-setuptools \
|
||||
&& LDFLAGS=-L/lib pip install mitmproxy==5.2 \
|
||||
&& LDFLAGS=-L/lib pip install MarkupSafe==2.0.1 mitmproxy==5.2 \
|
||||
&& apk del --purge git g++ libffi-dev openssl-dev python3-dev py3-pip py3-wheel \
|
||||
&& rm -rf ~/.cache/pip \
|
||||
; } || { echo "Debug build disabled." ; }
|
||||
|
@ -99,6 +99,11 @@ ENV MANIFEST_CACHE_DEFAULT_TIME="1h"
|
|||
# Should we allow actions different than pull, default to false.
|
||||
ENV ALLOW_PUSH="false"
|
||||
|
||||
# If push is allowed, buffering requests can cause issues on slow upstreams.
|
||||
# If you have trouble pushing, set this to false first, then fix remaining timeouts.
|
||||
# Default is true to not change default behavior.
|
||||
ENV PROXY_REQUEST_BUFFERING="true"
|
||||
|
||||
# Timeouts
|
||||
# ngx_http_core_module
|
||||
ENV SEND_TIMEOUT="60s"
|
||||
|
|
50
README.md
50
README.md
|
@ -88,6 +88,10 @@ for this to work it requires inserting a root CA certificate into system trusted
|
|||
- Env `AUTH_REGISTRIES_DELIMITER` to change the separator between authentication info. By default, a space: "` `". If you use keys that contain spaces (as with Google Cloud Registry), you should update this variable, e.g. setting it to `AUTH_REGISTRIES_DELIMITER=";;;"`. In that case, `AUTH_REGISTRIES` could contain something like `registry1.com:user1:pass1;;;registry2.com:user2:pass2`.
|
||||
- Env `AUTH_REGISTRY_DELIMITER` to change the separator between authentication info *parts*. By default, a colon: "`:`". If you use keys that contain single colons, you should update this variable, e.g. setting it to `AUTH_REGISTRIES_DELIMITER=":::"`. In that case, `AUTH_REGISTRIES` could contain something like `registry1.com:::user1:::pass1 registry2.com:::user2:::pass2`.
|
||||
- Env `AUTH_REGISTRIES_RAW` to specify raw auth mapping for nginx auth.map configuration. Not mutually exclusive with `AUTH_REGISTRIES`.
|
||||
- Env `PROXY_REQUEST_BUFFERING`: If push is allowed, buffering requests can cause issues on slow upstreams.
|
||||
If you have trouble pushing, set this to `false` first, then fix remaining timeouts.
|
||||
Default is `true` to not change default behavior.
|
||||
ENV PROXY_REQUEST_BUFFERING="true"
|
||||
- Timeouts ENVs - all of them can be specified to control different timeouts, and if not set, the defaults will be the ones from `Dockerfile`. The directives will be added into the `http` block:
|
||||
- SEND_TIMEOUT : see [send_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#send_timeout)
|
||||
- CLIENT_BODY_TIMEOUT : see [client_body_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout)
|
||||
|
@ -223,6 +227,52 @@ done
|
|||
wait $pids # Wait for all configurations to end
|
||||
```
|
||||
|
||||
### K3D Cluster
|
||||
|
||||
[K3d](https://k3d.io/) is similar to Kind but is based on k3s. In order to run with its registry you need to setup settings like shown below.
|
||||
|
||||
```sh
|
||||
# docker-registry-proxy
|
||||
docker run -d --name registry-proxy --restart=always \
|
||||
-v /tmp/registry-proxy/mirror_cache:/docker_mirror_cache \
|
||||
-v /tmp/registry-proxy/certs:/ca \
|
||||
rpardini/docker-registry-proxy:0.6.4
|
||||
|
||||
export PROXY_HOST=registry-proxy
|
||||
export PROXY_PORT=3128
|
||||
export NOPROXY_LIST="localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.local,.svc"
|
||||
|
||||
cat <<EOF > /etc/k3d-proxy-config.yaml
|
||||
apiVersion: k3d.io/v1alpha3
|
||||
kind: Simple
|
||||
name: mycluster
|
||||
servers: 1
|
||||
agents: 0
|
||||
options:
|
||||
k3d:
|
||||
wait: true
|
||||
timeout: "60s"
|
||||
kubeconfig:
|
||||
updateDefaultKubeconfig: true
|
||||
switchCurrentContext: true
|
||||
env:
|
||||
- envVar: HTTP_PROXY=http://$PROXY_HOST:$PROXY_PORT
|
||||
nodeFilters:
|
||||
- all
|
||||
- envVar: HTTPS_PROXY=http://$PROXY_HOST:$PROXY_PORT
|
||||
nodeFilters:
|
||||
- all
|
||||
- envVar: NO_PROXY='$NOPROXY_LIST'
|
||||
nodeFilters:
|
||||
- all
|
||||
volumes:
|
||||
- volume: $REGISTRY_DIR/docker_mirror_certs/ca.crt:/etc/ssl/certs/registry-proxy-ca.pem
|
||||
nodeFilters:
|
||||
- all
|
||||
EOF
|
||||
|
||||
k3d cluster create --config /etc/k3d-proxy-config.yaml
|
||||
```
|
||||
|
||||
## Configuring the Docker clients using Docker Desktop for Mac
|
||||
|
||||
|
|
|
@ -259,6 +259,20 @@ echo -e "\nTimeout configs: ---"
|
|||
cat /etc/nginx/nginx.timeouts.config.conf
|
||||
echo -e "---\n"
|
||||
|
||||
# Request buffering
|
||||
echo "" > /etc/nginx/proxy.request.buffering.conf
|
||||
if [[ "a${PROXY_REQUEST_BUFFERING}" == "afalse" ]]; then
|
||||
cat << EOD > /etc/nginx/proxy.request.buffering.conf
|
||||
proxy_max_temp_file_size 0;
|
||||
proxy_request_buffering off;
|
||||
proxy_http_version 1.1;
|
||||
EOD
|
||||
fi
|
||||
|
||||
echo -e "\nRequest buffering: ---"
|
||||
cat /etc/nginx/proxy.request.buffering.conf
|
||||
echo -e "---\n"
|
||||
|
||||
# Upstream SSL verification.
|
||||
echo "" > /etc/nginx/docker.verify.ssl.conf
|
||||
if [[ "a${VERIFY_SSL}" == "atrue" ]]; then
|
||||
|
|
|
@ -142,6 +142,7 @@ http {
|
|||
set $docker_proxy_request_type "unknown-connect";
|
||||
|
||||
proxy_connect;
|
||||
proxy_connect_allow all;
|
||||
proxy_connect_address $interceptedHost;
|
||||
proxy_max_temp_file_size 0;
|
||||
|
||||
|
@ -227,6 +228,9 @@ echo "Docker configured with HTTPS_PROXY=$scheme://$http_host/"
|
|||
|
||||
proxy_read_timeout 900;
|
||||
|
||||
# Request buffering
|
||||
include /etc/nginx/proxy.request.buffering.conf;
|
||||
|
||||
# Use cache locking, with a huge timeout, so that multiple Docker clients asking for the same blob at the same time
|
||||
# will wait for the first to finish instead of doing multiple upstream requests.
|
||||
proxy_cache_lock on;
|
||||
|
|
Loading…
Reference in New Issue