refactor(suites): replace kind with k3d (#4553)

This change replaces Kind with a much lighter Kubernetes distribution, K3D.
Many of our manifests have also been consolidated.
Other key changes have been highlighted below, with a usage sketch after the list:
* Utilise K3D Traefik Ingress
* Automatically provision all manifests, removing the abundance of shell scripts
* Expose Traefik and K8s dashboards through the Ingress
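As a usage sketch (assuming the `authelia-scripts` CLI the repository documents for suites; the suite name matches `kubernetesSuiteName` in the code below):

```bash
# Hypothetical session: provision the K3D-based suite, run its tests, tear it down.
authelia-scripts suites setup Kubernetes
authelia-scripts suites test Kubernetes
authelia-scripts suites teardown Kubernetes
```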
Branch: pull/4556/head
Author: Amir Zarrinkafsh, 2022-12-13 10:15:32 +11:00 (committed via GitHub)
Parent: 180620e519
Commit: 505a7e90b3
54 changed files with 891 additions and 1304 deletions


@ -3,7 +3,7 @@
 set +u

 declare -A BUILDS=(["linux"]="amd64 arm arm64 amd64-musl arm-musl arm64-musl" ["freebsd"]="amd64")
-DOCKER_IMAGE=authelia/authelia
+DOCKER_IMAGE=authelia:dist

 if [[ "${BUILDKITE_LABEL}" == ":hammer_and_wrench: Unit Test" ]]; then
   if [[ ! "${BUILDKITE_BRANCH}" =~ ^renovate/ ]]; then


@ -30,10 +30,13 @@ fi
 if [[ "${BUILDKITE_LABEL}" =~ ":selenium:" ]]; then
   DEFAULT_ARCH=coverage
-  echo "--- :docker: Extract, load and tag build container"
+  echo "--- :docker: Extract and load build container"
   buildkite-agent artifact download "authelia-image-${DEFAULT_ARCH}*" .
-  zstdcat "authelia-image-${DEFAULT_ARCH}.tar.zst" | docker load
-  docker tag authelia/authelia authelia:dist
+  if [[ "${SUITE}" == "Kubernetes" ]]; then
+    zstd -d authelia-image-coverage.tar.zst --stdout > ./internal/suites/example/kube/authelia-image-${DEFAULT_ARCH}.tar
+  else
+    zstdcat "authelia-image-${DEFAULT_ARCH}.tar.zst" | docker load
+  fi

   if [[ "${BUILD_DUO}" == "true" ]] && [[ "${SUITE}" == "DuoPush" ]]; then
     CONTAINER="integration-duo"


@ -7,7 +7,11 @@ trim_trailing_whitespace = true
 end_of_line = lf
 insert_final_newline = true

-[{.buildkite/hooks/**,*.sh,*.yml,*.yaml}]
+[*.{sh,yml,yaml}]
+indent_style = space
+indent_size = 2
+
+[.buildkite/hooks/**]
 indent_style = space
 indent_size = 2

.gitignore

@ -1,50 +1,27 @@
+# IDE user configuration
+.idea/
+.vscode/
-# NodeJs modules
+# Nodejs modules
+.pnpm-store/
 node_modules/
-# npm debug logs
-npm-debug.log*
 # Coverage reports
 coverage/
+.nyc_output/
 coverage.txt
-.vscode/
-*.swp
-*~
-# Directory used by example
-/notifications/
-# VSCode user configuration
-.vscode/
 # Generated by TypeScript compiler
 dist/
-.nyc_output/
-*.tgz
 # Specific files
 /configuration.yml
 /config.yml
 /config.test.yml
+internal/suites/example/ldap/private.ldif
-Configuration.schema.json
 .suite
 .kube
-.idea
-.authelia-interrupt
-qemu-*-static
-public_html.gen.go
-authelia
+authelia-image-dev.tar
+/authelia
 __debug_bin


@ -14,12 +14,17 @@
   "workarounds:all"
 ],
 "enabledManagers": [
-  "bundler",
   "docker-compose",
   "dockerfile",
   "gomod",
+  "kubernetes",
   "npm"
 ],
+"kubernetes": {
+  "fileMatch": [
+    "kube/.+\\.yml$"
+  ]
+},
 "labels": [
   "dependencies"
 ],
@ -47,6 +52,14 @@
   "go"
 ]
 },
+{
+  "datasources": [
+    "kubernetes"
+  ],
+  "addLabels": [
+    "kubernetes"
+  ]
+},
 {
 "datasources": [
   "npm"


@ -87,12 +87,19 @@ var hostEntries = []HostEntry{
 	{Domain: "mail.example.com", IP: "192.168.240.100"},
 	{Domain: "duo.example.com", IP: "192.168.240.100"},
-	// For Traefik suite.
-	{Domain: "traefik.example.com", IP: "192.168.240.100"},
 	// For HAProxy suite.
 	{Domain: "haproxy.example.com", IP: "192.168.240.100"},
+	// Kubernetes dashboard.
+	{Domain: "kubernetes.example.com", IP: "192.168.240.100"},
+	// OIDC tester app.
+	{Domain: "oidc.example.com", IP: "192.168.240.100"},
+	{Domain: "oidc-public.example.com", IP: "192.168.240.100"},
+	// For Traefik suite.
+	{Domain: "traefik.example.com", IP: "192.168.240.100"},
 	// For testing network ACLs.
 	{Domain: "proxy-client1.example.com", IP: "192.168.240.201"},
 	{Domain: "proxy-client2.example.com", IP: "192.168.240.202"},
@ -107,12 +114,6 @@ var hostEntries = []HostEntry{
 	{Domain: "redis-sentinel-0.example.com", IP: "192.168.240.120"},
 	{Domain: "redis-sentinel-1.example.com", IP: "192.168.240.121"},
 	{Domain: "redis-sentinel-2.example.com", IP: "192.168.240.122"},
-	// Kubernetes dashboard.
-	{Domain: "kubernetes.example.com", IP: "192.168.240.110"},
-	// OIDC tester app.
-	{Domain: "oidc.example.com", IP: "192.168.240.100"},
-	{Domain: "oidc-public.example.com", IP: "192.168.240.100"},
 }

 func runCommand(cmd string, args ...string) {


@ -58,6 +58,12 @@ const (
 	testPassword = "password"
 )

+const (
+	namespaceAuthelia  = "authelia"
+	namespaceDashboard = "kubernetes-dashboard"
+	namespaceKube      = "kube-system"
+)
+
 var (
 	storageLocalTmpConfig = schema.Configuration{
 		TOTP: schema.TOTPConfiguration{


@ -58,6 +58,15 @@ func waitUntilAutheliaFrontendIsReady(dockerEnvironment *DockerEnvironment) error
 		[]string{"dev server running at", "ready in"})
 }

+func waitUntilK3DIsReady(dockerEnvironment *DockerEnvironment) error {
+	return waitUntilServiceLogDetected(
+		5*time.Second,
+		90*time.Second,
+		dockerEnvironment,
+		"k3d",
+		[]string{"API listen on [::]:2376"})
+}
+
 func waitUntilSambaIsReady(dockerEnvironment *DockerEnvironment) error {
 	return waitUntilServiceLogDetected(
 		5*time.Second,


@ -0,0 +1,26 @@
---
version: '3'
services:
k3d:
image: ghcr.io/k3d-io/k3d:5.4.6-dind
volumes:
- './example/kube:/authelia'
- './example/kube/authelia/configs/configuration.yml:/configmaps/authelia/configuration.yml'
- './common/ssl:/configmaps/authelia/ssl'
- './example/compose/ldap/ldif:/configmaps/ldap'
- './example/compose/nginx/backend:/configmaps/nginx-backend'
privileged: true
networks:
authelianet:
aliases:
- public.example.com
- secure.example.com
- login.example.com
- admin.example.com
- dev.example.com
- mail.example.com
- kubernetes.example.com
- traefik.example.com
# Set the IP to be able to query on port 443
ipv4_address: 192.168.240.100
...
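Because the whole cluster now lives inside the single `k3d` dind service above, ad-hoc debugging goes through `docker-compose exec` rather than a dedicated proxy container. A minimal sketch, assuming the compose project layout used by the suites:

```bash
# Run kubectl inside the k3d container to inspect the embedded cluster.
docker-compose -p authelia \
  -f internal/suites/docker-compose.yml \
  -f internal/suites/example/compose/k3d/docker-compose.yml \
  exec -T k3d kubectl get nodes
```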


@ -1,17 +0,0 @@
FROM alpine:3.17.0
WORKDIR /kind
RUN apk add --no-cache bash curl docker && \
curl -Lo kind https://github.com/kubernetes-sigs/kind/releases/download/v0.10.0/kind-linux-amd64 && chmod +x kind && \
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.20.2/bin/linux/amd64/kubectl && chmod +x kubectl
ADD entrypoint.sh entrypoint.sh
ADD patch-kubeconfig.sh patch-kubeconfig.sh
ENV HOME=/kind/config
ENV KUBECONFIG=/kind/config/.kube/kind-config-kind
VOLUME /kind/config
ENTRYPOINT ["./entrypoint.sh"]


@ -1,9 +0,0 @@
---
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
# yamllint disable-line rule:indentation
- |-
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["http://registrycache.internal:5000"]
...


@ -1,44 +0,0 @@
---
version: '3'
services:
authelia-kind-proxy:
build:
context: ./example/compose/kind
volumes:
- 'kind-volume:/kind/config'
- '/var/run/docker.sock:/var/run/docker.sock'
- './example/kube:/authelia'
- './example/compose/kind/config.yml:/etc/kind/config.yml'
command: 'kubectl port-forward --address 0.0.0.0 -n authelia service/nginx-ingress-controller-service 8080:443'
environment:
- KIND_EXPERIMENTAL_DOCKER_NETWORK=authelia_authelianet
networks:
authelianet:
aliases:
- public.example.com
- secure.example.com
- login.example.com
- admin.example.com
- dev.example.com
- mail.example.com
# Set the IP to be able to query on port 443
ipv4_address: 192.168.240.100
kube-dashboard:
build:
context: ./example/compose/kind
volumes:
- 'kind-volume:/kind/config'
- './example/compose/kind/entrypoint-dashboard.sh:/entrypoint-dashboard.sh'
command: '/entrypoint-dashboard.sh'
environment:
- KIND_EXPERIMENTAL_DOCKER_NETWORK=authelia_authelianet
networks:
authelianet:
aliases:
- kubernetes.example.com
ipv4_address: 192.168.240.110
volumes:
kind-volume: # yamllint disable-line rule:empty-values
...


@ -1,25 +0,0 @@
#!/usr/bin/env bash
# Retries a command on failure.
# $1 - the max number of attempts
# $2... - the command to run
retry() {
local -r -i max_attempts="$1"; shift
local -r cmd="$@"
local -i attempt_num=1
until $cmd
do
if ((attempt_num==max_attempts))
then
echo "Attempt $attempt_num failed and there are no more attempts left!"
return 1
else
echo "Attempt $attempt_num failed! Trying again in 10 seconds..."
sleep 10
fi
done
}
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
retry 10 kubectl port-forward --address 0.0.0.0 -n kubernetes-dashboard service/kubernetes-dashboard 443:443


@ -1,5 +0,0 @@
#!/bin/sh
export PATH=/kind:$PATH
exec "$@"


@ -1,6 +0,0 @@
#!/bin/sh
# This script patches the kubeconfig generated by Kind in order to access the cluster container via this container
echo "Patching Kubeconfig to target Kube container without link"
sed -i "s/127.0.0.1:.*/$(docker inspect -f '{{(index .NetworkSettings.Networks "authelia_authelianet").IPAddress}}' kind-control-plane):6443/" ${KUBECONFIG}


@ -1,62 +0,0 @@
# Authelia on Kubernetes
Authelia is now available on Kube in order to protect your most critical
applications using 2-factor authentication and Single Sign-On.
This example leverages [ingress-nginx](https://github.com/kubernetes/ingress-nginx)
to delegate authentication and authorization to Authelia within the cluster.
## Getting started
You can either try to install **Authelia** on your running instance of Kubernetes
or deploy the dedicated [suite](/docs/suites.md) called *kubernetes*.
### Set up a Kube cluster
The simplest way to start a Kubernetes cluster is to deploy the *kubernetes* suite with
authelia-scripts suites setup kubernetes
This will take a few seconds (or minutes) to deploy the cluster.
## How does it work?
### Authentication via Authelia
In a Kube cluster, the routing of incoming requests is handled by ingress
controllers, following rules provided by ingress configurations.
In this example, the [ingress-nginx](https://github.com/kubernetes/ingress-nginx)
controller has been installed to handle incoming requests. Some of them
(as specified in the ingress configuration) are forwarded to Authelia so that
it can verify whether they are allowed and should reach the protected endpoint.
The authentication is provided at the ingress level by an annotation called
`nginx.ingress.kubernetes.io/auth-url` that is filled with the URL of
Authelia's verification endpoint.
The ingress controller also requires the URL of the
authentication portal so that users can be redirected if they are not
yet authenticated. This annotation is as follows:
`nginx.ingress.kubernetes.io/auth-signin: "https://login.example.com:8080/"`
Those annotations can be seen in `apps/apps.yml` configuration.
### Production grade infrastructure
What is great about using [ingress-nginx](https://github.com/kubernetes/ingress-nginx)
is that it is compatible with [kube-lego](https://github.com/jetstack/kube-lego),
which removes the usual pain of manually renewing SSL certificates. It uses
Let's Encrypt to issue and renew certificates every three months without any
manual intervention.
## What do I need to know to deploy it in my cluster?
Given your cluster already runs an LDAP server, a Redis instance, a SQL database,
an SMTP server and an nginx ingress controller, you can deploy **Authelia**
and update your ingress configurations. An example is provided
[here](authelia).
## Questions
If you have questions about the implementation, please post them on
[![Gitter](https://img.shields.io/gitter/room/badges/shields.svg)](https://gitter.im/authelia/general?utm_source=share-link&utm_medium=link&utm_campaign=share-link)


@ -1,154 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: test-app
namespace: authelia
labels:
app: test-app
spec:
replicas: 1
selector:
matchLabels:
app: test-app
template:
metadata:
labels:
app: test-app
spec:
containers:
- name: test-app
image: nginx:alpine
command: ["/entrypoint.sh"]
ports:
- containerPort: 80
volumeMounts:
- name: config-volume
mountPath: /entrypoint.sh
subPath: entrypoint.sh
- name: config-volume
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
- name: config-volume
mountPath: /tmp/html.tar.gz
subPath: html.tar.gz
volumes:
- name: config-volume
configMap:
name: nginx-config
items:
- key: entrypoint.sh
path: entrypoint.sh
mode: 0755 # yamllint disable-line rule:octal-values
- key: nginx.conf
path: nginx.conf
- key: html.tar.gz
path: html.tar.gz
...
---
apiVersion: v1
kind: Service
metadata:
name: test-app-service
namespace: authelia
labels:
app: test-app
spec:
selector:
app: test-app
ports:
- port: 80
name: http
- port: 443
name: https
...
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: insecure-ingress
namespace: authelia
annotations:
kubernetes.io/ingress.class: "nginx"
kubernetes.io/ingress.allow-http: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
tls:
- secretName: test-app-tls
hosts:
- home.example.com
rules:
- host: home.example.com
http:
paths:
- path: /
backend:
serviceName: test-app-service
servicePort: 80
...
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: secure-ingress
namespace: authelia
annotations:
kubernetes.io/ingress.class: "nginx"
kubernetes.io/ingress.allow-http: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/auth-url: "https://authelia-service.authelia.svc.cluster.local/api/verify"
nginx.ingress.kubernetes.io/auth-signin: "https://login.example.com:8080/"
spec:
tls:
- secretName: test-app-tls
hosts:
- public.example.com
- admin.example.com
- dev.example.com
- mx1.mail.example.com
- mx2.mail.example.com
- singlefactor.example.com
rules:
- host: public.example.com
http:
paths:
- path: /
backend:
serviceName: test-app-service
servicePort: 80
- host: admin.example.com
http:
paths:
- path: /
backend:
serviceName: test-app-service
servicePort: 80
- host: dev.example.com
http:
paths:
- path: /
backend:
serviceName: test-app-service
servicePort: 80
- host: mx1.mail.example.com
http:
paths:
- path: /
backend:
serviceName: test-app-service
servicePort: 80
- host: mx2.mail.example.com
http:
paths:
- path: /
backend:
serviceName: test-app-service
servicePort: 80
- host: singlefactor.example.com
http:
paths:
- path: /
backend:
serviceName: test-app-service
servicePort: 80
...


@ -1,5 +0,0 @@
#! /bin/sh
rm -rf /usr/share/nginx/html && \
tar xfz /tmp/html.tar.gz -C /usr/share/nginx/ && \
nginx "-g daemon off;"


@ -1,51 +0,0 @@
worker_processes 1;
events {
worker_connections 1024;
}
http {
server {
listen 80;
root /usr/share/nginx/html/home;
server_name home.example.com;
}
server {
listen 80;
root /usr/share/nginx/html/public;
server_name public.example.com;
}
server {
listen 80;
root /usr/share/nginx/html/secure;
server_name secure.example.com;
}
server {
listen 80;
root /usr/share/nginx/html/admin;
server_name admin.example.com;
}
server {
listen 80;
root /usr/share/nginx/html/dev;
server_name dev.example.com;
}
server {
listen 80;
root /usr/share/nginx/html/mail;
server_name mx1.mail.example.com mx2.mail.example.com;
}
server {
listen 80;
root /usr/share/nginx/html/singlefactor;
server_name singlefactor.example.com;
}
}


@ -0,0 +1,138 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-backend
namespace: authelia
labels:
app: nginx-backend
spec:
replicas: 1
selector:
matchLabels:
app: nginx-backend
template:
metadata:
labels:
app: nginx-backend
spec:
containers:
- name: nginx-backend
image: nginx:alpine
ports:
- containerPort: 80
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/nginx.conf
- name: nginx-html
mountPath: /usr/share/nginx/html
volumes:
- name: nginx-config
hostPath:
path: /configmaps/nginx-backend/nginx.conf
type: File
- name: nginx-html
hostPath:
path: /configmaps/nginx-backend/html
type: Directory
...
---
apiVersion: v1
kind: Service
metadata:
name: nginx-backend-service
namespace: authelia
labels:
app: nginx-backend
spec:
selector:
app: nginx-backend
ports:
- port: 80
name: http
- port: 443
name: https
...
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: nginx-backend-ingress
namespace: authelia
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: authelia-forwardauth-authelia@kubernetescrd
spec:
rules:
- host: home.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nginx-backend-service
port:
number: 80
- host: public.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nginx-backend-service
port:
number: 80
- host: admin.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nginx-backend-service
port:
number: 80
- host: dev.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nginx-backend-service
port:
number: 80
- host: mx1.mail.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nginx-backend-service
port:
number: 80
- host: mx2.mail.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nginx-backend-service
port:
number: 80
- host: singlefactor.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nginx-backend-service
port:
number: 80
...


@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDEzCCAfugAwIBAgIUJZXxXExVQPJhc8TnlD+uAAYHlvwwDQYJKoZIhvcNAQEL
BQAwGDEWMBQGA1UEAwwNKi5leGFtcGxlLmNvbTAgFw0xOTA5MjYyMDAwMTBaGA8y
MTE5MDkwMjIwMDAxMFowGDEWMBQGA1UEAwwNKi5leGFtcGxlLmNvbTCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3DFTAdrxG6iOj5UjSeB5lMjMQQyeYm
OxUvswwwBzmQYPUt0inAJ9QmXJ8i9Fbye8HHYUeqE5zsEfeHir81MiWfhi9oUzJt
u3bmxGLDXYaApejd18hBKITX6MYogmK2lWrl/F9zPYxc2xM/fqWnGg2xwdrMmida
hZjDUfh0rtoz8zqOzJaiiDoFMwNO+NTGmDbeOwBFYOF1OTkS3aJWwJCLZmINUG8h
Z3YPR+SL8CpGGl0xhJYAwXD1AtMlYwAteTILqrqvo2XkGsvuj0mx0w/D0DDpC48g
oSNsRIVTW3Ql3uu+kXDFtkf4I63Ctt85rZk1kX3QtYmS0pRzvmyY/b0CAwEAAaNT
MFEwHQYDVR0OBBYEFMTozK79Kp813+8TstjXRFw1MTE5MB8GA1UdIwQYMBaAFMTo
zK79Kp813+8TstjXRFw1MTE5MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
BQADggEBALf1bJf3qF3m54+q98E6lSE+34yi/rVdzB9reAW1QzvvqdJRtsfjt39R
SznsbmrvCfK4SLyOj9Uhd8Z6bASPPNsUux1XAGN4AqaGmlYI8b7j3LhKCdRBZQ0I
zWgPhocyWwp5VkFe68zR06NHme/2B6eBRFsdd/69DIOv9YnEGUHk3A/9v1zvolt9
krW57Oz63zWGYXmtPPTD8of/Ya6NKqwonVx1MUQ5QzqH3WySYhRsIYqwUEXm9jt5
GEM3Nx0phEltaOLXa71nqS/Rhg/5Kod0cFaNoSKb6N93I8bqKKTK0m5wMJ5Fisrm
Pw5+AIar7RT5gHU2DD2/OTb9bXXww8I=
-----END CERTIFICATE-----


@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAvcMVMB2vEbqI6PlSNJ4HmUyMxBDJ5iY7FS+zDDAHOZBg9S3S
KcAn1CZcnyL0VvJ7wcdhR6oTnOwR94eKvzUyJZ+GL2hTMm27dubEYsNdhoCl6N3X
yEEohNfoxiiCYraVauX8X3M9jFzbEz9+pacaDbHB2syaJ1qFmMNR+HSu2jPzOo7M
lqKIOgUzA0741MaYNt47AEVg4XU5ORLdolbAkItmYg1QbyFndg9H5IvwKkYaXTGE
lgDBcPUC0yVjAC15Mguquq+jZeQay+6PSbHTD8PQMOkLjyChI2xEhVNbdCXe676R
cMW2R/gjrcK23zmtmTWRfdC1iZLSlHO+bJj9vQIDAQABAoIBAEZvkP/JJOCJwqPn
V3IcbmmilmV4bdi1vByDFgyiDyx4wOSA24+PubjvfFW9XcCgRPuKjDtTj/AhWBHv
B7stfa2lZuNV7/u562mZArA+IAr62Zp0LdIxDV8x3T8gbjVB3HhPYbv0RJZDKTYd
zV6jhfIrVu9mHpoY6ZnodhapCPYIyk/d49KBIHZuAc25CUjMXgTeaVtf0c996036
UxW6ef33wAOJAvW0RCvbXAJfmBeEq2qQlkjTIlpYx71fhZWexHifi8Ouv3Zonc+1
/P2Adq5uzYVBT92f9RKHg9QxxNzVrLjSMaxyvUtWQCAQfW0tFIRdqBGsHYsQrFtI
F4yzv8ECgYEA7ntpyN9HD9Z9lYQzPCR73sFCLM+ID99aVij0wHuxK97bkSyyvkLd
7MyTaym3lg1UEqWNWBCLvFULZx7F0Ah6qCzD4ymm3Bj/ADpWWPgljBI0AFml+HHs
hcATmXUrj5QbLyhiP2gmJjajp1o/rgATx6ED66seSynD6JOH8wUhhZUCgYEAy7OA
06PF8GfseNsTqlDjNF0K7lOqd21S0prdwrsJLiVzUlfMM25MLE0XLDUutCnRheeh
IlcuDoBsVTxz6rkvFGD74N+pgXlN4CicsBq5ofK060PbqCQhSII3fmHobrZ9Cr75
HmBjAxHx998SKaAAGbBbcYGUAp521i1pH5CEPYkCgYEAkUd1Zf0+2RMdZhwm6hh/
rW+l1I6IoMK70YkZsLipccRNld7Y9LbfYwYtODcts6di9AkOVfueZJiaXbONZfIE
Zrb+jkAteh9wGL9xIrnohbABJcV3Kiaco84jInUSmGDtPokncOENfHIEuEpuSJ2b
bx1TuhmAVuGWivR0+ULC7RECgYEAgS0cDRpWc9Xzh9Cl7+PLsXEvdWNpPsL9OsEq
0Ep7z9+/+f/jZtoTRCS/BTHUpDvAuwHglT5j3p5iFMt5VuiIiovWLwynGYwrbnNS
qfrIrYKUaH1n1oDS+oBZYLQGCe9/7EifAjxtjYzbvSyg//SPG7tSwfBCREbpZXj2
qSWkNsECgYA/mCDzCTlrrWPuiepo6kTmN+4TnFA+hJI6NccDVQ+jvbqEdoJ4SW4L
zqfZSZRFJMNpSgIqkQNRPJqMP0jQ5KRtJrjMWBnYxktwKz9fDg2R2MxdFgMF2LH2
HEMMhFHlv8NDjVOXh1KwRoltNGVWYsSrD9wKU9GhRCEfmNCGrvBcEg==
-----END RSA PRIVATE KEY-----


@ -0,0 +1,153 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: authelia
namespace: authelia
labels:
app: authelia
spec:
replicas: 1
selector:
matchLabels:
app: authelia
template:
metadata:
labels:
app: authelia
spec:
containers:
- name: authelia
image: authelia:dist
ports:
- containerPort: 443
readinessProbe:
httpGet:
scheme: HTTPS
path: /api/health
port: 443
initialDelaySeconds: 3
periodSeconds: 3
volumeMounts:
- name: authelia-config
mountPath: /config/configuration.yml
readOnly: true
- name: authelia-ssl
mountPath: /config/ssl
readOnly: true
- name: secrets
mountPath: /config/secrets
readOnly: true
env:
# We set secrets directly here for ease of deployment but all secrets
# should be stored in the Kube Vault in production.
- name: AUTHELIA_JWT_SECRET_FILE
value: /config/secrets/jwt_secret
- name: AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE
value: /config/secrets/ldap_password
- name: AUTHELIA_SESSION_SECRET_FILE
value: /config/secrets/session
- name: AUTHELIA_STORAGE_MYSQL_PASSWORD_FILE
value: /config/secrets/sql_password
- name: AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE
value: /config/secrets/encryption_key
- name: ENVIRONMENT
value: dev
volumes:
- name: authelia-config
hostPath:
path: /configmaps/authelia/configuration.yml
type: File
- name: authelia-ssl
hostPath:
path: /configmaps/authelia/ssl
type: Directory
- name: secrets
secret:
secretName: authelia
items:
- key: jwt_secret
path: jwt_secret
- key: session
path: session
- key: sql_password
path: sql_password
- key: ldap_password
path: ldap_password
- key: encryption_key
path: encryption_key
...
---
apiVersion: v1
kind: Service
metadata:
name: authelia-service
namespace: authelia
annotations:
traefik.ingress.kubernetes.io/service.serverstransport: authelia-skipverify@kubernetescrd
spec:
selector:
app: authelia
ports:
- protocol: TCP
port: 443
targetPort: 443
...
---
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: authelia
namespace: authelia
labels:
app: authelia
data:
jwt_secret: YW5fdW5zZWN1cmVfc2VjcmV0 # an_unsecure_secret
ldap_password: cGFzc3dvcmQ= # password
session: dW5zZWN1cmVfcGFzc3dvcmQ= # unsecure_password
sql_password: cGFzc3dvcmQ= # password
encryption_key: YV9ub3Rfc29fc2VjdXJlX2VuY3J5cHRpb25fa2V5
...
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: authelia-ingress
namespace: authelia
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
rules:
- host: login.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: authelia-service
port:
number: 443
...
---
apiVersion: traefik.containo.us/v1alpha1
kind: Middleware
metadata:
name: forwardauth-authelia
namespace: authelia
labels:
app.kubernetes.io/instance: authelia
app.kubernetes.io/name: authelia
spec:
forwardAuth:
address: https://authelia-service.authelia.svc.cluster.local/api/verify?rd=https://login.example.com:8080
authResponseHeaders:
- Remote-User
- Remote-Name
- Remote-Email
- Remote-Groups
tls:
insecureSkipVerify: true
...
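The `traefik.ingress.kubernetes.io/router.middlewares` annotation used above follows Traefik's `<namespace>-<middleware-name>@kubernetescrd` naming convention, which is how the `forwardauth-authelia` Middleware defined in this file gets attached to the protected ingresses. A hypothetical smoke test from the host (assuming the suite's hosts entries and the 8080:443 port mapping):

```bash
# An unauthenticated request to a protected host should be redirected to the portal.
curl -k -I https://public.example.com:8080/
```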


@ -35,6 +35,8 @@ access_control:
   rules:
     # Rules applied to everyone
+    - domain: home.example.com
+      policy: bypass
     - domain: public.example.com
       policy: bypass
     - domain: secure.example.com


@ -1,76 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: authelia
namespace: authelia
labels:
app: authelia
spec:
replicas: 1
selector:
matchLabels:
app: authelia
template:
metadata:
labels:
app: authelia
spec:
containers:
- name: authelia
image: authelia:dist
ports:
- containerPort: 443
volumeMounts:
- name: config-volume
mountPath: /config
- name: ssl-volume
mountPath: /config/ssl
- name: secrets
mountPath: /app/secrets
readOnly: true
env:
# We set secrets directly here for ease of deployment but all secrets
# should be stored in the Kube Vault in production.
- name: AUTHELIA_JWT_SECRET_FILE
value: /app/secrets/jwt_secret
- name: AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE
value: /app/secrets/ldap_password
- name: AUTHELIA_SESSION_SECRET_FILE
value: /app/secrets/session
- name: AUTHELIA_STORAGE_MYSQL_PASSWORD_FILE
value: /app/secrets/sql_password
- name: AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE
value: /app/secrets/encryption_key
- name: ENVIRONMENT
value: dev
volumes:
- name: config-volume
configMap:
name: authelia-config
items:
- key: configuration.yml
path: configuration.yml
- name: ssl-volume
configMap:
name: authelia-ssl
items:
- key: cert.pem
path: cert.pem
- key: key.pem
path: key.pem
- name: secrets
secret:
secretName: authelia
items:
- key: jwt_secret
path: jwt_secret
- key: session
path: session
- key: sql_password
path: sql_password
- key: ldap_password
path: ldap_password
- key: encryption_key
path: encryption_key
...


@ -1,23 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: authelia-ingress
namespace: authelia
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
tls:
- secretName: authelia-tls
hosts:
- login.example.com
rules:
- host: login.example.com
http:
paths:
- path: /
backend:
serviceName: authelia-service
servicePort: 443
...


@ -1,16 +0,0 @@
---
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: authelia
namespace: authelia
labels:
app: authelia
data:
jwt_secret: YW5fdW5zZWN1cmVfc2VjcmV0 # an_unsecure_secret
ldap_password: cGFzc3dvcmQ= # password
session: dW5zZWN1cmVfcGFzc3dvcmQ= # unsecure_password
sql_password: cGFzc3dvcmQ= # password
encryption_key: YV9ub3Rfc29fc2VjdXJlX2VuY3J5cHRpb25fa2V5
...
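The values in `data` above are plain base64-encoded strings, as the trailing comments indicate; they can be reproduced or checked with coreutils:

```bash
# Encode a secret value for the manifest, then decode it back to verify.
echo -n 'an_unsecure_secret' | base64      # YW5fdW5zZWN1cmVfc2VjcmV0
echo 'YW5fdW5zZWN1cmVfc2VjcmV0' | base64 -d
```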


@ -1,14 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: authelia-service
namespace: authelia
spec:
selector:
app: authelia
ports:
- protocol: TCP
port: 443
targetPort: 443
...


@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC/jCCAeagAwIBAgIRAKF0IRxC55eee6icERVf6fgwDQYJKoZIhvcNAQELBQAw
EjEQMA4GA1UEChMHQWNtZSBDbzAgFw0yMDAzMDExMjMzMzlaGA8yMTIwMDIwNjEy
MzMzOVowEjEQMA4GA1UEChMHQWNtZSBDbzCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBAMi7/oSazFIxP3rHsSLjw5XPnpMKEaVwU1zLRzW6W80BDa/ER5to
I3POGLv8lAhtUwB6WvyilrCZfs/D5lkcCxswafU/2LNppFuODnW+PG9eobgOy6Nv
f+KbnZFPRV7PB2yK6DqMyb+tbTQ7F6rEf4i6n28DI0dNyNvUCk0ld3o93LZBvC/D
/+Ulf3Vtdfsd2TckXvdA8lH4VGQJ+FIxhboTlbW8VJlk1V7FZef7+m867kOnPSaj
zv5yygrIA0XPaMAZC/SZrXHMdhvcs43fgmmTel7JD4Sy/Z/pmFlrZr5Xa8jcWycJ
ILLuPnXhgKstgq5wtDkTMZ6rpgMrKcjMKcMCAwEAAaNNMEswDgYDVR0PAQH/BAQD
AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwFgYDVR0RBA8w
DYILZXhhbXBsZS5jb20wDQYJKoZIhvcNAQELBQADggEBABdWkbipzPivAvvamMmQ
5iPPeStfdr5MBxJGT9nPbeXdtS/13FJnspLBMMYOw/2AZk7VFrNjxkXc4NHZSlGz
FcGMlSO40fyirdYaQTDtS230ucLB+LzfZx37y9dKpEKVmQ151kKJjJ4hAZ47LmAQ
aFoDLRo7PA2HmnJ60GrI9wVp96uy1sQ6PcToIyMcVEQ/tLEEow+ykSeiZb9+qBKV
K9GUcu2LorhBtUMmEWs0TJElaf6eKUoG6JXM2byulDg24w5b9gC26kAlHWc5WDU5
pAXOjlN/OYHB0sDbYViWIL390376fYIfu2N5EDKY4QjEYsWEs4Wm9HVS9IgHP/Gi
Xbo=
-----END CERTIFICATE-----


@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDIu/6EmsxSMT96
x7Ei48OVz56TChGlcFNcy0c1ulvNAQ2vxEebaCNzzhi7/JQIbVMAelr8opawmX7P
w+ZZHAsbMGn1P9izaaRbjg51vjxvXqG4Dsujb3/im52RT0Vezwdsiug6jMm/rW00
OxeqxH+Iup9vAyNHTcjb1ApNJXd6Pdy2Qbwvw//lJX91bXX7Hdk3JF73QPJR+FRk
CfhSMYW6E5W1vFSZZNVexWXn+/pvOu5Dpz0mo87+csoKyANFz2jAGQv0ma1xzHYb
3LON34Jpk3peyQ+Esv2f6ZhZa2a+V2vI3FsnCSCy7j514YCrLYKucLQ5EzGeq6YD
KynIzCnDAgMBAAECggEAC13R0LJvRWwyewJZvm8FQTNreEoGq8aLgeKk2p792cLo
gn5ry5n+/+y4q9RmkX+XRpynEE0omUFn09306jDTVCvOpCuEWsxtmR2XJgWqqGfE
Yoa78zo6FJvZNUQ22mKAuh23frFAL1FjsKRz96B+1EA1DPUxhzUZXZFJMAsiE9LZ
PxqPmnqXbPZsOb1XG33TAdCp6CC3H8KHICC+i4IC8prjKHGH/Q1saoNw8jmgwv0S
DelQUbEtqfmE6BmyTGxdeu4uW2Nv/wcENwySAOPKi5gstlbSKTa4IpKGp7CdquWi
stUW6pnSiEeDrDAzwC8uWdncOvnkAy2lRJkz/F9YoQKBgQDrCCqYdvGshecBBnfQ
fowxak2YBfG2jhAKPMHzrvQn5FIb+11x/jeXPEfOB6FShIzZ97JpFIpH3tcONlj3
OVzGCTD6WdRTcltzXVneJtNog7DliNFY4YmIPmQJ+y+EvJW1rSZTZAZI1Nbijg3n
fSd0PTzvgOGHSl1//RI1mFx7MwKBgQDapIPPSF0yf1UJ6Hhzam5NHGZ9fSqV5Qs0
Gi7uM08iDV5K7xiPglBkbN2EuMlgVnHaa5g8X897uwRSYR6nL4PRvcJiNSvnhWhe
+K3x7iHewIPYVfcghoqzuPKsXH2Zm26usdXHxBBa3IBbKtGaHnAd9h65AOUYAmAx
C2BzN90XMQKBgE2MjEFyPZunMulrsOziVG+Zm7ClhXOuvCwkj/pPp8/hzhXdgp+y
ObV09lxMuDX59l+VExEI7fd414yg8gngq3PMZJS2PxCpkvMlwhlCxk6d5ShXVHv3
LuH9dBS3BJ7PerZPQ24QeuJdF+n45S2UZgg8jHaaF9AEAYXRgsicVSdxAoGAJI0U
K/bg/awjv0BJwqGsRt/Ukm32TJC5ysAF0HRrajnp5YULChKy9dbtQV7S63QIHIeY
L5+kw/6DvnHV+gULeGjMsjZJXK8Ev7u6+JLivqZYZDYa1iknztvAVegwZxmA61t3
bantQgNSwerql2U3QQsAH9Vydw0On6RTP2+7WkECgYBWD3u64hBKmAxPkqPotkgI
w/jdOlv8FLHO79+oH1PtKvkzspcYaecKGDm/RNLIXLYnt0AmZEK4qQ4/zDFaR/rc
AhoxK2cKTRltMrhp1ivtFfLggVGogtYNxEnjnsD4KMvH3SjSNdt06YgtZ92++fOp
UsE8Mpf4/G5X7DmcHJHk+w==
-----END PRIVATE KEY-----


@ -1,9 +0,0 @@
#!/bin/sh
start_authelia() {
kubectl create configmap authelia-config --namespace=authelia --from-file=authelia/configs/configuration.yml
kubectl create configmap authelia-ssl --namespace=authelia --from-file=authelia/ssl/cert.pem --from-file=authelia/ssl/key.pem
kubectl apply -f authelia
}
start_authelia


@ -1,11 +0,0 @@
#!/usr/bin/env bash
start_dashboard() {
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml
kubectl apply -f dashboard.yml
echo "Bearer token for UI user."
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
}
start_dashboard


@ -1,42 +0,0 @@
#!/bin/sh
start_apps() {
# Create TLS certificate and key for HTTPS termination
kubectl create secret generic test-app-tls --namespace=authelia --from-file=apps/ssl/server.key --from-file=apps/ssl/server.cert
kubectl create configmap nginx-config --namespace=authelia --from-file=apps/configs/entrypoint.sh --from-file=apps/configs/nginx.conf --from-file=apps/configs/html.tar.gz
# Spawn the applications
kubectl apply -f apps
}
start_ingress_controller() {
kubectl apply -f ingress-controller
}
# Spawn Redis and storage backend
# Please note they are not configured to be distributed on several machines
start_storage() {
kubectl apply -f storage
}
# Create a fake mailbox to catch emails sent by Authelia
start_mail() {
kubectl apply -f mail
}
start_ldap() {
kubectl create configmap ldap-config --namespace=authelia --from-file=ldap/base.ldif --from-file=ldap/access.rules
kubectl apply -f ldap
}
# Create the Authelia namespace in the cluster
create_namespace() {
kubectl apply -f namespace.yml
}
create_namespace
start_storage
start_ldap
start_mail
start_ingress_controller
start_apps


@ -1,21 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
...
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
...


@ -0,0 +1,346 @@
# Kubernetes Dashboard
---
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
...
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
...
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
...
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
...
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
...
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
...
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
...
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] # yamllint disable-line rule:line-length
verbs: ["get"]
...
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
...
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
...
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
...
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
...
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
...
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.8
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
...
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
...
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
...
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
name: kubernetes-dashboard-ingress
namespace: kubernetes-dashboard
spec:
entryPoints:
- websecure
routes:
- match: HostSNI(`kubernetes.example.com`)
services:
- name: kubernetes-dashboard
port: 443
tls:
passthrough: true
...
# Traefik Dashboard
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: traefik-dashboard-ingress
namespace: authelia
spec:
entryPoints:
- websecure
routes:
- match: Host(`traefik.example.com`)
kind: Rule
services:
- name: api@internal
kind: TraefikService
...
---
apiVersion: traefik.containo.us/v1alpha1
kind: ServersTransport
metadata:
name: skipverify
namespace: authelia
spec:
insecureSkipVerify: true
...
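Because the dashboard is exposed via `IngressRouteTCP` with TLS passthrough, logging in still requires a bearer token for the `admin-user` ServiceAccount defined above; the suite generates one with the same command used by `GetDashboardToken` in the suite code:

```bash
# Print a login token for the Kubernetes dashboard's admin-user ServiceAccount.
kubectl -n kubernetes-dashboard create token admin-user
```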


@ -1,45 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-ingress-controller
namespace: authelia
labels:
app: nginx-ingress-controller
spec:
replicas: 1
revisionHistoryLimit: 0
selector:
matchLabels:
app: nginx-ingress-controller
template:
metadata:
labels:
app: nginx-ingress-controller
annotations:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
terminationGracePeriodSeconds: 60
serviceAccountName: nginx-ingress-controller-serviceaccount
containers:
- image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.23.0
name: nginx-ingress-controller
imagePullPolicy: Always
ports:
- containerPort: 80
- containerPort: 443
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
args:
- /nginx-ingress-controller
- --ingress-class=nginx
- --election-id=ingress-controller-leader
...


@ -1,141 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nginx-ingress-controller-serviceaccount
namespace: authelia
labels:
app: nginx-ingress-controller
...
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: nginx-ingress-controller-clusterrole
labels:
app: nginx-ingress-controller
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- "extensions"
resources:
- ingresses/status
verbs:
- update
...
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: nginx-ingress-controller-role
namespace: authelia
labels:
app: nginx-ingress-controller
rules:
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
# Defaults to "<election-id>-<ingress-class>"
# Here: "<ingress-controller-leader>-<nginx>"
# This has to be adapted if you change either parameter
# when launching the nginx-ingress-controller.
- "ingress-controller-leader-nginx"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
...
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: nginx-ingress-controller-role-nisa-binding
namespace: authelia
labels:
app: nginx-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-ingress-controller-role
subjects:
- kind: ServiceAccount
name: nginx-ingress-controller-serviceaccount
namespace: authelia
...
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: nginx-ingress-controller-clusterrole-nisa-binding
labels:
app: nginx-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-ingress-controller-clusterrole
subjects:
- kind: ServiceAccount
name: nginx-ingress-controller-serviceaccount
namespace: authelia
...


@ -1,18 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: nginx-ingress-controller-service
namespace: authelia
labels:
app: nginx-ingress-controller
spec:
selector:
app: nginx-ingress-controller
type: NodePort
ports:
- port: 80
name: http
- port: 443
name: https
...


@ -1,7 +0,0 @@
olcAccess: {0}to attrs=userPassword,shadowLastChange by self write by anonymou
s auth by * none
# olcAccess: {1}to dn.base="" by * read
# olcAccess: {2}to * by * read
olcPasswordHash: {CRYPT}
olcPasswordCryptSaltFormat: $6$rounds=50000$%.16s
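`olcPasswordCryptSaltFormat` above selects SHA-512 crypt hashes, and the `userPassword` values in `base.ldif` are of the same `{CRYPT}$6$rounds=...` form. A hypothetical way to produce such a hash (using `mkpasswd` from the Debian `whois` package):

```bash
# Generate a SHA-512 crypt hash with explicit rounds; prefix it with {CRYPT} in LDIF.
mkpasswd -m sha-512 -R 50000 'password'
```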


@ -1,67 +0,0 @@
dn: ou=groups,dc=example,dc=com
objectClass: organizationalUnit
objectClass: top
ou: groups
dn: ou=users,dc=example,dc=com
objectClass: organizationalUnit
objectClass: top
ou: users
dn: cn=dev,ou=groups,dc=example,dc=com
cn: dev
member: uid=john,ou=users,dc=example,dc=com
member: uid=bob,ou=users,dc=example,dc=com
objectClass: groupOfNames
objectClass: top
dn: cn=admins,ou=groups,dc=example,dc=com
cn: admins
member: uid=john,ou=users,dc=example,dc=com
objectClass: groupOfNames
objectClass: top
dn: uid=john,ou=users,dc=example,dc=com
uid: john
cn: john
objectClass: inetOrgPerson
objectClass: top
mail: john.doe@authelia.com
sn: John Doe
userPassword: {CRYPT}$6$rounds=500000$jgiCMRyGXzoqpxS3$w2pJeZnnH8bwW3zzvoMWtTRfQYsHbWbD/hquuQ5vUeIyl9gdwBIt6RWk2S6afBA0DPakbeWgD/4SZPiS0hYtU/
dn: uid=harry,ou=users,dc=example,dc=com
uid: harry
cn: harry
objectClass: inetOrgPerson
objectClass: top
mail: harry.potter@authelia.com
sn: Harry Potter
userPassword: {CRYPT}$6$rounds=500000$jgiCMRyGXzoqpxS3$w2pJeZnnH8bwW3zzvoMWtTRfQYsHbWbD/hquuQ5vUeIyl9gdwBIt6RWk2S6afBA0DPakbeWgD/4SZPiS0hYtU/
dn: uid=bob,ou=users,dc=example,dc=com
uid: bob
cn: bob
objectClass: inetOrgPerson
objectClass: top
mail: bob.dylan@authelia.com
sn: Bob Dylan
userPassword: {CRYPT}$6$rounds=500000$jgiCMRyGXzoqpxS3$w2pJeZnnH8bwW3zzvoMWtTRfQYsHbWbD/hquuQ5vUeIyl9gdwBIt6RWk2S6afBA0DPakbeWgD/4SZPiS0hYtU/
dn: uid=james,ou=users,dc=example,dc=com
uid: james
cn: james
objectClass: inetOrgPerson
objectClass: top
mail: james.dean@authelia.com
sn: James Dean
userPassword: {CRYPT}$6$rounds=500000$jgiCMRyGXzoqpxS3$w2pJeZnnH8bwW3zzvoMWtTRfQYsHbWbD/hquuQ5vUeIyl9gdwBIt6RWk2S6afBA0DPakbeWgD/4SZPiS0hYtU/
dn: uid=blackhat,ou=users,dc=example,dc=com
uid: blackhat
cn: blackhat
objectClass: inetOrgPerson
objectClass: top
mail: billy.blackhat@authelia.com
sn: Billy BlackHat
userPassword: {CRYPT}$6$rounds=500000$jgiCMRyGXzoqpxS3$w2pJeZnnH8bwW3zzvoMWtTRfQYsHbWbD/hquuQ5vUeIyl9gdwBIt6RWk2S6afBA0DPakbeWgD/4SZPiS0hYtU/


@ -41,19 +41,24 @@ spec:
         - name: LDAP_TLS_VERIFY_CLIENT
           value: try
         volumeMounts:
-        - name: config-volume
-          mountPath: /container/service/slapd/assets/config/bootstrap/ldif/custom/base.ldif
-          subPath: base.ldif
-        - name: config-volume
-          mountPath: /container/service/slapd/assets/config/bootstrap/ldif/custom/access.rules
-          subPath: access.rules
+        - name: ldap-config
+          mountPath: /container/service/slapd/assets/config/bootstrap/ldif/custom
       volumes:
-      - name: config-volume
-        configMap:
-          name: ldap-config
-          items:
-          - key: base.ldif
-            path: base.ldif
-          - key: access.rules
-            path: access.rules
+      - name: ldap-config
+        hostPath:
+          path: /configmaps/ldap
+          type: Directory
+...
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: ldap-service
+  namespace: authelia
+spec:
+  selector:
+    app: ldap
+  ports:
+  - protocol: TCP
+    port: 636
 ...


@ -1,13 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: ldap-service
namespace: authelia
spec:
selector:
app: ldap
ports:
- protocol: TCP
port: 636
...


@ -1,25 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mailcatcher
namespace: authelia
labels:
app: mailcatcher
spec:
replicas: 1
selector:
matchLabels:
app: mailcatcher
template:
metadata:
labels:
app: mailcatcher
spec:
containers:
- name: mailcatcher
image: schickling/mailcatcher
ports:
- containerPort: 1025
- containerPort: 1080
...


@ -1,22 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: mailcatcher-ingress
namespace: authelia
annotations:
kubernetes.io/ingress.class: "nginx"
spec:
tls:
- secretName: mail-tls
hosts:
- mail.example.com
rules:
- host: mail.example.com
http:
paths:
- path: /
backend:
serviceName: mailcatcher-service
servicePort: 1080
...


@ -0,0 +1,64 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mailcatcher
namespace: authelia
labels:
app: mailcatcher
spec:
replicas: 1
selector:
matchLabels:
app: mailcatcher
template:
metadata:
labels:
app: mailcatcher
spec:
containers:
- name: mailcatcher
image: schickling/mailcatcher
ports:
- containerPort: 1025
- containerPort: 1080
...
---
apiVersion: v1
kind: Service
metadata:
name: mailcatcher-service
namespace: authelia
spec:
selector:
app: mailcatcher
ports:
- protocol: TCP
port: 1080
name: ui
- protocol: TCP
port: 1025
name: smtp
...
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mailcatcher-ingress
namespace: authelia
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
rules:
- host: mail.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: mailcatcher-service
port:
number: 1080
...


@ -1,17 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: mailcatcher-service
namespace: authelia
spec:
selector:
app: mailcatcher
ports:
- protocol: TCP
port: 1080
name: ui
- protocol: TCP
port: 1025
name: smtp
...


@ -0,0 +1,6 @@
---
mirrors:
"docker.io":
endpoint:
- http://registrycache.internal:5000
...
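This is a k3s registries configuration: it tells the embedded cluster to pull `docker.io` images through the CI registry cache. The suite hands it to cluster creation via the `--registry-config` flag, as seen in `CreateCluster` further below:

```bash
# Sketch: how the mirror file above is consumed when the cluster is created.
k3d cluster create --registry-config /authelia/registry.yml
```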


@ -21,9 +21,11 @@ spec:
         image: mariadb:10.4.10
         ports:
         - containerPort: 3306
+        readinessProbe:
+          tcpSocket:
+            port: 3306
+          periodSeconds: 1
         env:
-        - name: SLAPD_ORGANISATION
-          value: MyCompany
         - name: MYSQL_ROOT_PASSWORD
           value: rootpassword
         - name: MYSQL_USER
@ -32,13 +34,6 @@ spec:
           value: password
         - name: MYSQL_DATABASE
           value: authelia
-        volumeMounts:
-        - name: data-volume
-          mountPath: /var/lib/mysql
-      volumes:
-      - name: data-volume
-        hostPath:
-          path: /data/storage/mysql
 ...
 ---
 apiVersion: v1


@ -21,6 +21,10 @@ spec:
         image: redis:3.2.11-alpine
         ports:
         - containerPort: 6379
+        readinessProbe:
+          tcpSocket:
+            port: 6379
+          periodSeconds: 1
 ...
 ---
 apiVersion: v1


@ -1,23 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: test-app1
namespace: kube-public
labels:
app: test-app1
spec:
replicas: 1
selector:
matchLabels:
app: test-app1
template:
metadata:
labels:
app: test-app1
spec:
containers:
- name: test-app1
image: authelia/authelia:kube
imagePullPolicy: Never
...


@ -1,53 +0,0 @@
---
version: '3.4'
services:
authelia:
image: authelia/authelia:latest
# Used for Docker configs
configs:
- source: authelia
target: /config/configuration.yml
uid: '0'
gid: '0'
mode: 0444 # yamllint disable-line rule:octal-values
environment:
- NODE_TLS_REJECT_UNAUTHORIZED=0
# Where the authelia volume is to be mounted. To only use a single volume, the minimal config
# needs to be changed to read the users_database.yml also from this subdirectory.
# Otherwise a second volume will need to be configured here to mount the users_database.yml.
volumes:
- authelia:/config/storage
networks:
- overlay
deploy:
# Configure Authelia to automatically restart on failure.
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
# Mode: global would start authelia on all available nodes,
# replicated limits it to how many replicas are configured.
mode: replicated
# How many replicas are wanted. Can be any number >0 up to however many nodes are available.
replicas: 1
placement:
constraints:
- node.role == worker
# The volume for authelia needs to be configured.
# There are many drivers available. Such as local storage, ceph-rdb, nfs, cifs etc.
volumes:
authelia:
driver: default
name: volume-authelia
networks:
overlay:
external: true
# This is needed if Docker configs are being used to provide Authelia with its configuration.
configs:
authelia:
external: true
...


@ -3,53 +3,41 @@ package suites
 import (
 	"fmt"
 	"os/exec"
+	"regexp"
 	"strings"
 	"time"

 	"github.com/authelia/authelia/v4/internal/utils"
 )

-var kindImageName = "authelia-kind-proxy"
+var k3dImageName = "k3d"

-var dockerCmdLine = fmt.Sprintf("docker-compose -p authelia -f internal/suites/docker-compose.yml -f internal/suites/example/compose/kind/docker-compose.yml run -T --rm %s", kindImageName)
+var dockerCmdLine = fmt.Sprintf("docker-compose -p authelia -f internal/suites/docker-compose.yml -f internal/suites/example/compose/k3d/docker-compose.yml exec -T %s", k3dImageName)

-// Kind used for running kind commands.
-type Kind struct{}
+// K3D used for running kind commands.
+type K3D struct{}

-func kindCommand(cmdline string) *exec.Cmd {
+func k3dCommand(cmdline string) *exec.Cmd {
 	cmd := fmt.Sprintf("%s %s", dockerCmdLine, cmdline)
 	return utils.Shell(cmd)
 }

 // CreateCluster create a new Kubernetes cluster.
-func (k Kind) CreateCluster() error {
-	cmd := kindCommand("kind create cluster --config /etc/kind/config.yml")
-	if err := cmd.Run(); err != nil {
-		return err
-	}
-
-	cmd = kindCommand("patch-kubeconfig.sh")
-	if err := cmd.Run(); err != nil {
-		return err
-	}
-
-	// This command is necessary to fix the coredns loop detected when using user-defined docker network.
-	// In that case /etc/resolv.conf use 127.0.0.11 as DNS and CoreDNS thinks it is talking to itself which is wrong.
-	// This IP is the docker internal DNS so it is safe to disable the loop check.
-	cmd = kindCommand("sh -c 'kubectl -n kube-system get configmap/coredns -o yaml | grep -v loop | kubectl replace -f -'")
+func (k K3D) CreateCluster() error {
+	cmd := k3dCommand("k3d cluster create --registry-config /authelia/registry.yml -v /authelia:/var/lib/rancher/k3s/server/manifests/custom -v /configmaps:/configmaps -p 8080:443")
 	err := cmd.Run()

 	return err
 }

 // DeleteCluster delete a Kubernetes cluster.
-func (k Kind) DeleteCluster() error {
-	cmd := kindCommand("kind delete cluster")
+func (k K3D) DeleteCluster() error {
+	cmd := k3dCommand("k3d cluster delete")
 	return cmd.Run()
 }

 // ClusterExists check whether a cluster exists.
-func (k Kind) ClusterExists() (bool, error) {
-	cmd := kindCommand("kind get clusters")
+func (k K3D) ClusterExists() (bool, error) {
+	cmd := k3dCommand("k3d cluster list")
 	cmd.Stdout = nil
 	cmd.Stderr = nil
 	output, err := cmd.Output()
@ -58,63 +46,27 @@ func (k Kind) ClusterExists() (bool, error) {
 		return false, err
 	}

-	return strings.Contains(string(output), "kind"), nil
+	return strings.Contains(string(output), "k3s-default"), nil
 }

 // LoadImage load an image in the Kubernetes container.
-func (k Kind) LoadImage(imageName string) error {
-	cmd := kindCommand(fmt.Sprintf("kind load docker-image %s", imageName))
+func (k K3D) LoadImage(imageName string) error {
+	cmd := k3dCommand(fmt.Sprintf("k3d image import %s", imageName))
 	return cmd.Run()
 }

 // Kubectl used for running kubectl commands.
 type Kubectl struct{}

-// StartProxy start a proxy.
-func (k Kubectl) StartProxy() error {
-	cmd := utils.Shell("docker-compose -p authelia -f internal/suites/docker-compose.yml -f internal/suites/example/compose/kind/docker-compose.yml up -d authelia-kind-proxy")
-	return cmd.Run()
-}
-
-// StopProxy stop a proxy.
-func (k Kubectl) StopProxy() error {
-	cmd := utils.Shell("docker-compose -p authelia -f internal/suites/docker-compose.yml -f internal/suites/example/compose/kind/docker-compose.yml rm -s -f authelia-kind-proxy")
-	return cmd.Run()
-}
-
-// StartDashboard start Kube dashboard.
-func (k Kubectl) StartDashboard() error {
-	if err := kindCommand("sh -c 'cd /authelia && ./bootstrap-dashboard.sh'").Run(); err != nil {
-		return err
-	}
-
-	err := utils.Shell("docker-compose -p authelia -f internal/suites/docker-compose.yml -f internal/suites/example/compose/kind/docker-compose.yml up -d kube-dashboard").Run()
-	return err
-}
-
-// StopDashboard stop kube dashboard.
-func (k Kubectl) StopDashboard() error {
-	cmd := utils.Shell("docker-compose -p authelia -f internal/suites/docker-compose.yml -f internal/suites/example/compose/kind/docker-compose.yml rm -s -f kube-dashboard")
-	return cmd.Run()
-}
-
-// DeployThirdparties deploy thirdparty services (ldap, db, ingress controllers, etc...).
-func (k Kubectl) DeployThirdparties() error {
-	cmd := kindCommand("sh -c 'cd /authelia && ./bootstrap.sh'")
-	return cmd.Run()
-}
-
-// DeployAuthelia deploy Authelia application.
-func (k Kubectl) DeployAuthelia() error {
-	cmd := kindCommand("sh -c 'cd /authelia && ./bootstrap-authelia.sh'")
-	return cmd.Run()
-}
+// GetDashboardToken generates bearer token for Kube Dashboard.
+func (k Kubectl) GetDashboardToken() error {
+	return k3dCommand("kubectl -n kubernetes-dashboard create token admin-user;echo ''").Run()
+}

 // WaitPodsReady wait for all pods to be ready.
-func (k Kubectl) WaitPodsReady(timeout time.Duration) error {
+func (k Kubectl) WaitPodsReady(namespace string, timeout time.Duration) error {
 	return utils.CheckUntil(5*time.Second, timeout, func() (bool, error) {
-		cmd := kindCommand("kubectl get -n authelia pods --no-headers")
+		cmd := k3dCommand(fmt.Sprintf("kubectl get -n %s pods --no-headers --field-selector=status.phase!=Succeeded", namespace))
 		cmd.Stdout = nil
 		cmd.Stderr = nil
 		output, _ := cmd.Output()
@ -129,10 +81,12 @@ func (k Kubectl) WaitPodsReady(timeout time.Duration) error {
 		}

 		for _, line := range nonEmptyLines {
-			if !strings.Contains(line, "1/1") {
+			re := regexp.MustCompile(`1/1|2/2`)
+			if !re.MatchString(line) {
 				return false, nil
 			}
 		}

 		return true, nil
 	})
 }


@ -1,7 +1,6 @@
 package suites

 import (
-	"fmt"
 	"os"
 	"time"

@ -13,94 +12,87 @@ import (
 var kubernetesSuiteName = "Kubernetes"

 func init() {
-	kind := Kind{}
+	dockerEnvironment := NewDockerEnvironment([]string{
+		"internal/suites/docker-compose.yml",
+		"internal/suites/example/compose/k3d/docker-compose.yml",
+	})
+
+	k3d := K3D{}
 	kubectl := Kubectl{}

 	setup := func(suitePath string) error {
-		cmd := utils.Shell("docker-compose -p authelia -f internal/suites/docker-compose.yml -f internal/suites/example/compose/kind/docker-compose.yml build")
-		if err := cmd.Run(); err != nil {
+		if err := dockerEnvironment.Up(); err != nil {
 			return err
 		}

-		exists, err := kind.ClusterExists()
+		err := waitUntilK3DIsReady(dockerEnvironment)
+		if err != nil {
+			return err
+		}
+
+		exists, err := k3d.ClusterExists()
 		if err != nil {
 			return err
 		}

 		if exists {
-			log.Debug("Kubernetes cluster already exists")
+			log.Info("Kubernetes cluster already exists")
 		} else {
-			err = kind.CreateCluster()
+			err = k3d.CreateCluster()
 			if err != nil {
 				return err
 			}
 		}

-		log.Debug("Building authelia:dist image or use cache if already built...")
+		log.Info("Building authelia:dist image or use cache if already built...")

 		if os.Getenv("CI") != t {
 			if err := utils.Shell("authelia-scripts docker build").Run(); err != nil {
 				return err
 			}
+
+			if err := utils.Shell("docker save authelia:dist -o internal/suites/example/kube/authelia-image-dev.tar").Run(); err != nil {
+				return err
+			}
 		}

-		log.Debug("Loading images into Kubernetes container...")
+		log.Info("Loading images into Kubernetes container...")

 		if err := loadDockerImages(); err != nil {
 			return err
 		}

-		log.Debug("Starting Kubernetes dashboard...")
-		if err := kubectl.StartDashboard(); err != nil {
+		log.Info("Waiting for cluster to be ready...")
+
+		if err := waitAllPodsAreReady(namespaceKube, 5*time.Minute); err != nil {
 			return err
 		}

-		log.Debug("Deploying thirdparties...")
-		if err := kubectl.DeployThirdparties(); err != nil {
+		log.Info("Waiting for dashboard to be ready...")
+
+		err = waitAllPodsAreReady(namespaceDashboard, 2*time.Minute)
+
+		log.Info("Bearer token for UI user:")
+
+		if err := kubectl.GetDashboardToken(); err != nil {
 			return err
 		}

-		log.Debug("Waiting for services to be ready...")
-		if err := waitAllPodsAreReady(5 * time.Minute); err != nil {
+		log.Info("Waiting for services to be ready...")
+
+		if err := waitAllPodsAreReady(namespaceAuthelia, 5*time.Minute); err != nil {
 			return err
 		}

-		log.Debug("Deploying Authelia...")
-		if err = kubectl.DeployAuthelia(); err != nil {
-			return err
-		}
-
-		log.Debug("Waiting for services to be ready...")
-		if err := waitAllPodsAreReady(2 * time.Minute); err != nil {
-			return err
-		}
-
-		log.Debug("Starting proxy...")
-		err = kubectl.StartProxy()
-
 		return err
 	}

 	teardown := func(suitePath string) error {
-		err := kubectl.StopDashboard()
-		if err != nil {
-			log.Errorf("Unable to stop Kubernetes dashboard: %s", err)
-		}
+		if err := k3d.DeleteCluster(); err != nil {
+			return err
+		}

-		err = kubectl.StopProxy()
-		if err != nil {
-			log.Errorf("Unable to stop Kind proxy: %s", err)
-		}
-
-		return kind.DeleteCluster()
+		return dockerEnvironment.Down()
 	}

 	GlobalRegistry.Register(kubernetesSuiteName, Suite{
@ -109,16 +101,20 @@ func init() {
 		TestTimeout:     2 * time.Minute,
 		TearDown:        teardown,
 		TearDownTimeout: 2 * time.Minute,
-		Description:     "This suite has been created to test Authelia in a Kubernetes context and using nginx as the ingress controller.",
+		Description:     "This suite has been created to test Authelia in a Kubernetes context and using Traefik as the ingress controller.",
 	})
 }

 func loadDockerImages() error {
-	kind := Kind{}
-	images := []string{"authelia:dist"}
+	k3d := K3D{}
+	images := []string{"/authelia/authelia-image-coverage.tar"}
+
+	if os.Getenv("CI") != t {
+		images = []string{"/authelia/authelia-image-dev.tar"}
+	}

 	for _, image := range images {
-		err := kind.LoadImage(image)
+		err := k3d.LoadImage(image)
 		if err != nil {
 			return err
@ -128,17 +124,16 @@ func loadDockerImages() error {
 	return nil
 }

-func waitAllPodsAreReady(timeout time.Duration) error {
+func waitAllPodsAreReady(namespace string, timeout time.Duration) error {
 	kubectl := Kubectl{}
-	// Wait in case the deployment has just been done and some services do not appear in kubectl logs.
-	time.Sleep(1 * time.Second)
-	fmt.Println("Check services are running")

-	if err := kubectl.WaitPodsReady(timeout); err != nil {
+	log.Infof("Checking services in %s namespace are running...", namespace)
+
+	if err := kubectl.WaitPodsReady(namespace, timeout); err != nil {
 		return err
 	}

-	fmt.Println("All pods are ready")
+	log.Info("All pods are ready")

 	return nil
 }
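For reference, the readiness poll implemented by `WaitPodsReady` reduces to the following `kubectl` invocation (run inside the k3d container), with every line of output required to report a READY count of `1/1` or `2/2`:

```bash
# What the suite polls every 5 seconds until the timeout expires.
kubectl get -n authelia pods --no-headers --field-selector=status.phase!=Succeeded
```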