Merge remote-tracking branch 'origin/master' into feat-settings-ui
commit 133f1626ab
@@ -1,7 +1,7 @@
 # ===================================
 # ===== Authelia official image =====
 # ===================================
-FROM alpine:3.16.3
+FROM alpine:3.17.0

 ARG TARGETOS
 ARG TARGETARCH
@@ -46,7 +46,7 @@ CGO_ENABLED=1 CGO_CPPFLAGS="-D_FORTIFY_SOURCE=2 -fstack-protector-strong" CGO_LD
 # ===================================
 # ===== Authelia official image =====
 # ===================================
-FROM alpine:3.16.3
+FROM alpine:3.17.0

 RUN apk --no-cache add ca-certificates tzdata
@@ -43,7 +43,7 @@ CGO_ENABLED=1 CGO_CPPFLAGS="-D_FORTIFY_SOURCE=2 -fstack-protector-strong" CGO_LD
 # ===================================
 # ===== Authelia official image =====
 # ===================================
-FROM alpine:3.16.3
+FROM alpine:3.17.0

 WORKDIR /app
@@ -863,7 +863,8 @@ regulation:
 ## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers.
 # storage:
 ## The encryption key that is used to encrypt sensitive information in the database. Must be a string with a minimum
-## length of 20. Please see the docs if you configure this with an undesirable key and need to change it.
+## length of 20. Please see the docs if you configure this with an undesirable key and need to change it; you MUST use
+## the CLI to change this in the database if you want to change it from a previously configured value.
 # encryption_key: you_must_generate_a_random_string_of_more_than_twenty_chars_and_configure_this

 ##
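The amended comment above points users at the CLI for rotating a previously configured key. As a minimal sketch of that workflow, assuming a SQLite deployment and placeholder key values (the flags come from the storage command changes later in this merge):

```bash
# Verify the currently configured key can decrypt every encrypted row.
authelia storage encryption check --verbose \
  --sqlite.path /config/db.sqlite3 \
  --encryption-key "current-key-of-at-least-twenty-chars"

# Re-encrypt the database under a new key.
authelia storage encryption change-key \
  --sqlite.path /config/db.sqlite3 \
  --encryption-key "current-key-of-at-least-twenty-chars" \
  --new-encryption-key "new-key-of-at-least-twenty-chars"
```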
@@ -542,14 +542,14 @@ if they have a path of exactly `/api` or if they start with `/api/`. This means
 a match for that request.

 ```yaml
-- domains:
+- domain:
     - 'example.com'
     - '*.example.com'
   policy: bypass
   resources:
     - '^/api$'
     - '^/api/'
-- domains:
+- domain:
     - 'app.example.com'
   policy: two_factor
 ```
@@ -28,7 +28,7 @@ this instance if you wanted to downgrade to pre1 you would need to use an Authel

 | Schema Version | Authelia Version | Notes |
 |:--------------:|:----------------:|:------------------------------------------------------------------------------------------------:|
-| pre1 | 4.0.0 | Downgrading to this version requires you use the --pre1 flag |
+| pre1 | 4.0.0 | Downgrading to this version requires you use the --pre1 flag on Authelia 4.37.2 |
 | 1 | 4.33.0 | Initial migration managed version |
 | 2 | 4.34.0 | WebAuthn - added webauthn_devices table, altered totp_config to include device created/used dates |
 | 3 | 4.34.2 | WebAuthn - fix V2 migration kid column length and provide migration path for anyone on V2 |
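For context on the downgrade path the amended row describes, a sketch of the relevant commands (the `--target` and `--pre1` flags appear in the CLI reference changes later in this merge; the database path and key are placeholders):

```bash
# Downgrade to a managed schema version, e.g. version 1.
authelia storage migrate down --target 1 \
  --sqlite.path /config/db.sqlite3 --encryption-key "key-of-at-least-twenty-chars"

# Downgrading to the unmanaged pre1 schema instead uses the dedicated flag and,
# per the table above, an Authelia 4.37.2 binary.
authelia storage migrate down --pre1 \
  --sqlite.path /config/db.sqlite3 --encryption-key "key-of-at-least-twenty-chars"
```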
@@ -2,7 +2,7 @@
 title: "Database Schema"
 description: "Authelia Development Database Schema Guidelines"
 lead: "This section covers the database schema guidelines we use for development."
-date: 2022-11-09T09:20:18+11:00
+date: 2022-11-19T16:47:09+11:00
 draft: false
 images: []
 menu:
@@ -35,7 +35,7 @@ bootstrapping *Authelia*.

 ### Standalone Example

-The following is an examples are [Docker Compose] deployments with just *Authelia* and no bundled applications or
+The following examples are [Docker Compose] deployments with just *Authelia* and no bundled applications or
 proxies.

 It expects the following:
@@ -387,6 +387,7 @@ location /authelia {
     ## Headers
+    ## The headers starting with X-* are required.
     proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
     proxy_set_header X-Original-Method $request_method;
     proxy_set_header X-Forwarded-Method $request_method;
     proxy_set_header X-Forwarded-Proto $scheme;
     proxy_set_header X-Forwarded-Host $http_host;
@@ -470,6 +471,7 @@ location /authelia-basic {
     ## Headers
+    ## The headers starting with X-* are required.
     proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
     proxy_set_header X-Original-Method $request_method;
     proxy_set_header X-Forwarded-Method $request_method;
     proxy_set_header X-Forwarded-Proto $scheme;
     proxy_set_header X-Forwarded-Host $http_host;
@@ -16,27 +16,27 @@ aliases:
 ---

 | Proxy | [Standard](#standard) | [Kubernetes](#kubernetes) | [XHR Redirect](#xhr-redirect) | [Request Method](#request-method) |
-|:---------------------:|:----------------------------------------------------------------:|:------------------------------------------------------------------------------------:|:-----------------------------------------------------:|:-----------------------------------------------------:|
-| [Traefik] | [<i class="icon-support-full"></i>](traefik.md) | [<i class="icon-support-full"></i>](../../integration/kubernetes/traefik-ingress.md) | <i class="icon-support-full"></i> | <i class="icon-support-full"></i> |
-| [NGINX] | [<i class="icon-support-full"></i>](nginx.md) | [<i class="icon-support-full"></i>](../../integration/kubernetes/nginx-ingress.md) | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-full"></i> |
-| [NGINX Proxy Manager] | [<i class="icon-support-full"></i>](nginx-proxy-manager.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-full"></i> |
-| [SWAG] | [<i class="icon-support-full"></i>](swag.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-full"></i> |
-| [HAProxy] | [<i class="icon-support-full"></i>](haproxy.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-unknown"></i> | <i class="icon-support-full"></i> |
-| [Caddy] | [<i class="icon-support-full"></i>](caddy.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-full"></i> | <i class="icon-support-full"></i> |
-| [Traefik] 1.x | [<i class="icon-support-full"></i>](traefikv1.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-full"></i> | <i class="icon-support-full"></i> |
-| [Envoy] | [<i class="icon-support-full"></i>](envoy.md) | [<i class="icon-support-full"></i>](../../integration/kubernetes/istio.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-full"></i> |
-| [Skipper] | [<i class="icon-support-full"></i>](skipper.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-unknown"></i> | <i class="icon-support-unknown"></i> |
-| [Apache] | [<i class="icon-support-none" alt="Not Supported"></i>](#apache) | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-none" alt="Not Supported"></i> |
-| [IIS] | [<i class="icon-support-none" alt="Not Supported"></i>](#iis) | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-none" alt="Not Supported"></i> |
+|:---------------------:|:------------------------------------------------------------------:|:-------------------------------------------------------------------------------------:|:---------------------------------:|:---------------------------------:|
+| [Traefik] | {{% support support="full" link="traefik.md" %}} | {{% support support="full" link="../../integration/kubernetes/traefik-ingress.md" %}} | {{% support support="full" %}} | {{% support support="full" %}} |
+| [Caddy] | {{% support support="full" link="caddy.md" %}} | {{% support support="unknown" %}} | {{% support support="full" %}} | {{% support support="full" %}} |
+| [Envoy] | {{% support support="full" link="envoy.md" %}} | {{% support support="full" link="../../integration/kubernetes/istio.md" %}} | {{% support support="unknown" %}} | {{% support support="full" %}} |
+| [NGINX] | {{% support support="full" link="nginx.md" %}} | {{% support support="full" link="../../integration/kubernetes/nginx-ingress.md" %}} | {{% support %}} | {{% support support="full" %}} |
+| [NGINX Proxy Manager] | {{% support support="full" link="nginx-proxy-manager/index.md" %}} | {{% support support="unknown" %}} | {{% support %}} | {{% support support="full" %}} |
+| [SWAG] | {{% support support="full" link="swag.md" %}} | {{% support support="unknown" %}} | {{% support %}} | {{% support support="full" %}} |
+| [HAProxy] | {{% support support="full" link="haproxy.md" %}} | {{% support support="unknown" %}} | {{% support support="unknown" %}} | {{% support support="full" %}} |
+| [Skipper] | {{% support support="full" link="skipper.md" %}} | {{% support support="unknown" %}} | {{% support support="unknown" %}} | {{% support support="unknown" %}} |
+| [Traefik] 1.x | {{% support support="full" link="traefikv1.md" %}} | {{% support support="unknown" %}} | {{% support support="full" %}} | {{% support support="full" %}} |
+| [Apache] | {{% support link="#apache" %}} | {{% support %}} | {{% support %}} | {{% support %}} |
+| [IIS] | {{% support link="#iis" %}} | {{% support %}} | {{% support %}} | {{% support %}} |

 Legend:

 | Icon | Meaning |
-|:------------------------------------:|:-------------------:|
-| <i class="icon-support-full"></i> | Supported |
-| <i class="icon-support-unknown"></i> | Unknown |
-| <i class="icon-support-partial"></i> | Partially Supported |
-| <i class="icon-support-none"></i> | Not Supported |
+|:---------------------------------:|:-------------------:|
+| {{% support support="full" %}} | Supported |
+| {{% support support="unknown" %}} | Unknown |
+| {{% support support="partial" %}} | Partially Supported |
+| {{% support support="none" %}} | Not Supported |

 ## Support
@@ -49,9 +49,15 @@ One current caveat of the [SWAG] implementation is that it serves Authelia as a
 This is partly because Webauthn requires that the domain is an exact match when registering and authenticating and it is
 possible that due to web standards this will never change.

-In addition this represents a bad user experience in some instances as users sometimes visit the
-`https://app.example.com/auth` URL which doesn't automatically redirect the user to `https://app.example.com` (if they
-visit `https://app.example.com` then they'll be redirected to authenticate then redirected back to their original URL).
+In addition this represents a bad user experience in some instances such as:
+
+- Users sometimes visit the `https://app.example.com/authelia` URL which doesn't automatically redirect the user to
+  `https://app.example.com` (if they visit `https://app.example.com` then they'll be redirected to authenticate then
+  redirected back to their original URL).
+- Administrators may wish to set up OpenID Connect 1.0 in which case it also doesn't represent a good user experience.
+
+Taking these factors into consideration we're adapting our [SWAG] guide to use what we consider best for the users and
+most easily supported. Users who wish to use the [SWAG] guide are free to do so but may not receive the same support.

 ## Trusted Proxies
@@ -61,22 +67,116 @@ Especially if you have never read it before.*
 To configure trusted proxies for [SWAG] see the [NGINX] section on [Trusted Proxies](nginx.md#trusted-proxies).
+Adapting this to [SWAG] is beyond the scope of this documentation.
+
+## Docker Compose
+
+The following docker compose example has various applications suitable for setting up an example environment.
+
+It uses the [nginx image](https://github.com/linuxserver/docker-nginx) from [linuxserver.io] which includes all of the
+required modules including the `http_set_misc` module.
+
+It also includes the [nginx-proxy-confs](https://github.com/linuxserver/docker-mods/tree/nginx-proxy-confs) mod where
+they have several configuration examples in the `/config/nginx/proxy-confs` directory. This can be omitted if desired.
+
+If you're looking for a more complete solution [linuxserver.io] also have an nginx container called [SWAG](./swag.md)
+which includes ACME and various other useful utilities.
+
+{{< details "docker-compose.yaml" >}}
+```yaml
+---
+version: "3.8"
+
+networks:
+  net:
+    driver: bridge
+
+services:
+  swag:
+    container_name: swag
+    image: lscr.io/linuxserver/swag
+    restart: unless-stopped
+    networks:
+      net:
+        aliases: []
+    ports:
+      - '80:80'
+      - '443:443'
+    volumes:
+      - ${PWD}/data/swag:/config
+    environment:
+      PUID: '1000'
+      PGID: '1000'
+      TZ: 'Australia/Melbourne'
+      URL: 'example.com'
+      SUBDOMAINS: 'www,whoami,auth,nextcloud,'
+      VALIDATION: 'http'
+      CERTPROVIDER: 'cloudflare'
+      ONLY_SUBDOMAINS: 'false'
+      STAGING: 'true'
+    cap_add:
+      - NET_ADMIN
+  authelia:
+    container_name: authelia
+    image: authelia/authelia
+    restart: unless-stopped
+    networks:
+      net:
+        aliases: []
+    expose:
+      - 9091
+    volumes:
+      - ${PWD}/data/authelia/config:/config
+    environment:
+      TZ: 'Australia/Melbourne'
+  nextcloud:
+    container_name: nextcloud
+    image: lscr.io/linuxserver/nextcloud
+    restart: unless-stopped
+    networks:
+      net:
+        aliases: []
+    expose:
+      - 443
+    volumes:
+      - ${PWD}/data/nextcloud/config:/config
+      - ${PWD}/data/nextcloud/data:/data
+    environment:
+      PUID: '1000'
+      PGID: '1000'
+      TZ: 'Australia/Melbourne'
+  whoami:
+    container_name: whoami
+    image: docker.io/traefik/whoami
+    restart: unless-stopped
+    networks:
+      net:
+        aliases: []
+    expose:
+      - 80
+    environment:
+      TZ: 'Australia/Melbourne'
+...
+```
+{{< /details >}}
+
 ## Prerequisite Steps

-These steps must be followed regardless of the choice of [subdomain](#subdomain-steps) or [subpath](#subpath-steps).
+In the [SWAG] `/config` mount which is mounted to `${PWD}/data/swag` in our example:

-1. Deploy __Authelia__ to your docker network with the `container_name` of `authelia` and ensure it's listening on the
-   default port and you have not configured the __Authelia__ server TLS settings.
+1. Create a folder named `snippets/authelia`:
+   - The `mkdir -p ${PWD}/data/swag/nginx/snippets/authelia` command should achieve this on Linux.
+2. Create the `${PWD}/data/swag/nginx/snippets/authelia/location.conf` file which can be found [here](nginx.md#authelia-locationconf).
+3. Create the `${PWD}/data/swag/nginx/snippets/authelia/authrequest.conf` file which can be found [here](nginx.md#authelia-authrequestconf).
+   - Ensure you adjust the line `error_page 401 =302 https://auth.example.com/?rd=$target_url;` replacing `https://auth.example.com/` with your external Authelia URL.
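As a convenience, the prerequisite steps above can be sketched as shell commands; this is only an illustration run from the directory containing `./data`, the editor is arbitrary, and the snippet contents come from the linked NGINX guide:

```bash
# Step 1: create the snippets directory inside the SWAG /config mount.
mkdir -p ${PWD}/data/swag/nginx/snippets/authelia

# Steps 2 and 3: create the two snippet files and paste in the contents from the
# NGINX guide, adjusting the error_page line to your external Authelia URL.
nano ${PWD}/data/swag/nginx/snippets/authelia/location.conf
nano ${PWD}/data/swag/nginx/snippets/authelia/authrequest.conf
```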
-## Subdomain Steps
+## Protected Application
+
+In the server configuration for the application you want to protect:

 1. Edit the `/config/nginx/proxy-confs/` file for the application you wish to protect.
-2. Uncomment the `#include /config/nginx/authelia-server.conf;` line which should be within the `server` block
-   but not inside any `location` blocks.
-3. Uncomment the `#include /config/nginx/authelia-location.conf;` line which should be within the applications
-   `location` block.
+2. Under the `#include /config/nginx/authelia-server.conf;` line which should be within the `server` block
+   but not inside any `location` blocks add the following line: `include /config/nginx/snippets/authelia/location.conf;`.
+3. Under the `#include /config/nginx/authelia-location.conf;` line which should be within the applications
+   `location` block add the following line `include /config/nginx/snippets/authelia/authrequest.conf;`.

 ### Example
@@ -85,55 +185,28 @@ server {
     listen 443 ssl;
     listen [::]:443 ssl;

-    server_name heimdall.*;
+    server_name whoami.*;

     include /config/nginx/ssl.conf;

     client_max_body_size 0;

     # Authelia: Step 1.
-    include /config/nginx/authelia-server.conf;
+    #include /config/nginx/authelia-server.conf;
+    include /config/nginx/snippets/authelia/location.conf;

     location / {
         # Authelia: Step 2.
-        include /config/nginx/authelia-location.conf;
+        #include /config/nginx/authelia-location.conf;
+        include /config/nginx/snippets/authelia/authrequest.conf;

         include /config/nginx/proxy.conf;
         resolver 127.0.0.11 valid=30s;
         set $upstream_app heimdall;
         set $upstream_port 443;
         set $upstream_proto https;
         proxy_pass $upstream_proto://$upstream_app:$upstream_port;
     }
 }
 ```

 ## Subpath Steps

 *__Note:__ Steps 1 and 2 only need to be done once, even if you wish to protect multiple applications.*

 1. Edit `/config/nginx/proxy-confs/default`.
 2. Uncomment the `#include /config/nginx/authelia-server.conf;` line.
 3. Edit the `/config/nginx/proxy-confs/` file for the application you wish to protect.
 4. Uncomment the `#include /config/nginx/authelia-location.conf;` line which should be within the applications
    `location` block.

 ### Example

 ```nginx
 location ^~ /bazarr/ {
     # Authelia: Step 4.
     include /config/nginx/authelia-location.conf;

     include /config/nginx/proxy.conf;
     resolver 127.0.0.11 valid=30s;
-    set $upstream_app bazarr;
-    set $upstream_port 6767;
+    set $upstream_app whoami;
+    set $upstream_port 80;
     set $upstream_proto http;
     proxy_pass $upstream_proto://$upstream_app:$upstream_port;

     proxy_set_header Upgrade $http_upgrade;
     proxy_set_header Connection "Upgrade";
 }
 }
 ```
@@ -0,0 +1,21 @@
+---
+title: "OpenID Connect 1.0"
+description: "OpenID Connect 1.0 is an authorization identity framework supported by Authelia."
+date: 2022-06-15T17:51:47+10:00
+draft: false
+images: []
+menu:
+  overview:
+    parent: "authorization"
+weight: 330
+toc: false
+---
+
+[OpenID Connect 1.0](https://openid.net/connect/) is an authorization identity framework supported by Authelia. You can
+configure your applications to use Authelia as an [OpenID Connect 1.0 Provider](https://openid.net/connect/). We do not
+currently operate as an [OpenID Connect 1.0 Relying Party](https://openid.net/connect/). This, like all single sign-on
+technologies, requires support by the protected application.
+
+See the [OpenID Connect 1.0 Configuration Guide](../../configuration/identity-providers/open-id-connect.md) and the
+[OpenID Connect 1.0 Integration Guide](../../integration/openid-connect/introduction.md) for more information.
@@ -0,0 +1,20 @@
+---
+title: "Trusted Headers SSO"
+description: "Trusted Headers SSO is a simple header authorization framework supported by Authelia."
+lead: "Trusted Headers is a simple header authorization framework supported by Authelia."
+date: 2022-06-15T17:51:47+10:00
+draft: false
+images: []
+menu:
+  overview:
+    parent: "authorization"
+weight: 340
+toc: false
+---
+
+This mechanism is supported by proxies which inject certain response headers from Authelia into the protected
+application. This is a very basic means that allows the target application to identify the user who is logged in
+to Authelia. This, like all single sign-on technologies, requires support by the protected application.
+
+You can read more about this in the [Trusted Header SSO Integration Guide](../../integration/trusted-header-sso/introduction.md).
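In practice the proxy typically forwards identity headers such as `Remote-User`, `Remote-Groups`, `Remote-Name`, and `Remote-Email` from Authelia's verification response to the upstream application; these header names are an assumption here, so treat the linked integration guide as the authoritative list.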
@@ -14,28 +14,28 @@ toc: false

 The following table is a support matrix for Authelia features and specific reverse proxies.

-| Proxy | [Standard](#standard) | [Kubernetes](#kubernetes) | [XHR Redirect](#xhr-redirect) | [Request Method](#request-method) |
-|:---------------------:|:-------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------:|:-----------------------------------------------------:|:-----------------------------------------------------:|
-| [Traefik] | [<i class="icon-support-full"></i>](../../integration/proxies/traefik.md) | [<i class="icon-support-full"></i>](../../integration/kubernetes/traefik-ingress.md) | <i class="icon-support-full"></i> | <i class="icon-support-full"></i> |
-| [NGINX] | [<i class="icon-support-full"></i>](../../integration/proxies/nginx.md) | [<i class="icon-support-full"></i>](../../integration/kubernetes/nginx-ingress.md) | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-full"></i> |
-| [NGINX Proxy Manager] | [<i class="icon-support-full"></i>](../../integration/proxies/nginx-proxy-manager.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-full"></i> |
-| [SWAG] | [<i class="icon-support-full"></i>](../../integration/proxies/swag.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-full"></i> |
-| [HAProxy] | [<i class="icon-support-full"></i>](../../integration/proxies/haproxy.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-unknown"></i> | <i class="icon-support-full"></i> |
-| [Caddy] | [<i class="icon-support-full"></i>](../../integration/proxies/caddy.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-full"></i> | <i class="icon-support-full"></i> |
-| [Traefik] 1.x | [<i class="icon-support-full"></i>](../../integration/proxies/traefikv1.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-full"></i> | <i class="icon-support-full"></i> |
-| [Envoy] | [<i class="icon-support-full"></i>](../../integration/proxies/envoy.md) | [<i class="icon-support-full"></i>](../../integration/kubernetes/istio.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-full"></i> |
-| [Skipper] | [<i class="icon-support-full"></i>](../../integration/proxies/skipper.md) | <i class="icon-support-unknown"></i> | <i class="icon-support-unknown"></i> | <i class="icon-support-unknown"></i> |
-| [Apache] | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-none" alt="Not Supported"></i> |
-| [IIS] | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-none" alt="Not Supported"></i> | <i class="icon-support-none" alt="Not Supported"></i> |
+| Proxy | Standard | Kubernetes | XHR Redirect | Request Method |
+|:---------------------:|:--------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------:|:---------------------------------:|:---------------------------------:|
+| [Traefik] | {{% support support="full" link="../../integration/proxies/traefik.md" %}} | {{% support support="full" link="../../integration/kubernetes/traefik-ingress.md" %}} | {{% support support="full" %}} | {{% support support="full" %}} |
+| [Caddy] | {{% support support="full" link="../../integration/proxies/caddy.md" %}} | {{% support support="unknown" %}} | {{% support support="full" %}} | {{% support support="full" %}} |
+| [Envoy] | {{% support support="full" link="../../integration/proxies/envoy.md" %}} | {{% support support="full" link="../../integration/kubernetes/istio.md" %}} | {{% support support="unknown" %}} | {{% support support="full" %}} |
+| [NGINX] | {{% support support="full" link="../../integration/proxies/nginx.md" %}} | {{% support support="full" link="../../integration/kubernetes/nginx-ingress.md" %}} | {{% support %}} | {{% support support="full" %}} |
+| [NGINX Proxy Manager] | {{% support support="full" link="../../integration/proxies/nginx-proxy-manager/index.md" %}} | {{% support %}} | {{% support %}} | {{% support support="full" %}} |
+| [SWAG] | {{% support support="full" link="../../integration/proxies/swag.md" %}} | {{% support %}} | {{% support %}} | {{% support support="full" %}} |
+| [HAProxy] | {{% support support="full" link="../../integration/proxies/haproxy.md" %}} | {{% support support="unknown" %}} | {{% support support="unknown" %}} | {{% support support="full" %}} |
+| [Traefik] 1.x | {{% support support="full" link="../../integration/proxies/traefikv1.md" %}} | {{% support support="unknown" %}} | {{% support support="full" %}} | {{% support support="full" %}} |
+| [Skipper] | {{% support support="full" link="../../integration/proxies/skipper.md" %}} | {{% support %}} | {{% support support="unknown" %}} | {{% support support="unknown" %}} |
+| [Apache] | {{% support %}} | {{% support %}} | {{% support %}} | {{% support %}} |
+| [IIS] | {{% support %}} | {{% support %}} | {{% support %}} | {{% support %}} |

 Legend:

 | Icon | Meaning |
-|-------------------------------------:|:-------------------:|
-| <i class="icon-support-full"></i> | Supported |
-| <i class="icon-support-unknown"></i> | Unknown |
-| <i class="icon-support-partial"></i> | Partially Supported |
-| <i class="icon-support-none"></i> | Not Supported |
+|:----------------------------------:|:-------------------:|
+| {{% support support="full" %}} | Supported |
+| {{% support support="unknown" %}} | Unknown |
+| {{% support support="partial" %}} | Partially Supported |
+| {{% support %}} | Not Supported |

 ## More Information
@@ -40,7 +40,6 @@ authelia storage migrate down --target 20 --encryption-key b3453fde-ecc2-4a1f-94
 ```
-      --destroy-data   confirms you want to destroy data with this migration
   -h, --help           help for down
       --pre1           sets pre1 as the version to migrate to
   -t, --target int     sets the version to migrate to
 ```
@@ -41,15 +41,15 @@ authelia storage user totp generate john --algorithm SHA512 --config config.yml
 ### Options

 ```
-      --algorithm string   set the TOTP algorithm (default "SHA1")
-      --digits uint        set the TOTP digits (default 6)
-  -f, --force              forces the TOTP configuration to be generated regardless if it exists or not
+      --algorithm string   set the algorithm to either SHA1 (supported by most applications), SHA256, or SHA512 (default "SHA1")
+      --digits uint        set the number of digits (default 6)
+  -f, --force              forces the configuration to be generated regardless if it exists or not
   -h, --help               help for generate
-      --issuer string      set the TOTP issuer (default "Authelia")
+      --issuer string      set the issuer description (default "Authelia")
   -p, --path string        path to a file to create a PNG file with the QR code (optional)
-      --period uint        set the TOTP period (default 30)
-      --secret string      Optionally set the TOTP shared secret as base32 encoded bytes (no padding), it's recommended to not set this option unless you're restoring an TOTP config
-      --secret-size uint   set the TOTP secret size (default 32)
+      --period uint        set the period between rotations (default 30)
+      --secret string      set the shared secret as base32 encoded bytes (no padding), it's recommended that you do not use this option unless you're restoring a configuration
+      --secret-size uint   set the secret size (default 32)
 ```

 ### Options inherited from parent commands
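The reworded flag help above maps directly onto typical invocations; a usage sketch with placeholder values (the username `john` and the `--config` flag come from the command synopsis in the hunk header):

```bash
# Generate a TOTP configuration for john using SHA256, 6 digits and a 30 second period,
# writing a QR code PNG alongside it.
authelia storage user totp generate john \
  --algorithm SHA256 --digits 6 --period 30 \
  --path /tmp/john-totp.png --config config.yml

# Regenerate an existing configuration, overwriting it.
authelia storage user totp generate john --force --config config.yml
```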
@@ -2,7 +2,7 @@
 title: "Database Integrations"
 description: "A database integration reference guide"
 lead: "This section contains a database integration reference guide for Authelia."
-date: 2022-11-10T11:03:47+11:00
+date: 2022-11-19T16:47:09+11:00
 draft: false
 images: []
 menu:
@@ -2,7 +2,7 @@
 title: "Integrations"
 description: "A collection of integration reference guides"
 lead: "This section contains integration reference guides for Authelia."
-date: 2022-11-10T11:03:47+11:00
+date: 2022-11-19T16:47:09+11:00
 draft: false
 images: []
 menu:
@@ -113,6 +113,13 @@ Feature List:

 ### Beta 6

+{{< roadmap-status stage="in-progress" version="v4.38.0" >}}
+
+* [OAuth 2.0 Pushed Authorization Requests](https://www.rfc-editor.org/rfc/rfc9126.html)
+
+### Beta 7
+
 {{< roadmap-status >}}

 Feature List:
@@ -122,7 +129,7 @@ Feature List:

 See [OpenID Connect Core (Mandatory to Implement Features for All OpenID Providers)].

-### Beta 7
+### Beta 8

 {{< roadmap-status >}}
@@ -52,7 +52,7 @@
 <div class="row justify-content-center text-center">
   <div class="col-lg-5">
     <h2 class="h4">Single Sign-On 🎟️</h2>
-    <p>Allow your users the convenience of just being required to login once to a wide range of web applications.</p>
+    <p>Allow your users the convenience of just being required to log in once to a wide range of web applications via a session cookie, <a href="./overview/authorization/openid-connect-1.0/">OpenID Connect 1.0</a>, or <a href="./overview/authorization/trusted-headers/">Trusted Headers</a>.</p>
   </div>
   <div class="col-lg-5">
     <h2 class="h4">Authorization Policies 👮</h2>
@@ -0,0 +1,9 @@
+{{- $class := "icon-support-none" }}{{ $title := "Not Supported" }}
+{{- with .Get "support" }}
+{{- if (eq . "full") }}{{ $class = "icon-support-full" }}{{ $title = "Fully Supported" }}
+{{- else if (eq . "partial") }}{{ $class = "icon-support-partial" }}{{ $title = "Partially Supported" }}
+{{- else if (eq . "unknown") }}{{ $class = "icon-support-unknown" }}{{ $title = "Unknown" }}
+{{- end }}{{ end }}
+{{- with .Get "title" }}{{ $title = . }}{{ end }}
+{{- with .Get "link" }}[<i class="{{ $class }}" data-toggle="tooltip" data-placement="top" title="{{ $title }}"></i>]({{ . }})
+{{- else }}<i class="{{ $class }}" data-toggle="tooltip" data-placement="top" title="{{ $title }}"></i>{{ end }}
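For reference, the rewritten support tables earlier in this merge call this shortcode inline, e.g. `{{% support support="full" link="nginx.md" %}}` for a linked "Fully Supported" icon, `{{% support support="unknown" %}}` for "Unknown", and a bare `{{% support %}}` for the default "Not Supported" icon.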
@@ -46,13 +46,13 @@
     "@popperjs/core": "2.11.6",
     "auto-changelog": "2.4.0",
     "autoprefixer": "10.4.13",
-    "bootstrap": "5.2.2",
-    "bootstrap-icons": "1.10.0",
+    "bootstrap": "5.2.3",
+    "bootstrap-icons": "1.10.2",
     "clipboard": "2.0.11",
-    "eslint": "8.27.0",
+    "eslint": "8.28.0",
     "exec-bin": "1.0.0",
     "flexsearch": "0.7.31",
-    "highlight.js": "11.6.0",
+    "highlight.js": "11.7.0",
     "hugo-installer": "4.0.1",
     "instant.page": "5.1.1",
     "katex": "0.16.3",
@@ -64,10 +64,10 @@
     "postcss-cli": "10.0.0",
     "purgecss-whitelister": "2.4.0",
     "shx": "0.3.4",
-    "stylelint": "14.14.1",
+    "stylelint": "14.15.0",
     "stylelint-config-standard-scss": "6.1.0"
   },
   "otherDependencies": {
-    "hugo": "0.105.0"
+    "hugo": "0.107.0"
   }
 }
@@ -9,13 +9,13 @@ specifiers:
   '@popperjs/core': 2.11.6
   auto-changelog: 2.4.0
   autoprefixer: 10.4.13
-  bootstrap: 5.2.2
-  bootstrap-icons: 1.10.0
+  bootstrap: 5.2.3
+  bootstrap-icons: 1.10.2
   clipboard: 2.0.11
-  eslint: 8.27.0
+  eslint: 8.28.0
   exec-bin: 1.0.0
   flexsearch: 0.7.31
-  highlight.js: 11.6.0
+  highlight.js: 11.7.0
   hugo-installer: 4.0.1
   instant.page: 5.1.1
   katex: 0.16.3
@@ -27,7 +27,7 @@ specifiers:
   postcss-cli: 10.0.0
   purgecss-whitelister: 2.4.0
   shx: 0.3.4
-  stylelint: 14.14.1
+  stylelint: 14.15.0
   stylelint-config-standard-scss: 6.1.0

 devDependencies:
@@ -39,13 +39,13 @@ devDependencies:
   '@popperjs/core': 2.11.6
   auto-changelog: 2.4.0
   autoprefixer: 10.4.13_postcss@8.4.19
-  bootstrap: 5.2.2_@popperjs+core@2.11.6
-  bootstrap-icons: 1.10.0
+  bootstrap: 5.2.3_@popperjs+core@2.11.6
+  bootstrap-icons: 1.10.2
   clipboard: 2.0.11
-  eslint: 8.27.0
+  eslint: 8.28.0
   exec-bin: 1.0.0
   flexsearch: 0.7.31
-  highlight.js: 11.6.0
+  highlight.js: 11.7.0
   hugo-installer: 4.0.1
   instant.page: 5.1.1
   katex: 0.16.3
@@ -57,8 +57,8 @@ devDependencies:
   postcss-cli: 10.0.0_postcss@8.4.19
   purgecss-whitelister: 2.4.0
   shx: 0.3.4
-  stylelint: 14.14.1
-  stylelint-config-standard-scss: 6.1.0_ave2i6l4ingtbwj4aquhd5witq
+  stylelint: 14.15.0
+  stylelint-config-standard-scss: 6.1.0_a37symlv4urgexnspmy4gyeh7i

 packages:
@@ -1603,12 +1603,12 @@ packages:
       safe-buffer: 5.2.1
     dev: true

-  /bootstrap-icons/1.10.0:
-    resolution: {integrity: sha512-SNQ3EUv5cKuoqkS6tebZztusF1P8hKyCVVSlYjm6d5H2fa1v32w72oPjujrTlGy2g9LiZ0tR/uFMEZwO71GHPQ==}
+  /bootstrap-icons/1.10.2:
+    resolution: {integrity: sha512-PTPYadRn1AMGr+QTSxe4ZCc+Wzv9DGZxbi3lNse/dajqV31n2/wl/7NX78ZpkvFgRNmH4ogdIQPQmxAfhEV6nA==}
     dev: true

-  /bootstrap/5.2.2_@popperjs+core@2.11.6:
-    resolution: {integrity: sha512-dEtzMTV71n6Fhmbg4fYJzQsw1N29hJKO1js5ackCgIpDcGid2ETMGC6zwSYw09v05Y+oRdQ9loC54zB1La3hHQ==}
+  /bootstrap/5.2.3_@popperjs+core@2.11.6:
+    resolution: {integrity: sha512-cEKPM+fwb3cT8NzQZYEu4HilJ3anCrWqh3CHAok1p9jXqMPsPTBhU25fBckEJHJ/p+tTxTFTsFQGM+gaHpi3QQ==}
     peerDependencies:
       '@popperjs/core': ^2.11.6
     dependencies:
@@ -1859,8 +1859,8 @@ packages:
     resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==}
     dev: true

-  /cosmiconfig/7.0.1:
-    resolution: {integrity: sha512-a1YWNUV2HwGimB7dU2s1wUMurNKjpx60HxBB6xUM8Re+2s1g1IIfJvFR0/iCF+XHdE0GMTKTuLR32UQff4TEyQ==}
+  /cosmiconfig/7.1.0:
+    resolution: {integrity: sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==}
     engines: {node: '>=10'}
     dependencies:
       '@types/parse-json': 4.0.0
@@ -2079,13 +2079,13 @@ packages:
       estraverse: 5.3.0
     dev: true

-  /eslint-utils/3.0.0_eslint@8.27.0:
+  /eslint-utils/3.0.0_eslint@8.28.0:
     resolution: {integrity: sha512-uuQC43IGctw68pJA1RgbQS8/NP7rch6Cwd4j3ZBtgo4/8Flj4eGE7ZYSZRN3iq5pVUv6GPdW5Z1RFleo84uLDA==}
     engines: {node: ^10.0.0 || ^12.0.0 || >= 14.0.0}
     peerDependencies:
       eslint: '>=5'
     dependencies:
-      eslint: 8.27.0
+      eslint: 8.28.0
       eslint-visitor-keys: 2.1.0
     dev: true
@@ -2099,8 +2099,8 @@ packages:
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
     dev: true

-  /eslint/8.27.0:
-    resolution: {integrity: sha512-0y1bfG2ho7mty+SiILVf9PfuRA49ek4Nc60Wmmu62QlobNR+CeXa4xXIJgcuwSQgZiWaPH+5BDsctpIW0PR/wQ==}
+  /eslint/8.28.0:
+    resolution: {integrity: sha512-S27Di+EVyMxcHiwDrFzk8dJYAaD+/5SoWKxL1ri/71CRHsnJnRDPNt2Kzj24+MT9FDupf4aqqyqPrvI8MvQ4VQ==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
     hasBin: true
     dependencies:
@@ -2115,7 +2115,7 @@ packages:
       doctrine: 3.0.0
       escape-string-regexp: 4.0.0
       eslint-scope: 7.1.1
-      eslint-utils: 3.0.0_eslint@8.27.0
+      eslint-utils: 3.0.0_eslint@8.28.0
       eslint-visitor-keys: 3.3.0
       espree: 9.4.0
       esquery: 1.4.0
@@ -2552,8 +2552,8 @@ packages:
       function-bind: 1.1.1
     dev: true

-  /highlight.js/11.6.0:
-    resolution: {integrity: sha512-ig1eqDzJaB0pqEvlPVIpSSyMaO92bH1N2rJpLMN/nX396wTpDA4Eq0uK+7I/2XG17pFaaKE0kjV/XPeGt7Evjw==}
+  /highlight.js/11.7.0:
+    resolution: {integrity: sha512-1rRqesRFhMO/PRF+G86evnyJkCgaZFOI+Z6kdj15TA18funfoqJXvgPCLSf0SWq3SRfg1j3HlDs8o4s3EGq1oQ==}
     engines: {node: '>=12.0.0'}
     dev: true
@@ -3829,7 +3829,7 @@ packages:
     resolution: {integrity: sha512-Dj1Okke1C3uKKwQcetra4jSuk0DqbzbYtXipzFlFMZtowbF1x7BKJwB9AayVMyFARvU8EDrZdcax4At/452cAg==}
     dev: true

-  /stylelint-config-recommended-scss/8.0.0_ave2i6l4ingtbwj4aquhd5witq:
+  /stylelint-config-recommended-scss/8.0.0_a37symlv4urgexnspmy4gyeh7i:
     resolution: {integrity: sha512-BxjxEzRaZoQb7Iinc3p92GS6zRdRAkIuEu2ZFLTxJK2e1AIcCb5B5MXY9KOXdGTnYFZ+KKx6R4Fv9zU6CtMYPQ==}
     peerDependencies:
       postcss: ^8.3.3
@@ -3840,20 +3840,20 @@ packages:
     dependencies:
       postcss: 8.4.19
       postcss-scss: 4.0.4_postcss@8.4.19
-      stylelint: 14.14.1
-      stylelint-config-recommended: 9.0.0_stylelint@14.14.1
-      stylelint-scss: 4.2.0_stylelint@14.14.1
+      stylelint: 14.15.0
+      stylelint-config-recommended: 9.0.0_stylelint@14.15.0
+      stylelint-scss: 4.2.0_stylelint@14.15.0
     dev: true

-  /stylelint-config-recommended/9.0.0_stylelint@14.14.1:
+  /stylelint-config-recommended/9.0.0_stylelint@14.15.0:
     resolution: {integrity: sha512-9YQSrJq4NvvRuTbzDsWX3rrFOzOlYBmZP+o513BJN/yfEmGSr0AxdvrWs0P/ilSpVV/wisamAHu5XSk8Rcf4CQ==}
     peerDependencies:
       stylelint: ^14.10.0
     dependencies:
-      stylelint: 14.14.1
+      stylelint: 14.15.0
     dev: true

-  /stylelint-config-standard-scss/6.1.0_ave2i6l4ingtbwj4aquhd5witq:
+  /stylelint-config-standard-scss/6.1.0_a37symlv4urgexnspmy4gyeh7i:
     resolution: {integrity: sha512-iZ2B5kQT2G3rUzx+437cEpdcnFOQkwnwqXuY8Z0QUwIHQVE8mnYChGAquyKFUKZRZ0pRnrciARlPaR1RBtPb0Q==}
     peerDependencies:
       postcss: ^8.3.3
@@ -3863,21 +3863,21 @@ packages:
       optional: true
     dependencies:
       postcss: 8.4.19
-      stylelint: 14.14.1
-      stylelint-config-recommended-scss: 8.0.0_ave2i6l4ingtbwj4aquhd5witq
-      stylelint-config-standard: 29.0.0_stylelint@14.14.1
+      stylelint: 14.15.0
+      stylelint-config-recommended-scss: 8.0.0_a37symlv4urgexnspmy4gyeh7i
+      stylelint-config-standard: 29.0.0_stylelint@14.15.0
     dev: true

-  /stylelint-config-standard/29.0.0_stylelint@14.14.1:
+  /stylelint-config-standard/29.0.0_stylelint@14.15.0:
     resolution: {integrity: sha512-uy8tZLbfq6ZrXy4JKu3W+7lYLgRQBxYTUUB88vPgQ+ZzAxdrvcaSUW9hOMNLYBnwH+9Kkj19M2DHdZ4gKwI7tg==}
     peerDependencies:
       stylelint: ^14.14.0
     dependencies:
-      stylelint: 14.14.1
-      stylelint-config-recommended: 9.0.0_stylelint@14.14.1
+      stylelint: 14.15.0
+      stylelint-config-recommended: 9.0.0_stylelint@14.15.0
     dev: true

-  /stylelint-scss/4.2.0_stylelint@14.14.1:
+  /stylelint-scss/4.2.0_stylelint@14.15.0:
     resolution: {integrity: sha512-HHHMVKJJ5RM9pPIbgJ/XA67h9H0407G68Rm69H4fzFbFkyDMcTV1Byep3qdze5+fJ3c0U7mJrbj6S0Fg072uZA==}
     peerDependencies:
       stylelint: ^14.5.1
@@ -3887,18 +3887,18 @@ packages:
       postcss-resolve-nested-selector: 0.1.1
       postcss-selector-parser: 6.0.10
       postcss-value-parser: 4.2.0
-      stylelint: 14.14.1
+      stylelint: 14.15.0
     dev: true

-  /stylelint/14.14.1:
-    resolution: {integrity: sha512-Jnftu+lSD8cSpcV/+Z2nfgfgFpTIS1FcujezXPngtoIQ6wtwutL22MsNE0dJuMiM1h1790g2qIjAyUZCMrX4sw==}
+  /stylelint/14.15.0:
+    resolution: {integrity: sha512-JOgDAo5QRsqiOZPZO+B9rKJvBm64S0xasbuRPAbPs6/vQDgDCnZLIiw6XcAS6GQKk9k1sBWR6rmH3Mfj8OknKg==}
     engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
     hasBin: true
     dependencies:
       '@csstools/selector-specificity': 2.0.2_45y636a2vqremknoajyxd5nkzy
       balanced-match: 2.0.0
       colord: 2.9.3
-      cosmiconfig: 7.0.1
+      cosmiconfig: 7.1.0
       css-functions-list: 3.1.0
       debug: 4.3.4
       fast-glob: 3.2.12
go.mod (12 changes)
@@ -7,16 +7,16 @@ require (
 github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
 github.com/deckarep/golang-set v1.8.0
 github.com/duosecurity/duo_api_golang v0.0.0-20221117185402-091daa09e19d
-github.com/fasthttp/router v1.4.13
+github.com/fasthttp/router v1.4.14
 github.com/fasthttp/session/v2 v2.4.13
 github.com/fsnotify/fsnotify v1.6.0
 github.com/go-asn1-ber/asn1-ber v1.5.4
 github.com/go-crypt/crypt v0.1.14
 github.com/go-ldap/ldap/v3 v3.4.4
-github.com/go-rod/rod v0.112.0
+github.com/go-rod/rod v0.112.2
 github.com/go-sql-driver/mysql v1.6.0
 github.com/go-webauthn/webauthn v0.5.0
-github.com/golang-jwt/jwt/v4 v4.4.2
+github.com/golang-jwt/jwt/v4 v4.4.3
 github.com/golang/mock v1.6.0
 github.com/google/uuid v1.3.0
 github.com/hashicorp/go-retryablehttp v0.7.1
@@ -28,7 +28,7 @@ require (
 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
 github.com/ory/fosite v0.43.0
 github.com/ory/herodot v0.9.13
-github.com/ory/x v0.0.514
+github.com/ory/x v0.0.517
 github.com/otiai10/copy v1.9.0
 github.com/pkg/errors v0.9.1
 github.com/pquerna/otp v1.3.0
@@ -38,7 +38,7 @@ require (
 github.com/spf13/pflag v1.0.5
 github.com/stretchr/testify v1.8.1
 github.com/trustelem/zxcvbn v1.0.1
-github.com/valyala/fasthttp v1.41.0
+github.com/valyala/fasthttp v1.43.0
 golang.org/x/sync v0.1.0
 golang.org/x/term v0.2.0
 golang.org/x/text v0.4.0
@@ -104,7 +104,7 @@ require (
 github.com/valyala/bytebufferpool v1.0.0 // indirect
 github.com/x448/float16 v0.8.4 // indirect
 github.com/ysmood/goob v0.4.0 // indirect
-github.com/ysmood/gson v0.7.1 // indirect
+github.com/ysmood/gson v0.7.3 // indirect
 github.com/ysmood/leakless v0.8.0 // indirect
 golang.org/x/crypto v0.1.0 // indirect
 golang.org/x/mod v0.6.0 // indirect
go.sum (29 changes)
@@ -144,8 +144,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
 github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
-github.com/fasthttp/router v1.4.13 h1:42M7+7tNO6clb5seb4HhXlBIX1lnNv8DLhiT6jUv75A=
-github.com/fasthttp/router v1.4.13/go.mod h1:mVhHMaSQA2Hi1HeuL/ZMuZpsZWk5bya75EpaDr3fO7E=
+github.com/fasthttp/router v1.4.14 h1:+W65VCKgyI4BZszhDiCRfONoFieePZIoQ7D8vGhiuzM=
+github.com/fasthttp/router v1.4.14/go.mod h1:+svLaOvqF9Lc0yjX9aHAD4NUMf+mggLPOT4UMdS6fjM=
 github.com/fasthttp/session/v2 v2.4.13 h1:I/j3w8UrXX1haXE+iraAbQuGihNVeTq6b8sp6L3ZJ6Q=
 github.com/fasthttp/session/v2 v2.4.13/go.mod h1:bAE6Bjl6ofQbkOpqcSuOVt/1R1LnbNLnFMHjGQcYP5M=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -182,8 +182,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
 github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
-github.com/go-rod/rod v0.112.0 h1:U9Yc+quw4hxZ6GrdbWFBeylvaYElEKM9ijFW2LYkGlA=
-github.com/go-rod/rod v0.112.0/go.mod h1:GZDtmEs6RpF6kBRYpGCZXxXlKNneKVPiKOjaMbmVVjE=
+github.com/go-rod/rod v0.112.2 h1:dwauKYC/H2em8/BcGk3gC0LTzZHf5MIDKf2DVM4z9gU=
+github.com/go-rod/rod v0.112.2/go.mod h1:ElViL9ABbcshNQw93+11FrYRH92RRhMKleuILo6+5V0=
 github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -197,8 +197,8 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
-github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU=
+github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
 github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@@ -460,8 +460,8 @@ github.com/ory/herodot v0.9.13 h1:cN/Z4eOkErl/9W7hDIDLb79IO/bfsH+8yscBjRpB4IU=
 github.com/ory/herodot v0.9.13/go.mod h1:IWDs9kSvFQqw/cQ8zi5ksyYvITiUU4dI7glUrhZcJYo=
 github.com/ory/viper v1.7.5 h1:+xVdq7SU3e1vNaCsk/ixsfxE4zylk1TJUiJrY647jUE=
 github.com/ory/viper v1.7.5/go.mod h1:ypOuyJmEUb3oENywQZRgeAMwqgOyDqwboO1tj3DjTaM=
-github.com/ory/x v0.0.514 h1:QCRqmZbsqRTMIMA+mR1qjjpStdEeVGTXI0jMO0iTfVw=
-github.com/ory/x v0.0.514/go.mod h1:xUtRpoiRARyJNPVk/fcCNKzyp25Foxt9GPlj8pd7egY=
+github.com/ory/x v0.0.517 h1:20FrwHie18q78WGaHcaH0+XoPNdE88zqSXCQNPNlYUs=
+github.com/ory/x v0.0.517/go.mod h1:xUtRpoiRARyJNPVk/fcCNKzyp25Foxt9GPlj8pd7egY=
 github.com/otiai10/copy v1.9.0 h1:7KFNiCgZ91Ru4qW4CWPf/7jqtxLagGRmIxWldPP9VY4=
 github.com/otiai10/copy v1.9.0/go.mod h1:hsfX19wcn0UWIHUQ3/4fHuehhk2UyArQ9dVFAn3FczI=
 github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
@@ -604,8 +604,9 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
 github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I=
-github.com/valyala/fasthttp v1.41.0 h1:zeR0Z1my1wDHTRiamBCXVglQdbUwgb9uWG3k1HQz6jY=
-github.com/valyala/fasthttp v1.41.0/go.mod h1:f6VbjjoI3z1NDOZOv17o6RvtRSWxC77seBFc2uWtgiY=
+github.com/valyala/fasthttp v1.42.0/go.mod h1:f6VbjjoI3z1NDOZOv17o6RvtRSWxC77seBFc2uWtgiY=
+github.com/valyala/fasthttp v1.43.0 h1:Gy4sb32C98fbzVWZlTM1oTMdLWGyvxR03VhM6cBIU4g=
+github.com/valyala/fasthttp v1.43.0/go.mod h1:f6VbjjoI3z1NDOZOv17o6RvtRSWxC77seBFc2uWtgiY=
 github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
@@ -613,12 +614,12 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
 github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
-github.com/ysmood/got v0.31.3 h1:UvvF+TDVsZLO7MSzm/Bd/H4HVp+7S5YwsxgdwaKq8uA=
-github.com/ysmood/got v0.31.3/go.mod h1:pE1l4LOwOBhQg6A/8IAatkGp7uZjnalzrZolnlhhMgY=
+github.com/ysmood/got v0.32.0 h1:aAHdQgfgMb/lo4v+OekM+SSqEJYFI035h5YYvLXsVyU=
+github.com/ysmood/got v0.32.0/go.mod h1:pE1l4LOwOBhQg6A/8IAatkGp7uZjnalzrZolnlhhMgY=
 github.com/ysmood/gotrace v0.6.0 h1:SyI1d4jclswLhg7SWTL6os3L1WOKeNn/ZtzVQF8QmdY=
 github.com/ysmood/gotrace v0.6.0/go.mod h1:TzhIG7nHDry5//eYZDYcTzuJLYQIkykJzCRIo4/dzQM=
-github.com/ysmood/gson v0.7.1 h1:zKL2MTGtynxdBdlZjyGsvEOZ7dkxaY5TH6QhAbTgz0Q=
-github.com/ysmood/gson v0.7.1/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
+github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
+github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
 github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=
 github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -554,6 +554,45 @@ const (
 	cmdFlagUsageCharacters = "sets the explicit characters for the random string"
 	cmdFlagNameLength = "length"
 	cmdFlagUsageLength = "sets the character length for the random string"
+
+	cmdFlagNameNewEncryptionKey = "new-encryption-key"
+
+	cmdFlagNameFile = "file"
+	cmdFlagNameUsers = "users"
+	cmdFlagNameServices = "services"
+	cmdFlagNameSectors = "sectors"
+	cmdFlagNameIdentifier = "identifier"
+	cmdFlagNameService = "service"
+	cmdFlagNameSector = "sector"
+	cmdFlagNameDescription = "description"
+	cmdFlagNameAll = "all"
+	cmdFlagNameKeyID = "kid"
+	cmdFlagNameVerbose = "verbose"
+	cmdFlagNameSecret = "secret"
+	cmdFlagNameSecretSize = "secret-size"
+	cmdFlagNamePeriod = "period"
+	cmdFlagNameDigits = "digits"
+	cmdFlagNameAlgorithm = "algorithm"
+	cmdFlagNameIssuer = "issuer"
+	cmdFlagNameForce = "force"
+	cmdFlagNameFormat = "format"
+	cmdFlagNamePath = "path"
+	cmdFlagNameTarget = "target"
+	cmdFlagNameDestroyData = "destroy-data"
+
+	cmdFlagNameEncryptionKey = "encryption-key"
+	cmdFlagNameSQLite3Path = "sqlite.path"
+	cmdFlagNameMySQLHost = "mysql.host"
+	cmdFlagNameMySQLPort = "mysql.port"
+	cmdFlagNameMySQLDatabase = "mysql.database"
+	cmdFlagNameMySQLUsername = "mysql.username"
+	cmdFlagNameMySQLPassword = "mysql.password"
+	cmdFlagNamePostgreSQLHost = "postgres.host"
+	cmdFlagNamePostgreSQLPort = "postgres.port"
+	cmdFlagNamePostgreSQLDatabase = "postgres.database"
+	cmdFlagNamePostgreSQLSchema = "postgres.schema"
+	cmdFlagNamePostgreSQLUsername = "postgres.username"
+	cmdFlagNamePostgreSQLPassword = "postgres.password"
 )

 const (
@@ -591,6 +630,7 @@ var (

 const (
 	identifierServiceOpenIDConnect = "openid"
+	invalid = "invalid"
 )

 var (
@@ -3,12 +3,10 @@ package commands
 import (
 	"fmt"
 	"strings"
-	"syscall"

 	"github.com/go-crypt/crypt"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
-	"golang.org/x/term"

 	"github.com/authelia/authelia/v4/internal/authentication"
 	"github.com/authelia/authelia/v4/internal/configuration"
@@ -433,7 +431,7 @@ func cmdCryptoHashGetPassword(cmd *cobra.Command, args []string, useArgs, useRan
 		noConfirm bool
 	)

-	if data, err = hashReadPasswordWithPrompt("Enter Password: "); err != nil {
+	if data, err = termReadPasswordWithPrompt("Enter Password: ", "password"); err != nil {
 		err = fmt.Errorf("failed to read the password from the terminal: %w", err)

 		return
@@ -448,8 +446,7 @@ func cmdCryptoHashGetPassword(cmd *cobra.Command, args []string, useArgs, useRan
 	}

 	if noConfirm, err = cmd.Flags().GetBool(cmdFlagNameNoConfirm); err == nil && !noConfirm {
-		if data, err = hashReadPasswordWithPrompt("Confirm Password: "); err != nil {
-			err = fmt.Errorf("failed to read the password from the terminal: %w", err)
+		if data, err = termReadPasswordWithPrompt("Confirm Password: ", ""); err != nil {
 			return
 		}
@@ -467,22 +464,6 @@ func cmdCryptoHashGetPassword(cmd *cobra.Command, args []string, useArgs, useRan
 	return
 }

-func hashReadPasswordWithPrompt(prompt string) (data []byte, err error) {
-	fmt.Print(prompt)
-
-	if data, err = term.ReadPassword(int(syscall.Stdin)); err != nil { //nolint:unconvert,nolintlint
-		if err.Error() == "inappropriate ioctl for device" {
-			return nil, fmt.Errorf("the terminal doesn't appear to be interactive either use the '--password' flag or use an interactive terminal: %w", err)
-		}
-
-		return nil, err
-	}
-
-	fmt.Println("")
-
-	return data, nil
-}
-
 func cmdFlagConfig(cmd *cobra.Command) {
 	cmd.PersistentFlags().StringSliceP(cmdFlagNameConfig, "c", []string{"configuration.yml"}, "configuration files to load")
 }
@@ -0,0 +1,8 @@
+package commands
+
+import (
+	"errors"
+)
+
+// ErrStdinIsNotTerminal is returned when Stdin is not an interactive terminal.
+var ErrStdinIsNotTerminal = errors.New("stdin is not a terminal")
@@ -23,22 +23,22 @@ func newStorageCmd() (cmd *cobra.Command) {

 	cmdWithConfigFlags(cmd, true, []string{"configuration.yml"})

-	cmd.PersistentFlags().String("encryption-key", "", "the storage encryption key to use")
+	cmd.PersistentFlags().String(cmdFlagNameEncryptionKey, "", "the storage encryption key to use")

-	cmd.PersistentFlags().String("sqlite.path", "", "the SQLite database path")
+	cmd.PersistentFlags().String(cmdFlagNameSQLite3Path, "", "the SQLite database path")

-	cmd.PersistentFlags().String("mysql.host", "", "the MySQL hostname")
-	cmd.PersistentFlags().Int("mysql.port", 3306, "the MySQL port")
-	cmd.PersistentFlags().String("mysql.database", "authelia", "the MySQL database name")
-	cmd.PersistentFlags().String("mysql.username", "authelia", "the MySQL username")
-	cmd.PersistentFlags().String("mysql.password", "", "the MySQL password")
+	cmd.PersistentFlags().String(cmdFlagNameMySQLHost, "", "the MySQL hostname")
+	cmd.PersistentFlags().Int(cmdFlagNameMySQLPort, 3306, "the MySQL port")
+	cmd.PersistentFlags().String(cmdFlagNameMySQLDatabase, "authelia", "the MySQL database name")
+	cmd.PersistentFlags().String(cmdFlagNameMySQLUsername, "authelia", "the MySQL username")
+	cmd.PersistentFlags().String(cmdFlagNameMySQLPassword, "", "the MySQL password")

-	cmd.PersistentFlags().String("postgres.host", "", "the PostgreSQL hostname")
-	cmd.PersistentFlags().Int("postgres.port", 5432, "the PostgreSQL port")
-	cmd.PersistentFlags().String("postgres.database", "authelia", "the PostgreSQL database name")
-	cmd.PersistentFlags().String("postgres.schema", "public", "the PostgreSQL schema name")
-	cmd.PersistentFlags().String("postgres.username", "authelia", "the PostgreSQL username")
-	cmd.PersistentFlags().String("postgres.password", "", "the PostgreSQL password")
+	cmd.PersistentFlags().String(cmdFlagNamePostgreSQLHost, "", "the PostgreSQL hostname")
+	cmd.PersistentFlags().Int(cmdFlagNamePostgreSQLPort, 5432, "the PostgreSQL port")
+	cmd.PersistentFlags().String(cmdFlagNamePostgreSQLDatabase, "authelia", "the PostgreSQL database name")
+	cmd.PersistentFlags().String(cmdFlagNamePostgreSQLSchema, "public", "the PostgreSQL schema name")
+	cmd.PersistentFlags().String(cmdFlagNamePostgreSQLUsername, "authelia", "the PostgreSQL username")
+	cmd.PersistentFlags().String(cmdFlagNamePostgreSQLPassword, "", "the PostgreSQL password")
 	cmd.PersistentFlags().String("postgres.ssl.mode", "disable", "the PostgreSQL ssl mode")
 	cmd.PersistentFlags().String("postgres.ssl.root_certificate", "", "the PostgreSQL ssl root certificate file location")
 	cmd.PersistentFlags().String("postgres.ssl.certificate", "", "the PostgreSQL ssl certificate file location")
@ -83,7 +83,7 @@ func newStorageEncryptionCheckCmd() (cmd *cobra.Command) {
|
|||
DisableAutoGenTag: true,
|
||||
}
|
||||
|
||||
cmd.Flags().Bool("verbose", false, "enables verbose checking of every row of encrypted data")
|
||||
cmd.Flags().Bool(cmdFlagNameVerbose, false, "enables verbose checking of every row of encrypted data")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
@ -99,7 +99,7 @@ func newStorageEncryptionChangeKeyCmd() (cmd *cobra.Command) {
|
|||
DisableAutoGenTag: true,
|
||||
}
|
||||
|
||||
cmd.Flags().String("new-encryption-key", "", "the new key to encrypt the data with")
|
||||
cmd.Flags().String(cmdFlagNameNewEncryptionKey, "", "the new key to encrypt the data with")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
@ -154,7 +154,7 @@ func newStorageUserIdentifiersExportCmd() (cmd *cobra.Command) {
|
|||
DisableAutoGenTag: true,
|
||||
}
|
||||
|
||||
cmd.Flags().StringP("file", "f", "user-opaque-identifiers.yml", "The file name for the YAML export")
|
||||
cmd.Flags().StringP(cmdFlagNameFile, "f", "user-opaque-identifiers.yml", "The file name for the YAML export")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
@ -170,7 +170,7 @@ func newStorageUserIdentifiersImportCmd() (cmd *cobra.Command) {
|
|||
DisableAutoGenTag: true,
|
||||
}
|
||||
|
||||
cmd.Flags().StringP("file", "f", "user-opaque-identifiers.yml", "The file name for the YAML import")
|
||||
cmd.Flags().StringP(cmdFlagNameFile, "f", "user-opaque-identifiers.yml", "The file name for the YAML import")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
@ -186,9 +186,9 @@ func newStorageUserIdentifiersGenerateCmd() (cmd *cobra.Command) {
|
|||
DisableAutoGenTag: true,
|
||||
}
|
||||
|
||||
cmd.Flags().StringSlice("users", nil, "The list of users to generate the opaque identifiers for")
|
||||
cmd.Flags().StringSlice("services", []string{identifierServiceOpenIDConnect}, fmt.Sprintf("The list of services to generate the opaque identifiers for, valid values are: %s", strings.Join(validIdentifierServices, ", ")))
|
||||
cmd.Flags().StringSlice("sectors", []string{""}, "The list of sectors to generate identifiers for")
|
||||
cmd.Flags().StringSlice(cmdFlagNameUsers, nil, "The list of users to generate the opaque identifiers for")
|
||||
cmd.Flags().StringSlice(cmdFlagNameServices, []string{identifierServiceOpenIDConnect}, fmt.Sprintf("The list of services to generate the opaque identifiers for, valid values are: %s", strings.Join(validIdentifierServices, ", ")))
|
||||
cmd.Flags().StringSlice(cmdFlagNameSectors, []string{""}, "The list of sectors to generate identifiers for")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
@ -205,9 +205,9 @@ func newStorageUserIdentifiersAddCmd() (cmd *cobra.Command) {
|
|||
DisableAutoGenTag: true,
|
||||
}
|
||||
|
||||
cmd.Flags().String("identifier", "", "The optional version 4 UUID to use, if not set a random one will be used")
|
||||
cmd.Flags().String("service", identifierServiceOpenIDConnect, fmt.Sprintf("The service to add the identifier for, valid values are: %s", strings.Join(validIdentifierServices, ", ")))
|
||||
cmd.Flags().String("sector", "", "The sector identifier to use (should usually be blank)")
|
||||
cmd.Flags().String(cmdFlagNameIdentifier, "", "The optional version 4 UUID to use, if not set a random one will be used")
|
||||
cmd.Flags().String(cmdFlagNameService, identifierServiceOpenIDConnect, fmt.Sprintf("The service to add the identifier for, valid values are: %s", strings.Join(validIdentifierServices, ", ")))
|
||||
cmd.Flags().String(cmdFlagNameSector, "", "The sector identifier to use (should usually be blank)")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
@ -257,9 +257,9 @@ func newStorageUserWebAuthnDeleteCmd() (cmd *cobra.Command) {
|
|||
DisableAutoGenTag: true,
|
||||
}
|
||||
|
||||
cmd.Flags().Bool("all", false, "delete all of the users webauthn devices")
|
||||
cmd.Flags().String("description", "", "delete a users webauthn device by description")
|
||||
cmd.Flags().String("kid", "", "delete a users webauthn device by key id")
|
||||
cmd.Flags().Bool(cmdFlagNameAll, false, "delete all of the users webauthn devices")
|
||||
cmd.Flags().String(cmdFlagNameDescription, "", "delete a users webauthn device by description")
|
||||
cmd.Flags().String(cmdFlagNameKeyID, "", "delete a users webauthn device by key id")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
@ -295,14 +295,14 @@ func newStorageUserTOTPGenerateCmd() (cmd *cobra.Command) {
|
|||
DisableAutoGenTag: true,
|
||||
}
|
||||
|
||||
cmd.Flags().String("secret", "", "Optionally set the TOTP shared secret as base32 encoded bytes (no padding), it's recommended to not set this option unless you're restoring an TOTP config")
|
||||
cmd.Flags().Uint("secret-size", schema.TOTPSecretSizeDefault, "set the TOTP secret size")
|
||||
cmd.Flags().Uint("period", 30, "set the TOTP period")
|
||||
cmd.Flags().Uint("digits", 6, "set the TOTP digits")
|
||||
cmd.Flags().String("algorithm", "SHA1", "set the TOTP algorithm")
|
||||
cmd.Flags().String("issuer", "Authelia", "set the TOTP issuer")
|
||||
cmd.Flags().BoolP("force", "f", false, "forces the TOTP configuration to be generated regardless if it exists or not")
|
||||
cmd.Flags().StringP("path", "p", "", "path to a file to create a PNG file with the QR code (optional)")
|
||||
cmd.Flags().String(cmdFlagNameSecret, "", "set the shared secret as base32 encoded bytes (no padding), it's recommended that you do not use this option unless you're restoring a configuration")
|
||||
cmd.Flags().Uint(cmdFlagNameSecretSize, schema.TOTPSecretSizeDefault, "set the secret size")
|
||||
cmd.Flags().Uint(cmdFlagNamePeriod, 30, "set the period between rotations")
|
||||
cmd.Flags().Uint(cmdFlagNameDigits, 6, "set the number of digits")
|
||||
cmd.Flags().String(cmdFlagNameAlgorithm, "SHA1", "set the algorithm to either SHA1 (supported by most applications), SHA256, or SHA512")
|
||||
cmd.Flags().String(cmdFlagNameIssuer, "Authelia", "set the issuer description")
|
||||
cmd.Flags().BoolP(cmdFlagNameForce, "f", false, "forces the configuration to be generated regardless if it exists or not")
|
||||
cmd.Flags().StringP(cmdFlagNamePath, "p", "", "path to a file to create a PNG file with the QR code (optional)")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
@ -333,7 +333,7 @@ func newStorageUserTOTPExportCmd() (cmd *cobra.Command) {
|
|||
DisableAutoGenTag: true,
|
||||
}
|
||||
|
||||
cmd.Flags().String("format", storageTOTPExportFormatURI, fmt.Sprintf("sets the output format, valid values are: %s", strings.Join(validStorageTOTPExportFormats, ", ")))
|
||||
cmd.Flags().String(cmdFlagNameFormat, storageTOTPExportFormatURI, fmt.Sprintf("sets the output format, valid values are: %s", strings.Join(validStorageTOTPExportFormats, ", ")))
|
||||
cmd.Flags().String("dir", "", "used with the png output format to specify which new directory to save the files in")
|
||||
|
||||
return cmd
|
||||
|
@ -431,7 +431,7 @@ func newStorageMigrateUpCmd() (cmd *cobra.Command) {
|
|||
DisableAutoGenTag: true,
|
||||
}
|
||||
|
||||
cmd.Flags().IntP("target", "t", 0, "sets the version to migrate to, by default this is the latest version")
|
||||
cmd.Flags().IntP(cmdFlagNameTarget, "t", 0, "sets the version to migrate to, by default this is the latest version")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
@ -448,9 +448,8 @@ func newStorageMigrateDownCmd() (cmd *cobra.Command) {
DisableAutoGenTag: true,
}

cmd.Flags().IntP("target", "t", 0, "sets the version to migrate to")
cmd.Flags().Bool("pre1", false, "sets pre1 as the version to migrate to")
cmd.Flags().Bool("destroy-data", false, "confirms you want to destroy data with this migration")
cmd.Flags().IntP(cmdFlagNameTarget, "t", 0, "sets the version to migrate to")
cmd.Flags().Bool(cmdFlagNameDestroyData, false, "confirms you want to destroy data with this migration")

return cmd
}

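With the `pre1` shortcut removed in this hunk, a schema downgrade now always needs an explicit `--target`. As a rough illustration only (the `storage migrate down` command path is inferred from the constructor names in this diff, so confirm it against `authelia storage --help` before relying on it):

```
authelia storage migrate down --target 3 --destroy-data
```
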
@ -10,6 +10,7 @@ import (
|
|||
"image/png"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
@ -48,31 +49,32 @@ func storagePersistentPreRunE(cmd *cobra.Command, _ []string) (err error) {
|
|||
}
|
||||
|
||||
mapping := map[string]string{
|
||||
"encryption-key": "storage.encryption_key",
|
||||
"sqlite.path": "storage.local.path",
|
||||
cmdFlagNameEncryptionKey: "storage.encryption_key",
|
||||
|
||||
"mysql.host": "storage.mysql.host",
|
||||
"mysql.port": "storage.mysql.port",
|
||||
"mysql.database": "storage.mysql.database",
|
||||
"mysql.username": "storage.mysql.username",
|
||||
"mysql.password": "storage.mysql.password",
|
||||
cmdFlagNameSQLite3Path: "storage.local.path",
|
||||
|
||||
"postgres.host": "storage.postgres.host",
|
||||
"postgres.port": "storage.postgres.port",
|
||||
"postgres.database": "storage.postgres.database",
|
||||
"postgres.schema": "storage.postgres.schema",
|
||||
"postgres.username": "storage.postgres.username",
|
||||
"postgres.password": "storage.postgres.password",
|
||||
cmdFlagNameMySQLHost: "storage.mysql.host",
|
||||
cmdFlagNameMySQLPort: "storage.mysql.port",
|
||||
cmdFlagNameMySQLDatabase: "storage.mysql.database",
|
||||
cmdFlagNameMySQLUsername: "storage.mysql.username",
|
||||
cmdFlagNameMySQLPassword: "storage.mysql.password",
|
||||
|
||||
cmdFlagNamePostgreSQLHost: "storage.postgres.host",
|
||||
cmdFlagNamePostgreSQLPort: "storage.postgres.port",
|
||||
cmdFlagNamePostgreSQLDatabase: "storage.postgres.database",
|
||||
cmdFlagNamePostgreSQLSchema: "storage.postgres.schema",
|
||||
cmdFlagNamePostgreSQLUsername: "storage.postgres.username",
|
||||
cmdFlagNamePostgreSQLPassword: "storage.postgres.password",
|
||||
"postgres.ssl.mode": "storage.postgres.ssl.mode",
|
||||
"postgres.ssl.root_certificate": "storage.postgres.ssl.root_certificate",
|
||||
"postgres.ssl.certificate": "storage.postgres.ssl.certificate",
|
||||
"postgres.ssl.key": "storage.postgres.ssl.key",
|
||||
|
||||
"period": "totp.period",
|
||||
"digits": "totp.digits",
|
||||
"algorithm": "totp.algorithm",
|
||||
"issuer": "totp.issuer",
|
||||
"secret-size": "totp.secret_size",
|
||||
cmdFlagNamePeriod: "totp.period",
|
||||
cmdFlagNameDigits: "totp.digits",
|
||||
cmdFlagNameAlgorithm: "totp.algorithm",
|
||||
cmdFlagNameIssuer: "totp.issuer",
|
||||
cmdFlagNameSecretSize: "totp.secret_size",
|
||||
}
|
||||
|
||||
sources = append(sources, configuration.NewEnvironmentSource(configuration.DefaultEnvPrefix, configuration.DefaultEnvDelimiter))
|
||||
|
@ -128,6 +130,7 @@ func storageSchemaEncryptionCheckRunE(cmd *cobra.Command, args []string) (err er
|
|||
var (
|
||||
provider storage.Provider
|
||||
verbose bool
|
||||
result storage.EncryptionValidationResult
|
||||
|
||||
ctx = context.Background()
|
||||
)
|
||||
|
@ -138,21 +141,43 @@ func storageSchemaEncryptionCheckRunE(cmd *cobra.Command, args []string) (err er
|
|||
_ = provider.Close()
|
||||
}()
|
||||
|
||||
if verbose, err = cmd.Flags().GetBool("verbose"); err != nil {
|
||||
if verbose, err = cmd.Flags().GetBool(cmdFlagNameVerbose); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = provider.SchemaEncryptionCheckKey(ctx, verbose); err != nil {
|
||||
if result, err = provider.SchemaEncryptionCheckKey(ctx, verbose); err != nil {
|
||||
switch {
|
||||
case errors.Is(err, storage.ErrSchemaEncryptionVersionUnsupported):
|
||||
fmt.Printf("Could not check encryption key for validity. The schema version doesn't support encryption.\n")
|
||||
case errors.Is(err, storage.ErrSchemaEncryptionInvalidKey):
|
||||
fmt.Printf("Encryption key validation: failed.\n\nError: %v.\n", err)
|
||||
fmt.Printf("Storage Encryption Key Validation: FAILURE\n\n\tCause: The schema version doesn't support encryption.\n")
|
||||
default:
|
||||
fmt.Printf("Could not check encryption key for validity.\n\nError: %v.\n", err)
|
||||
fmt.Printf("Storage Encryption Key Validation: UNKNOWN\n\n\tCause: %v.\n", err)
|
||||
}
|
||||
} else {
|
||||
fmt.Println("Encryption key validation: success.")
|
||||
if result.Success() {
|
||||
fmt.Println("Storage Encryption Key Validation: SUCCESS")
|
||||
} else {
|
||||
fmt.Printf("Storage Encryption Key Validation: FAILURE\n\n\tCause: %v.\n", storage.ErrSchemaEncryptionInvalidKey)
|
||||
}
|
||||
|
||||
if verbose {
|
||||
fmt.Printf("\nTables:")
|
||||
|
||||
tables := make([]string, 0, len(result.Tables))
|
||||
|
||||
for name := range result.Tables {
|
||||
tables = append(tables, name)
|
||||
}
|
||||
|
||||
sort.Strings(tables)
|
||||
|
||||
for _, name := range tables {
|
||||
table := result.Tables[name]
|
||||
|
||||
fmt.Printf("\n\n\tTable (%s): %s\n\t\tInvalid Rows: %d\n\t\tTotal Rows: %d", name, table.ResultDescriptor(), table.Invalid, table.Total)
|
||||
}
|
||||
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -185,13 +210,22 @@ func storageSchemaEncryptionChangeKeyRunE(cmd *cobra.Command, args []string) (er
|
|||
return errors.New("schema version must be at least version 1 to change the encryption key")
|
||||
}
|
||||
|
||||
key, err = cmd.Flags().GetString("new-encryption-key")
|
||||
useFlag := cmd.Flags().Changed(cmdFlagNameNewEncryptionKey)
|
||||
if useFlag {
|
||||
if key, err = cmd.Flags().GetString(cmdFlagNameNewEncryptionKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !useFlag || key == "" {
|
||||
if key, err = termReadPasswordStrWithPrompt("Enter New Storage Encryption Key: ", cmdFlagNameNewEncryptionKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case key == "":
|
||||
return errors.New("you must set the --new-encryption-key flag")
|
||||
return errors.New("the new encryption key must not be blank")
|
||||
case len(key) < 20:
|
||||
return errors.New("the new encryption key must be at least 20 characters")
|
||||
}
|
||||
|
@ -341,24 +375,24 @@ func storageWebAuthnDeleteGetAndValidateConfig(cmd *cobra.Command, args []string
|
|||
|
||||
flags := 0
|
||||
|
||||
if cmd.Flags().Changed("all") {
|
||||
if all, err = cmd.Flags().GetBool("all"); err != nil {
|
||||
if cmd.Flags().Changed(cmdFlagNameAll) {
|
||||
if all, err = cmd.Flags().GetBool(cmdFlagNameAll); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
flags++
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed("description") {
|
||||
if description, err = cmd.Flags().GetString("description"); err != nil {
|
||||
if cmd.Flags().Changed(cmdFlagNameDescription) {
|
||||
if description, err = cmd.Flags().GetString(cmdFlagNameDescription); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
flags++
|
||||
}
|
||||
|
||||
if byKID = cmd.Flags().Changed("kid"); byKID {
|
||||
if kid, err = cmd.Flags().GetString("kid"); err != nil {
|
||||
if byKID = cmd.Flags().Changed(cmdFlagNameKeyID); byKID {
|
||||
if kid, err = cmd.Flags().GetString(cmdFlagNameKeyID); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -574,7 +608,7 @@ func storageTOTPExportRunE(cmd *cobra.Command, args []string) (err error) {
|
|||
}
|
||||
|
||||
func storageTOTPExportGetConfigFromFlags(cmd *cobra.Command) (format, dir string, err error) {
|
||||
if format, err = cmd.Flags().GetString("format"); err != nil {
|
||||
if format, err = cmd.Flags().GetString(cmdFlagNameFormat); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
|
@ -694,7 +728,6 @@ func newStorageMigrationRunE(up bool) func(cmd *cobra.Command, args []string) (e
|
|||
var (
|
||||
provider storage.Provider
|
||||
target int
|
||||
pre1 bool
|
||||
|
||||
ctx = context.Background()
|
||||
)
|
||||
|
@ -705,45 +738,36 @@ func newStorageMigrationRunE(up bool) func(cmd *cobra.Command, args []string) (e
|
|||
_ = provider.Close()
|
||||
}()
|
||||
|
||||
if target, err = cmd.Flags().GetInt("target"); err != nil {
|
||||
if target, err = cmd.Flags().GetInt(cmdFlagNameTarget); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case up:
|
||||
switch cmd.Flags().Changed("target") {
|
||||
switch cmd.Flags().Changed(cmdFlagNameTarget) {
|
||||
case true:
|
||||
return provider.SchemaMigrate(ctx, true, target)
|
||||
default:
|
||||
return provider.SchemaMigrate(ctx, true, storage.SchemaLatest)
|
||||
}
|
||||
default:
|
||||
if pre1, err = cmd.Flags().GetBool("pre1"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !cmd.Flags().Changed("target") && !pre1 {
|
||||
return errors.New("must set target")
|
||||
if !cmd.Flags().Changed(cmdFlagNameTarget) {
|
||||
return errors.New("you must set a target version")
|
||||
}
|
||||
|
||||
if err = storageMigrateDownConfirmDestroy(cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case pre1:
|
||||
return provider.SchemaMigrate(ctx, false, -1)
|
||||
default:
|
||||
return provider.SchemaMigrate(ctx, false, target)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func storageMigrateDownConfirmDestroy(cmd *cobra.Command) (err error) {
|
||||
var destroy bool
|
||||
|
||||
if destroy, err = cmd.Flags().GetBool("destroy-data"); err != nil {
|
||||
if destroy, err = cmd.Flags().GetBool(cmdFlagNameDestroyData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -803,15 +827,21 @@ func storageSchemaInfoRunE(_ *cobra.Command, _ []string) (err error) {
|
|||
upgradeStr = "no"
|
||||
}
|
||||
|
||||
var encryption string
|
||||
var (
|
||||
encryption string
|
||||
result storage.EncryptionValidationResult
|
||||
)
|
||||
|
||||
if err = provider.SchemaEncryptionCheckKey(ctx, false); err != nil {
|
||||
switch result, err = provider.SchemaEncryptionCheckKey(ctx, false); {
|
||||
case err != nil:
|
||||
if errors.Is(err, storage.ErrSchemaEncryptionVersionUnsupported) {
|
||||
encryption = "unsupported (schema version)"
|
||||
} else {
|
||||
encryption = "invalid"
|
||||
encryption = invalid
|
||||
}
|
||||
} else {
|
||||
case !result.Success():
|
||||
encryption = invalid
|
||||
default:
|
||||
encryption = "valid"
|
||||
}
|
||||
|
||||
|
@ -847,7 +877,7 @@ func storageUserIdentifiersExport(cmd *cobra.Command, _ []string) (err error) {
|
|||
file string
|
||||
)
|
||||
|
||||
if file, err = cmd.Flags().GetString("file"); err != nil {
|
||||
if file, err = cmd.Flags().GetString(cmdFlagNameFile); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -899,7 +929,7 @@ func storageUserIdentifiersImport(cmd *cobra.Command, _ []string) (err error) {
|
|||
stat os.FileInfo
|
||||
)
|
||||
|
||||
if file, err = cmd.Flags().GetString("file"); err != nil {
|
||||
if file, err = cmd.Flags().GetString(cmdFlagNameFile); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -967,15 +997,15 @@ func storageUserIdentifiersGenerate(cmd *cobra.Command, _ []string) (err error)
|
|||
return fmt.Errorf("can't load the existing identifiers: %w", err)
|
||||
}
|
||||
|
||||
if users, err = cmd.Flags().GetStringSlice("users"); err != nil {
|
||||
if users, err = cmd.Flags().GetStringSlice(cmdFlagNameUsers); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if services, err = cmd.Flags().GetStringSlice("services"); err != nil {
|
||||
if services, err = cmd.Flags().GetStringSlice(cmdFlagNameServices); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sectors, err = cmd.Flags().GetStringSlice("sectors"); err != nil {
|
||||
if sectors, err = cmd.Flags().GetStringSlice(cmdFlagNameSectors); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -1036,7 +1066,7 @@ func storageUserIdentifiersAdd(cmd *cobra.Command, args []string) (err error) {
|
|||
service, sector string
|
||||
)
|
||||
|
||||
if service, err = cmd.Flags().GetString("service"); err != nil {
|
||||
if service, err = cmd.Flags().GetString(cmdFlagNameService); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -1046,7 +1076,7 @@ func storageUserIdentifiersAdd(cmd *cobra.Command, args []string) (err error) {
|
|||
return fmt.Errorf("the service name '%s' is invalid, the valid values are: '%s'", service, strings.Join(validIdentifierServices, "', '"))
|
||||
}
|
||||
|
||||
if sector, err = cmd.Flags().GetString("sector"); err != nil {
|
||||
if sector, err = cmd.Flags().GetString(cmdFlagNameSector); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -1056,10 +1086,10 @@ func storageUserIdentifiersAdd(cmd *cobra.Command, args []string) (err error) {
|
|||
SectorID: sector,
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed("identifier") {
|
||||
if cmd.Flags().Changed(cmdFlagNameIdentifier) {
|
||||
var identifierStr string
|
||||
|
||||
if identifierStr, err = cmd.Flags().GetString("identifier"); err != nil {
|
||||
if identifierStr, err = cmd.Flags().GetString(cmdFlagNameIdentifier); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -3,8 +3,10 @@ package commands
import (
"fmt"
"os"
"syscall"

"github.com/spf13/pflag"
"golang.org/x/term"

"github.com/authelia/authelia/v4/internal/utils"
)

@ -99,3 +101,38 @@ func flagsGetRandomCharacters(flags *pflag.FlagSet, flagNameLength, flagNameChar

return utils.RandomString(n, charset, true), nil
}

func termReadPasswordStrWithPrompt(prompt, flag string) (data string, err error) {
var d []byte

if d, err = termReadPasswordWithPrompt(prompt, flag); err != nil {
return "", err
}

return string(d), nil
}

func termReadPasswordWithPrompt(prompt, flag string) (data []byte, err error) {
fd := int(syscall.Stdin) //nolint:unconvert,nolintlint

if isTerm := term.IsTerminal(fd); !isTerm {
switch len(flag) {
case 0:
return nil, ErrStdinIsNotTerminal
case 1:
return nil, fmt.Errorf("you must either use an interactive terminal or use the -%s flag", flag)
default:
return nil, fmt.Errorf("you must either use an interactive terminal or use the --%s flag", flag)
}
}

fmt.Print(prompt)

if data, err = term.ReadPassword(fd); err != nil {
return nil, fmt.Errorf("failed to read the input from the terminal: %w", err)
}

fmt.Println("")

return data, nil
}

@ -863,7 +863,8 @@ regulation:
## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers.
# storage:
## The encryption key that is used to encrypt sensitive information in the database. Must be a string with a minimum
## length of 20. Please see the docs if you configure this with an undesirable key and need to change it.
## length of 20. Please see the docs if you configure this with an undesirable key and need to change it; you MUST use
## the CLI to change this in the database if you want to change it from a previously configured value.
# encryption_key: you_must_generate_a_random_string_of_more_than_twenty_chars_and_configure_this

##

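The note above defers key rotation to the CLI; a hedged sketch of what that looks like, assuming the subcommands follow the constructor names in this change set (`newStorageEncryptionChangeKeyCmd`, `newStorageEncryptionCheckCmd`) and that the storage backend flags or configuration are already in place:

```
authelia storage encryption change-key --new-encryption-key 'a_new_random_string_of_more_than_twenty_chars'
authelia storage encryption check --verbose
```
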
@ -90,9 +90,7 @@ func ValidateRules(config *schema.Configuration, validator *schema.StructValidat
for i, rule := range config.AccessControl.Rules {
rulePosition := i + 1

if len(rule.Domains)+len(rule.DomainsRegex) == 0 {
validator.Push(fmt.Errorf(errFmtAccessControlRuleNoDomains, ruleDescriptor(rulePosition, rule)))
}
validateDomains(rulePosition, rule, validator)

if !IsPolicyValid(rule.Policy) {
validator.Push(fmt.Errorf(errFmtAccessControlRuleInvalidPolicy, ruleDescriptor(rulePosition, rule), rule.Policy))

@ -125,6 +123,18 @@ func validateBypass(rulePosition int, rule schema.ACLRule, validator *schema.Str
}
}

func validateDomains(rulePosition int, rule schema.ACLRule, validator *schema.StructValidator) {
if len(rule.Domains)+len(rule.DomainsRegex) == 0 {
validator.Push(fmt.Errorf(errFmtAccessControlRuleNoDomains, ruleDescriptor(rulePosition, rule)))
}

for i, domain := range rule.Domains {
if len(domain) > 1 && domain[0] == '*' && domain[1] != '.' {
validator.PushWarning(fmt.Errorf("access control: rule #%d: domain #%d: domain '%s' is ineffective and should probably be '%s' instead", rulePosition, i+1, domain, fmt.Sprintf("*.%s", domain[1:])))
}
}
}

func validateNetworks(rulePosition int, rule schema.ACLRule, config schema.AccessControlConfiguration, validator *schema.StructValidator) {
for _, network := range rule.Networks {
if !IsNetworkValid(network) {

@ -88,6 +88,22 @@ func (suite *AccessControl) TestShouldRaiseErrorInvalidNetworkGroupNetwork() {
|
|||
suite.Assert().EqualError(suite.validator.Errors()[0], "access control: networks: network group 'internal' is invalid: the network 'abc.def.ghi.jkl' is not a valid IP or CIDR notation")
|
||||
}
|
||||
|
||||
func (suite *AccessControl) TestShouldRaiseWarningOnBadDomain() {
|
||||
suite.config.AccessControl.Rules = []schema.ACLRule{
|
||||
{
|
||||
Domains: []string{"*example.com"},
|
||||
Policy: "one_factor",
|
||||
},
|
||||
}
|
||||
|
||||
ValidateRules(suite.config, suite.validator)
|
||||
|
||||
suite.Assert().Len(suite.validator.Warnings(), 1)
|
||||
suite.Require().Len(suite.validator.Errors(), 0)
|
||||
|
||||
suite.Assert().EqualError(suite.validator.Warnings()[0], "access control: rule #1: domain #1: domain '*example.com' is ineffective and should probably be '*.example.com' instead")
|
||||
}
|
||||
|
||||
func (suite *AccessControl) TestShouldRaiseErrorWithNoRulesDefined() {
|
||||
suite.config.AccessControl.Rules = []schema.ACLRule{}
|
||||
|
||||
|
|
|
@ -161,15 +161,13 @@ const (
|
|||
errFmtOIDCClientInvalidSecret = "identity_providers: oidc: client '%s': option 'secret' is required"
|
||||
errFmtOIDCClientPublicInvalidSecret = "identity_providers: oidc: client '%s': option 'secret' is " +
|
||||
"required to be empty when option 'public' is true"
|
||||
errFmtOIDCClientRedirectURI = "identity_providers: oidc: client '%s': option 'redirect_uris' has an " +
|
||||
"invalid value: redirect uri '%s' must have a scheme of 'http' or 'https' but '%s' is configured"
|
||||
errFmtOIDCClientRedirectURICantBeParsed = "identity_providers: oidc: client '%s': option 'redirect_uris' has an " +
|
||||
"invalid value: redirect uri '%s' could not be parsed: %v"
|
||||
errFmtOIDCClientRedirectURIPublic = "identity_providers: oidc: client '%s': option 'redirect_uris' has the " +
|
||||
"redirect uri '%s' when option 'public' is false but this is invalid as this uri is not valid " +
|
||||
"for the openid connect confidential client type"
|
||||
errFmtOIDCClientRedirectURIAbsolute = "identity_providers: oidc: client '%s': option 'redirect_uris' has an " +
|
||||
"invalid value: redirect uri '%s' must have the scheme 'http' or 'https' but it has no scheme"
|
||||
"invalid value: redirect uri '%s' must have the scheme but it is absent"
|
||||
errFmtOIDCClientInvalidPolicy = "identity_providers: oidc: client '%s': option 'policy' must be 'one_factor' " +
|
||||
"or 'two_factor' but it is configured as '%s'"
|
||||
errFmtOIDCClientInvalidConsentMode = "identity_providers: oidc: client '%s': consent: option 'mode' must be one of " +
|
||||
|
|
|
@ -12,11 +12,11 @@ import (
|
|||
)
|
||||
|
||||
// ValidateIdentityProviders validates and updates the IdentityProviders configuration.
|
||||
func ValidateIdentityProviders(config *schema.IdentityProvidersConfiguration, validator *schema.StructValidator) {
|
||||
validateOIDC(config.OIDC, validator)
|
||||
func ValidateIdentityProviders(config *schema.IdentityProvidersConfiguration, val *schema.StructValidator) {
|
||||
validateOIDC(config.OIDC, val)
|
||||
}
|
||||
|
||||
func validateOIDC(config *schema.OpenIDConnectConfiguration, validator *schema.StructValidator) {
|
||||
func validateOIDC(config *schema.OpenIDConnectConfiguration, val *schema.StructValidator) {
|
||||
if config == nil {
|
||||
return
|
||||
}
|
||||
|
@ -25,37 +25,37 @@ func validateOIDC(config *schema.OpenIDConnectConfiguration, validator *schema.S
|
|||
|
||||
switch {
|
||||
case config.IssuerPrivateKey == nil:
|
||||
validator.Push(fmt.Errorf(errFmtOIDCNoPrivateKey))
|
||||
val.Push(fmt.Errorf(errFmtOIDCNoPrivateKey))
|
||||
default:
|
||||
if config.IssuerCertificateChain.HasCertificates() {
|
||||
if !config.IssuerCertificateChain.EqualKey(config.IssuerPrivateKey) {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCCertificateMismatch))
|
||||
val.Push(fmt.Errorf(errFmtOIDCCertificateMismatch))
|
||||
}
|
||||
|
||||
if err := config.IssuerCertificateChain.Validate(); err != nil {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCCertificateChain, err))
|
||||
val.Push(fmt.Errorf(errFmtOIDCCertificateChain, err))
|
||||
}
|
||||
}
|
||||
|
||||
if config.IssuerPrivateKey.Size()*8 < 2048 {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCInvalidPrivateKeyBitSize, 2048, config.IssuerPrivateKey.Size()*8))
|
||||
val.Push(fmt.Errorf(errFmtOIDCInvalidPrivateKeyBitSize, 2048, config.IssuerPrivateKey.Size()*8))
|
||||
}
|
||||
}
|
||||
|
||||
if config.MinimumParameterEntropy != 0 && config.MinimumParameterEntropy < 8 {
|
||||
validator.PushWarning(fmt.Errorf(errFmtOIDCServerInsecureParameterEntropy, config.MinimumParameterEntropy))
|
||||
val.PushWarning(fmt.Errorf(errFmtOIDCServerInsecureParameterEntropy, config.MinimumParameterEntropy))
|
||||
}
|
||||
|
||||
if config.EnforcePKCE != "never" && config.EnforcePKCE != "public_clients_only" && config.EnforcePKCE != "always" {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCEnforcePKCEInvalidValue, config.EnforcePKCE))
|
||||
val.Push(fmt.Errorf(errFmtOIDCEnforcePKCEInvalidValue, config.EnforcePKCE))
|
||||
}
|
||||
|
||||
validateOIDCOptionsCORS(config, validator)
|
||||
validateOIDCOptionsCORS(config, val)
|
||||
|
||||
if len(config.Clients) == 0 {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCNoClientsConfigured))
|
||||
val.Push(fmt.Errorf(errFmtOIDCNoClientsConfigured))
|
||||
} else {
|
||||
validateOIDCClients(config, validator)
|
||||
validateOIDCClients(config, val)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -91,26 +91,26 @@ func validateOIDCOptionsCORS(config *schema.OpenIDConnectConfiguration, validato
|
|||
validateOIDCOptionsCORSEndpoints(config, validator)
|
||||
}
|
||||
|
||||
func validateOIDCOptionsCORSAllowedOrigins(config *schema.OpenIDConnectConfiguration, validator *schema.StructValidator) {
|
||||
func validateOIDCOptionsCORSAllowedOrigins(config *schema.OpenIDConnectConfiguration, val *schema.StructValidator) {
|
||||
for _, origin := range config.CORS.AllowedOrigins {
|
||||
if origin.String() == "*" {
|
||||
if len(config.CORS.AllowedOrigins) != 1 {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCCORSInvalidOriginWildcard))
|
||||
val.Push(fmt.Errorf(errFmtOIDCCORSInvalidOriginWildcard))
|
||||
}
|
||||
|
||||
if config.CORS.AllowedOriginsFromClientRedirectURIs {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCCORSInvalidOriginWildcardWithClients))
|
||||
val.Push(fmt.Errorf(errFmtOIDCCORSInvalidOriginWildcardWithClients))
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if origin.Path != "" {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCCORSInvalidOrigin, origin.String(), "path"))
|
||||
val.Push(fmt.Errorf(errFmtOIDCCORSInvalidOrigin, origin.String(), "path"))
|
||||
}
|
||||
|
||||
if origin.RawQuery != "" {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCCORSInvalidOrigin, origin.String(), "query string"))
|
||||
val.Push(fmt.Errorf(errFmtOIDCCORSInvalidOrigin, origin.String(), "query string"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -132,16 +132,15 @@ func validateOIDCOptionsCORSAllowedOriginsFromClientRedirectURIs(config *schema.
|
|||
}
|
||||
}
|
||||
|
||||
func validateOIDCOptionsCORSEndpoints(config *schema.OpenIDConnectConfiguration, validator *schema.StructValidator) {
|
||||
func validateOIDCOptionsCORSEndpoints(config *schema.OpenIDConnectConfiguration, val *schema.StructValidator) {
|
||||
for _, endpoint := range config.CORS.Endpoints {
|
||||
if !utils.IsStringInSlice(endpoint, validOIDCCORSEndpoints) {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCCORSInvalidEndpoint, endpoint, strings.Join(validOIDCCORSEndpoints, "', '")))
|
||||
val.Push(fmt.Errorf(errFmtOIDCCORSInvalidEndpoint, endpoint, strings.Join(validOIDCCORSEndpoints, "', '")))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:gocyclo // TODO: Refactor.
|
||||
func validateOIDCClients(config *schema.OpenIDConnectConfiguration, validator *schema.StructValidator) {
|
||||
func validateOIDCClients(config *schema.OpenIDConnectConfiguration, val *schema.StructValidator) {
|
||||
invalidID, duplicateIDs := false, false
|
||||
|
||||
var ids []string
|
||||
|
@ -162,181 +161,180 @@ func validateOIDCClients(config *schema.OpenIDConnectConfiguration, validator *s
|
|||
|
||||
if client.Public {
|
||||
if client.Secret != nil {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientPublicInvalidSecret, client.ID))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientPublicInvalidSecret, client.ID))
|
||||
}
|
||||
} else {
|
||||
if client.Secret == nil {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientInvalidSecret, client.ID))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientInvalidSecret, client.ID))
|
||||
}
|
||||
}
|
||||
|
||||
if client.Policy == "" {
|
||||
config.Clients[c].Policy = schema.DefaultOpenIDConnectClientConfiguration.Policy
|
||||
} else if client.Policy != policyOneFactor && client.Policy != policyTwoFactor {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientInvalidPolicy, client.ID, client.Policy))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientInvalidPolicy, client.ID, client.Policy))
|
||||
}
|
||||
|
||||
switch {
|
||||
case utils.IsStringInSlice(client.ConsentMode, []string{"", "auto"}):
|
||||
if client.ConsentPreConfiguredDuration != nil {
|
||||
config.Clients[c].ConsentMode = oidc.ClientConsentModePreConfigured.String()
|
||||
} else {
|
||||
config.Clients[c].ConsentMode = oidc.ClientConsentModeExplicit.String()
|
||||
}
|
||||
case utils.IsStringInSlice(client.ConsentMode, validOIDCClientConsentModes):
|
||||
break
|
||||
default:
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientInvalidConsentMode, client.ID, strings.Join(append(validOIDCClientConsentModes, "auto"), "', '"), client.ConsentMode))
|
||||
}
|
||||
|
||||
if client.ConsentPreConfiguredDuration == nil {
|
||||
config.Clients[c].ConsentPreConfiguredDuration = schema.DefaultOpenIDConnectClientConfiguration.ConsentPreConfiguredDuration
|
||||
}
|
||||
|
||||
validateOIDCClientSectorIdentifier(client, validator)
|
||||
validateOIDCClientScopes(c, config, validator)
|
||||
validateOIDCClientGrantTypes(c, config, validator)
|
||||
validateOIDCClientResponseTypes(c, config, validator)
|
||||
validateOIDCClientResponseModes(c, config, validator)
|
||||
validateOIDDClientUserinfoAlgorithm(c, config, validator)
|
||||
validateOIDCClientRedirectURIs(client, validator)
|
||||
validateOIDCClientConsentMode(c, config, val)
|
||||
validateOIDCClientSectorIdentifier(client, val)
|
||||
validateOIDCClientScopes(c, config, val)
|
||||
validateOIDCClientGrantTypes(c, config, val)
|
||||
validateOIDCClientResponseTypes(c, config, val)
|
||||
validateOIDCClientResponseModes(c, config, val)
|
||||
validateOIDDClientUserinfoAlgorithm(c, config, val)
|
||||
validateOIDCClientRedirectURIs(client, val)
|
||||
}
|
||||
|
||||
if invalidID {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientsWithEmptyID))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientsWithEmptyID))
|
||||
}
|
||||
|
||||
if duplicateIDs {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientsDuplicateID))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientsDuplicateID))
|
||||
}
|
||||
}
|
||||
|
||||
func validateOIDCClientSectorIdentifier(client schema.OpenIDConnectClientConfiguration, validator *schema.StructValidator) {
|
||||
func validateOIDCClientSectorIdentifier(client schema.OpenIDConnectClientConfiguration, val *schema.StructValidator) {
|
||||
if client.SectorIdentifier.String() != "" {
|
||||
if utils.IsURLHostComponent(client.SectorIdentifier) || utils.IsURLHostComponentWithPort(client.SectorIdentifier) {
|
||||
return
|
||||
}
|
||||
|
||||
if client.SectorIdentifier.Scheme != "" {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifier, client.ID, client.SectorIdentifier.String(), client.SectorIdentifier.Host, "scheme", client.SectorIdentifier.Scheme))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifier, client.ID, client.SectorIdentifier.String(), client.SectorIdentifier.Host, "scheme", client.SectorIdentifier.Scheme))
|
||||
|
||||
if client.SectorIdentifier.Path != "" {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifier, client.ID, client.SectorIdentifier.String(), client.SectorIdentifier.Host, "path", client.SectorIdentifier.Path))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifier, client.ID, client.SectorIdentifier.String(), client.SectorIdentifier.Host, "path", client.SectorIdentifier.Path))
|
||||
}
|
||||
|
||||
if client.SectorIdentifier.RawQuery != "" {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifier, client.ID, client.SectorIdentifier.String(), client.SectorIdentifier.Host, "query", client.SectorIdentifier.RawQuery))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifier, client.ID, client.SectorIdentifier.String(), client.SectorIdentifier.Host, "query", client.SectorIdentifier.RawQuery))
|
||||
}
|
||||
|
||||
if client.SectorIdentifier.Fragment != "" {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifier, client.ID, client.SectorIdentifier.String(), client.SectorIdentifier.Host, "fragment", client.SectorIdentifier.Fragment))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifier, client.ID, client.SectorIdentifier.String(), client.SectorIdentifier.Host, "fragment", client.SectorIdentifier.Fragment))
|
||||
}
|
||||
|
||||
if client.SectorIdentifier.User != nil {
|
||||
if client.SectorIdentifier.User.Username() != "" {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifier, client.ID, client.SectorIdentifier.String(), client.SectorIdentifier.Host, "username", client.SectorIdentifier.User.Username()))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifier, client.ID, client.SectorIdentifier.String(), client.SectorIdentifier.Host, "username", client.SectorIdentifier.User.Username()))
|
||||
}
|
||||
|
||||
if _, set := client.SectorIdentifier.User.Password(); set {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifierWithoutValue, client.ID, client.SectorIdentifier.String(), client.SectorIdentifier.Host, "password"))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifierWithoutValue, client.ID, client.SectorIdentifier.String(), client.SectorIdentifier.Host, "password"))
|
||||
}
|
||||
}
|
||||
} else if client.SectorIdentifier.Host == "" {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifierHost, client.ID, client.SectorIdentifier.String()))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientInvalidSectorIdentifierHost, client.ID, client.SectorIdentifier.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func validateOIDCClientScopes(c int, configuration *schema.OpenIDConnectConfiguration, validator *schema.StructValidator) {
|
||||
if len(configuration.Clients[c].Scopes) == 0 {
|
||||
configuration.Clients[c].Scopes = schema.DefaultOpenIDConnectClientConfiguration.Scopes
|
||||
func validateOIDCClientConsentMode(c int, config *schema.OpenIDConnectConfiguration, val *schema.StructValidator) {
|
||||
switch {
|
||||
case utils.IsStringInSlice(config.Clients[c].ConsentMode, []string{"", "auto"}):
|
||||
if config.Clients[c].ConsentPreConfiguredDuration != nil {
|
||||
config.Clients[c].ConsentMode = oidc.ClientConsentModePreConfigured.String()
|
||||
} else {
|
||||
config.Clients[c].ConsentMode = oidc.ClientConsentModeExplicit.String()
|
||||
}
|
||||
case utils.IsStringInSlice(config.Clients[c].ConsentMode, validOIDCClientConsentModes):
|
||||
break
|
||||
default:
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientInvalidConsentMode, config.Clients[c].ID, strings.Join(append(validOIDCClientConsentModes, "auto"), "', '"), config.Clients[c].ConsentMode))
|
||||
}
|
||||
|
||||
if config.Clients[c].ConsentMode == oidc.ClientConsentModePreConfigured.String() && config.Clients[c].ConsentPreConfiguredDuration == nil {
|
||||
config.Clients[c].ConsentPreConfiguredDuration = schema.DefaultOpenIDConnectClientConfiguration.ConsentPreConfiguredDuration
|
||||
}
|
||||
}
|
||||
|
||||
func validateOIDCClientScopes(c int, config *schema.OpenIDConnectConfiguration, val *schema.StructValidator) {
|
||||
if len(config.Clients[c].Scopes) == 0 {
|
||||
config.Clients[c].Scopes = schema.DefaultOpenIDConnectClientConfiguration.Scopes
|
||||
return
|
||||
}
|
||||
|
||||
if !utils.IsStringInSlice(oidc.ScopeOpenID, configuration.Clients[c].Scopes) {
|
||||
configuration.Clients[c].Scopes = append(configuration.Clients[c].Scopes, oidc.ScopeOpenID)
|
||||
if !utils.IsStringInSlice(oidc.ScopeOpenID, config.Clients[c].Scopes) {
|
||||
config.Clients[c].Scopes = append(config.Clients[c].Scopes, oidc.ScopeOpenID)
|
||||
}
|
||||
|
||||
for _, scope := range configuration.Clients[c].Scopes {
|
||||
for _, scope := range config.Clients[c].Scopes {
|
||||
if !utils.IsStringInSlice(scope, validOIDCScopes) {
|
||||
validator.Push(fmt.Errorf(
|
||||
val.Push(fmt.Errorf(
|
||||
errFmtOIDCClientInvalidEntry,
|
||||
configuration.Clients[c].ID, "scopes", strings.Join(validOIDCScopes, "', '"), scope))
|
||||
config.Clients[c].ID, "scopes", strings.Join(validOIDCScopes, "', '"), scope))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func validateOIDCClientGrantTypes(c int, configuration *schema.OpenIDConnectConfiguration, validator *schema.StructValidator) {
|
||||
if len(configuration.Clients[c].GrantTypes) == 0 {
|
||||
configuration.Clients[c].GrantTypes = schema.DefaultOpenIDConnectClientConfiguration.GrantTypes
|
||||
func validateOIDCClientGrantTypes(c int, config *schema.OpenIDConnectConfiguration, val *schema.StructValidator) {
|
||||
if len(config.Clients[c].GrantTypes) == 0 {
|
||||
config.Clients[c].GrantTypes = schema.DefaultOpenIDConnectClientConfiguration.GrantTypes
|
||||
return
|
||||
}
|
||||
|
||||
for _, grantType := range configuration.Clients[c].GrantTypes {
|
||||
for _, grantType := range config.Clients[c].GrantTypes {
|
||||
if !utils.IsStringInSlice(grantType, validOIDCGrantTypes) {
|
||||
validator.Push(fmt.Errorf(
|
||||
val.Push(fmt.Errorf(
|
||||
errFmtOIDCClientInvalidEntry,
|
||||
configuration.Clients[c].ID, "grant_types", strings.Join(validOIDCGrantTypes, "', '"), grantType))
|
||||
config.Clients[c].ID, "grant_types", strings.Join(validOIDCGrantTypes, "', '"), grantType))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func validateOIDCClientResponseTypes(c int, configuration *schema.OpenIDConnectConfiguration, _ *schema.StructValidator) {
|
||||
if len(configuration.Clients[c].ResponseTypes) == 0 {
|
||||
configuration.Clients[c].ResponseTypes = schema.DefaultOpenIDConnectClientConfiguration.ResponseTypes
|
||||
func validateOIDCClientResponseTypes(c int, config *schema.OpenIDConnectConfiguration, _ *schema.StructValidator) {
|
||||
if len(config.Clients[c].ResponseTypes) == 0 {
|
||||
config.Clients[c].ResponseTypes = schema.DefaultOpenIDConnectClientConfiguration.ResponseTypes
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func validateOIDCClientResponseModes(c int, configuration *schema.OpenIDConnectConfiguration, validator *schema.StructValidator) {
|
||||
if len(configuration.Clients[c].ResponseModes) == 0 {
|
||||
configuration.Clients[c].ResponseModes = schema.DefaultOpenIDConnectClientConfiguration.ResponseModes
|
||||
func validateOIDCClientResponseModes(c int, config *schema.OpenIDConnectConfiguration, validator *schema.StructValidator) {
|
||||
if len(config.Clients[c].ResponseModes) == 0 {
|
||||
config.Clients[c].ResponseModes = schema.DefaultOpenIDConnectClientConfiguration.ResponseModes
|
||||
return
|
||||
}
|
||||
|
||||
for _, responseMode := range configuration.Clients[c].ResponseModes {
|
||||
for _, responseMode := range config.Clients[c].ResponseModes {
|
||||
if !utils.IsStringInSlice(responseMode, validOIDCResponseModes) {
|
||||
validator.Push(fmt.Errorf(
|
||||
errFmtOIDCClientInvalidEntry,
|
||||
configuration.Clients[c].ID, "response_modes", strings.Join(validOIDCResponseModes, "', '"), responseMode))
|
||||
config.Clients[c].ID, "response_modes", strings.Join(validOIDCResponseModes, "', '"), responseMode))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func validateOIDDClientUserinfoAlgorithm(c int, configuration *schema.OpenIDConnectConfiguration, validator *schema.StructValidator) {
|
||||
if configuration.Clients[c].UserinfoSigningAlgorithm == "" {
|
||||
configuration.Clients[c].UserinfoSigningAlgorithm = schema.DefaultOpenIDConnectClientConfiguration.UserinfoSigningAlgorithm
|
||||
} else if !utils.IsStringInSlice(configuration.Clients[c].UserinfoSigningAlgorithm, validOIDCUserinfoAlgorithms) {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientInvalidUserinfoAlgorithm,
|
||||
configuration.Clients[c].ID, strings.Join(validOIDCUserinfoAlgorithms, ", "), configuration.Clients[c].UserinfoSigningAlgorithm))
|
||||
func validateOIDDClientUserinfoAlgorithm(c int, config *schema.OpenIDConnectConfiguration, val *schema.StructValidator) {
|
||||
if config.Clients[c].UserinfoSigningAlgorithm == "" {
|
||||
config.Clients[c].UserinfoSigningAlgorithm = schema.DefaultOpenIDConnectClientConfiguration.UserinfoSigningAlgorithm
|
||||
} else if !utils.IsStringInSlice(config.Clients[c].UserinfoSigningAlgorithm, validOIDCUserinfoAlgorithms) {
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientInvalidUserinfoAlgorithm,
|
||||
config.Clients[c].ID, strings.Join(validOIDCUserinfoAlgorithms, ", "), config.Clients[c].UserinfoSigningAlgorithm))
|
||||
}
|
||||
}
|
||||
|
||||
func validateOIDCClientRedirectURIs(client schema.OpenIDConnectClientConfiguration, validator *schema.StructValidator) {
|
||||
func validateOIDCClientRedirectURIs(client schema.OpenIDConnectClientConfiguration, val *schema.StructValidator) {
|
||||
for _, redirectURI := range client.RedirectURIs {
|
||||
if redirectURI == oauth2InstalledApp {
|
||||
if client.Public {
|
||||
continue
|
||||
}
|
||||
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientRedirectURIPublic, client.ID, oauth2InstalledApp))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientRedirectURIPublic, client.ID, oauth2InstalledApp))
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
parsedURL, err := url.Parse(redirectURI)
|
||||
if err != nil {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientRedirectURICantBeParsed, client.ID, redirectURI, err))
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientRedirectURICantBeParsed, client.ID, redirectURI, err))
|
||||
continue
|
||||
}
|
||||
|
||||
if !parsedURL.IsAbs() {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientRedirectURIAbsolute, client.ID, redirectURI))
|
||||
if !parsedURL.IsAbs() || (!client.Public && parsedURL.Scheme == "") {
|
||||
val.Push(fmt.Errorf(errFmtOIDCClientRedirectURIAbsolute, client.ID, redirectURI))
|
||||
return
|
||||
}
|
||||
|
||||
if !client.Public && parsedURL.Scheme != schemeHTTPS && parsedURL.Scheme != schemeHTTP {
|
||||
validator.Push(fmt.Errorf(errFmtOIDCClientRedirectURI, client.ID, redirectURI, parsedURL.Scheme))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -124,13 +125,12 @@ func TestShouldRaiseErrorWhenOIDCCORSOriginsHasInvalidValues(t *testing.T) {
|
|||
|
||||
ValidateIdentityProviders(config, validator)
|
||||
|
||||
require.Len(t, validator.Errors(), 6)
|
||||
require.Len(t, validator.Errors(), 5)
|
||||
assert.EqualError(t, validator.Errors()[0], "identity_providers: oidc: cors: option 'allowed_origins' contains an invalid value 'https://example.com/' as it has a path: origins must only be scheme, hostname, and an optional port")
|
||||
assert.EqualError(t, validator.Errors()[1], "identity_providers: oidc: cors: option 'allowed_origins' contains an invalid value 'https://site.example.com/subpath' as it has a path: origins must only be scheme, hostname, and an optional port")
|
||||
assert.EqualError(t, validator.Errors()[2], "identity_providers: oidc: cors: option 'allowed_origins' contains an invalid value 'https://site.example.com?example=true' as it has a query string: origins must only be scheme, hostname, and an optional port")
|
||||
assert.EqualError(t, validator.Errors()[3], "identity_providers: oidc: cors: option 'allowed_origins' contains the wildcard origin '*' with more than one origin but the wildcard origin must be defined by itself")
|
||||
assert.EqualError(t, validator.Errors()[4], "identity_providers: oidc: cors: option 'allowed_origins' contains the wildcard origin '*' cannot be specified with option 'allowed_origins_from_client_redirect_uris' enabled")
|
||||
assert.EqualError(t, validator.Errors()[5], "identity_providers: oidc: client 'myclient': option 'redirect_uris' has an invalid value: redirect uri 'file://a/file' must have a scheme of 'http' or 'https' but 'file' is configured")
|
||||
|
||||
require.Len(t, config.OIDC.CORS.AllowedOrigins, 6)
|
||||
assert.Equal(t, "*", config.OIDC.CORS.AllowedOrigins[3].String())
|
||||
|
@ -314,6 +314,23 @@ func TestShouldRaiseErrorWhenOIDCServerClientBadValues(t *testing.T) {
|
|||
fmt.Sprintf(errFmtOIDCClientInvalidSectorIdentifierHost, "client-invalid-sector", "example.com/path?query=abc#fragment"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "InvalidConsentMode",
|
||||
Clients: []schema.OpenIDConnectClientConfiguration{
|
||||
{
|
||||
ID: "client-bad-consent-mode",
|
||||
Secret: MustDecodeSecret("$plaintext$a-secret"),
|
||||
Policy: policyTwoFactor,
|
||||
RedirectURIs: []string{
|
||||
"https://google.com",
|
||||
},
|
||||
ConsentMode: "cap",
|
||||
},
|
||||
},
|
||||
Errors: []string{
|
||||
fmt.Sprintf(errFmtOIDCClientInvalidConsentMode, "client-bad-consent-mode", strings.Join(append(validOIDCClientConsentModes, "auto"), "', '"), "cap"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
|
@ -634,6 +651,8 @@ func TestValidateIdentityProvidersShouldNotRaiseErrorsOnValidPublicClients(t *te
|
|||
}
|
||||
|
||||
func TestValidateIdentityProvidersShouldSetDefaultValues(t *testing.T) {
|
||||
timeDay := time.Hour * 24
|
||||
|
||||
validator := schema.NewStructValidator()
|
||||
config := &schema.IdentityProvidersConfiguration{
|
||||
OIDC: &schema.OpenIDConnectConfiguration{
|
||||
|
@ -646,6 +665,7 @@ func TestValidateIdentityProvidersShouldSetDefaultValues(t *testing.T) {
|
|||
RedirectURIs: []string{
|
||||
"https://google.com",
|
||||
},
|
||||
ConsentPreConfiguredDuration: &timeDay,
|
||||
},
|
||||
{
|
||||
ID: "b-client",
|
||||
|
@ -671,6 +691,30 @@ func TestValidateIdentityProvidersShouldSetDefaultValues(t *testing.T) {
|
|||
"fragment",
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: "c-client",
|
||||
Secret: MustDecodeSecret("$plaintext$a-client-secret"),
|
||||
RedirectURIs: []string{
|
||||
"https://google.com",
|
||||
},
|
||||
ConsentMode: "implicit",
|
||||
},
|
||||
{
|
||||
ID: "d-client",
|
||||
Secret: MustDecodeSecret("$plaintext$a-client-secret"),
|
||||
RedirectURIs: []string{
|
||||
"https://google.com",
|
||||
},
|
||||
ConsentMode: "explicit",
|
||||
},
|
||||
{
|
||||
ID: "e-client",
|
||||
Secret: MustDecodeSecret("$plaintext$a-client-secret"),
|
||||
RedirectURIs: []string{
|
||||
"https://google.com",
|
||||
},
|
||||
ConsentMode: "pre-configured",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -703,6 +747,15 @@ func TestValidateIdentityProvidersShouldSetDefaultValues(t *testing.T) {
|
|||
assert.Equal(t, "groups", config.OIDC.Clients[1].Scopes[0])
|
||||
assert.Equal(t, "openid", config.OIDC.Clients[1].Scopes[1])
|
||||
|
||||
// Assert Clients[0] ends up configured with the correct consent mode.
|
||||
require.NotNil(t, config.OIDC.Clients[0].ConsentPreConfiguredDuration)
|
||||
assert.Equal(t, time.Hour*24, *config.OIDC.Clients[0].ConsentPreConfiguredDuration)
|
||||
assert.Equal(t, "pre-configured", config.OIDC.Clients[0].ConsentMode)
|
||||
|
||||
// Assert Clients[1] ends up configured with the correct consent mode.
|
||||
assert.Nil(t, config.OIDC.Clients[1].ConsentPreConfiguredDuration)
|
||||
assert.Equal(t, "explicit", config.OIDC.Clients[1].ConsentMode)
|
||||
|
||||
// Assert Clients[0] ends up configured with the default GrantTypes.
|
||||
require.Len(t, config.OIDC.Clients[0].GrantTypes, 2)
|
||||
assert.Equal(t, "refresh_token", config.OIDC.Clients[0].GrantTypes[0])
|
||||
|
@ -737,6 +790,15 @@ func TestValidateIdentityProvidersShouldSetDefaultValues(t *testing.T) {
|
|||
assert.Equal(t, time.Minute, config.OIDC.AuthorizeCodeLifespan)
|
||||
assert.Equal(t, time.Hour, config.OIDC.IDTokenLifespan)
|
||||
assert.Equal(t, time.Minute*90, config.OIDC.RefreshTokenLifespan)
|
||||
|
||||
assert.Equal(t, "implicit", config.OIDC.Clients[2].ConsentMode)
|
||||
assert.Nil(t, config.OIDC.Clients[2].ConsentPreConfiguredDuration)
|
||||
|
||||
assert.Equal(t, "explicit", config.OIDC.Clients[3].ConsentMode)
|
||||
assert.Nil(t, config.OIDC.Clients[3].ConsentPreConfiguredDuration)
|
||||
|
||||
assert.Equal(t, "pre-configured", config.OIDC.Clients[4].ConsentMode)
|
||||
assert.Equal(t, schema.DefaultOpenIDConnectClientConfiguration.ConsentPreConfiguredDuration, config.OIDC.Clients[4].ConsentPreConfiguredDuration)
|
||||
}
|
||||
|
||||
// All valid schemes are supported as defined in https://datatracker.ietf.org/doc/html/rfc8252#section-7.1
|
||||
|
@ -749,6 +811,7 @@ func TestValidateOIDCClientRedirectURIsSupportingPrivateUseURISchemes(t *testing
|
|||
"oc://ios.owncloud.com",
|
||||
// example given in the RFC https://datatracker.ietf.org/doc/html/rfc8252#section-7.1
|
||||
"com.example.app:/oauth2redirect/example-provider",
|
||||
oauth2InstalledApp,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -767,10 +830,9 @@ func TestValidateOIDCClientRedirectURIsSupportingPrivateUseURISchemes(t *testing
|
|||
validateOIDCClientRedirectURIs(conf, validator)
|
||||
|
||||
assert.Len(t, validator.Warnings(), 0)
|
||||
assert.Len(t, validator.Errors(), 2)
|
||||
assert.Len(t, validator.Errors(), 1)
|
||||
assert.ElementsMatch(t, validator.Errors(), []error{
|
||||
errors.New("identity_providers: oidc: client 'owncloud': option 'redirect_uris' has an invalid value: redirect uri 'oc://ios.owncloud.com' must have a scheme of 'http' or 'https' but 'oc' is configured"),
|
||||
errors.New("identity_providers: oidc: client 'owncloud': option 'redirect_uris' has an invalid value: redirect uri 'com.example.app:/oauth2redirect/example-provider' must have a scheme of 'http' or 'https' but 'com.example.app' is configured"),
|
||||
errors.New("identity_providers: oidc: client 'owncloud': option 'redirect_uris' has the redirect uri 'urn:ietf:wg:oauth:2.0:oob' when option 'public' is false but this is invalid as this uri is not valid for the openid connect confidential client type"),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
|
|
@ -41,6 +41,8 @@ func validateSession(config *schema.SessionConfiguration, validator *schema.Stru

if config.Domain == "" {
validator.Push(fmt.Errorf(errFmtSessionOptionRequired, "domain"))
} else if strings.HasPrefix(config.Domain, ".") {
validator.PushWarning(fmt.Errorf("session: option 'domain' has a prefix of '.' which is not supported or intended behaviour: you can use this at your own risk but we recommend removing it"))
}

if strings.HasPrefix(config.Domain, "*.") {

@ -49,6 +49,20 @@ func TestShouldSetDefaultSessionValuesWhenNegative(t *testing.T) {
|
|||
assert.Equal(t, schema.DefaultSessionConfiguration.RememberMeDuration, config.RememberMeDuration)
|
||||
}
|
||||
|
||||
func TestShouldWarnSessionValuesWhenPotentiallyInvalid(t *testing.T) {
|
||||
validator := schema.NewStructValidator()
|
||||
config := newDefaultSessionConfig()
|
||||
|
||||
config.Domain = ".example.com"
|
||||
|
||||
ValidateSession(&config, validator)
|
||||
|
||||
require.Len(t, validator.Warnings(), 1)
|
||||
assert.Len(t, validator.Errors(), 0)
|
||||
|
||||
assert.EqualError(t, validator.Warnings()[0], "session: option 'domain' has a prefix of '.' which is not supported or intended behaviour: you can use this at your own risk but we recommend removing it")
|
||||
}
|
||||
|
||||
func TestShouldHandleRedisConfigSuccessfully(t *testing.T) {
|
||||
validator := schema.NewStructValidator()
|
||||
config := newDefaultSessionConfig()
|
||||
|
|
|
@@ -688,11 +688,12 @@ func (mr *MockStorageMockRecorder) SchemaEncryptionChangeKey(arg0, arg1 interfac
}

// SchemaEncryptionCheckKey mocks base method.
func (m *MockStorage) SchemaEncryptionCheckKey(arg0 context.Context, arg1 bool) error {
func (m *MockStorage) SchemaEncryptionCheckKey(arg0 context.Context, arg1 bool) (storage.EncryptionValidationResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SchemaEncryptionCheckKey", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
ret0, _ := ret[0].(storage.EncryptionValidationResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// SchemaEncryptionCheckKey indicates an expected call of SchemaEncryptionCheckKey.

@@ -15,6 +15,7 @@ const (

tableOAuth2ConsentSession = "oauth2_consent_session"
tableOAuth2ConsentPreConfiguration = "oauth2_consent_preconfiguration"

tableOAuth2AuthorizeCodeSession = "oauth2_authorization_code_session"
tableOAuth2AccessTokenSession = "oauth2_access_token_session" //nolint:gosec // This is not a hardcoded credential.
tableOAuth2RefreshTokenSession = "oauth2_refresh_token_session" //nolint:gosec // This is not a hardcoded credential.

@@ -24,8 +25,6 @@ const (

tableMigrations = "migrations"
tableEncryption = "encryption"

tablePrefixBackup = "_bkp_"
)

// OAuth2SessionType represents the potential OAuth 2.0 session types.

@@ -58,6 +57,24 @@ func (s OAuth2SessionType) String() string {
}
}

// Table returns the table name for this session type.
func (s OAuth2SessionType) Table() string {
switch s {
case OAuth2SessionTypeAuthorizeCode:
return tableOAuth2AuthorizeCodeSession
case OAuth2SessionTypeAccessToken:
return tableOAuth2AccessTokenSession
case OAuth2SessionTypeRefreshToken:
return tableOAuth2RefreshTokenSession
case OAuth2SessionTypePKCEChallenge:
return tableOAuth2PKCERequestSession
case OAuth2SessionTypeOpenIDConnect:
return tableOAuth2OpenIDConnectSession
default:
return ""
}
}
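
The empty string returned by the default case doubles as a sentinel: later in this change set the encryption check and change-key code walks every session type by incrementing an integer until Table() returns "". A minimal, self-contained sketch of that enumeration pattern; the type and table names are re-declared locally for illustration rather than imported from the real package.

```go
package main

import "fmt"

// OAuth2SessionType mirrors the storage package's enumeration purely for
// illustration; the real constants and table names live in internal/storage.
type OAuth2SessionType int

const (
	OAuth2SessionTypeAuthorizeCode OAuth2SessionType = iota
	OAuth2SessionTypeAccessToken
	OAuth2SessionTypeRefreshToken
)

// Table returns the table name for the session type, or "" when the value is
// past the end of the enumeration.
func (s OAuth2SessionType) Table() string {
	switch s {
	case OAuth2SessionTypeAuthorizeCode:
		return "oauth2_authorization_code_session"
	case OAuth2SessionTypeAccessToken:
		return "oauth2_access_token_session"
	case OAuth2SessionTypeRefreshToken:
		return "oauth2_refresh_token_session"
	default:
		return ""
	}
}

func main() {
	// The empty-string return acts as a loop terminator, so callers can walk
	// every session type without maintaining a separate list of constants.
	for i := 0; ; i++ {
		t := OAuth2SessionType(i)
		if t.Table() == "" {
			break
		}

		fmt.Println(t.Table())
	}
}
```
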
const (
|
||||
sqlNetworkTypeTCP = "tcp"
|
||||
sqlNetworkTypeUnixSocket = "unix"
|
||||
|
@ -72,16 +89,6 @@ const (
|
|||
tablePre1TOTPSecrets = "totp_secrets"
|
||||
tablePre1IdentityVerificationTokens = "identity_verification_tokens"
|
||||
tablePre1U2FDevices = "u2f_devices"
|
||||
|
||||
tablePre1Config = "config"
|
||||
|
||||
tableAlphaAuthenticationLogs = "AuthenticationLogs"
|
||||
tableAlphaIdentityVerificationTokens = "IdentityVerificationTokens"
|
||||
tableAlphaPreferences = "Preferences"
|
||||
tableAlphaPreferencesTableName = "PreferencesTableName"
|
||||
tableAlphaSecondFactorPreferences = "SecondFactorPreferences"
|
||||
tableAlphaTOTPSecrets = "TOTPSecrets"
|
||||
tableAlphaU2FDeviceHandles = "U2FDeviceHandles"
|
||||
)
|
||||
|
||||
var tablesPre1 = []string{
|
||||
|
@ -114,3 +121,8 @@ const (
|
|||
var (
|
||||
reMigration = regexp.MustCompile(`^V(\d{4})\.([^.]+)\.(all|sqlite|postgres|mysql)\.(up|down)\.sql$`)
|
||||
)
|
||||
|
||||
const (
|
||||
na = "N/A"
|
||||
invalid = "invalid"
|
||||
)
|
||||
|
|
|
@ -35,7 +35,7 @@ var (
|
|||
|
||||
// ErrSchemaEncryptionInvalidKey is returned when the schema is checked if the encryption key is valid for
|
||||
// the database but the key doesn't appear to be valid.
|
||||
ErrSchemaEncryptionInvalidKey = errors.New("the encryption key is not valid against the schema check value")
|
||||
ErrSchemaEncryptionInvalidKey = errors.New("the configured encryption key does not appear to be valid for this database which may occur if the encryption key was changed in the configuration without using the cli to change it in the database")
|
||||
)
|
||||
|
||||
// Error formats for the storage provider.
|
||||
|
@ -49,7 +49,6 @@ const (
|
|||
|
||||
const (
|
||||
errFmtFailedMigration = "schema migration %d (%s) failed: %w"
|
||||
errFmtFailedMigrationPre1 = "schema migration pre1 failed: %w"
|
||||
errFmtSchemaCurrentGreaterThanLatestKnown = "current schema version is greater than the latest known schema " +
|
||||
"version, you must downgrade to schema version %d before you can use this version of Authelia"
|
||||
)
|
||||
|
@ -59,3 +58,8 @@ const (
|
|||
logFmtMigrationComplete = "Storage schema migration from %s to %s is complete"
|
||||
logFmtErrClosingConn = "Error occurred closing SQL connection: %v"
|
||||
)
|
||||
|
||||
const (
|
||||
errFmtMigrationPre1 = "schema migration %s pre1 is no longer supported: you must use an older version of authelia to perform this migration: %s"
|
||||
errFmtMigrationPre1SuggestedVersion = "the suggested authelia version is 4.37.2"
|
||||
)
|
||||
|
|
|
@ -46,42 +46,6 @@ func latestMigrationVersion(providerName string) (version int, err error) {
|
|||
return version, nil
|
||||
}
|
||||
|
||||
func loadMigration(providerName string, version int, up bool) (migration *model.SchemaMigration, err error) {
|
||||
entries, err := migrationsFS.ReadDir("migrations")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
m, err := scanMigration(entry.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migration = &m
|
||||
|
||||
if up != migration.Up {
|
||||
continue
|
||||
}
|
||||
|
||||
if migration.Provider != providerAll && migration.Provider != providerName {
|
||||
continue
|
||||
}
|
||||
|
||||
if version != migration.Version {
|
||||
continue
|
||||
}
|
||||
|
||||
return migration, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("migration not found")
|
||||
}
|
||||
|
||||
// loadMigrations scans the migrations fs and loads the appropriate migrations for a given providerName, prior and
|
||||
// target versions. If the target version is -1 this indicates the latest version. If the target version is 0
|
||||
// this indicates the database zero state.
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
PRAGMA foreign_keys=off;
|
||||
|
||||
BEGIN TRANSACTION;
|
||||
|
||||
DELETE FROM oauth2_consent_session
|
||||
WHERE subject IN(SELECT identifier FROM user_opaque_identifier WHERE username = '' AND service IN('openid', 'openid_connect'));
|
||||
|
||||
|
@ -261,6 +259,4 @@ ORDER BY id;
|
|||
|
||||
DROP TABLE IF EXISTS _bkp_DOWN_V0005_oauth2_openid_connect_session;
|
||||
|
||||
COMMIT;
|
||||
|
||||
PRAGMA foreign_keys=on;
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
PRAGMA foreign_keys=off;
|
||||
|
||||
BEGIN TRANSACTION;
|
||||
|
||||
DELETE FROM oauth2_consent_session
|
||||
WHERE subject IN(SELECT identifier FROM user_opaque_identifier WHERE username = '' AND service IN('openid', 'openid_connect'));
|
||||
|
||||
|
@ -255,6 +253,4 @@ ORDER BY id;
|
|||
|
||||
DROP TABLE IF EXISTS _bkp_UP_V0005_oauth2_openid_connect_session;
|
||||
|
||||
COMMIT;
|
||||
|
||||
PRAGMA foreign_keys=on;
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
PRAGMA foreign_keys=off;
|
||||
|
||||
BEGIN TRANSACTION;
|
||||
|
||||
ALTER TABLE webauthn_devices
|
||||
RENAME TO _bkp_DOWN_V0007_webauthn_devices;
|
||||
|
||||
|
@ -612,6 +610,4 @@ ORDER BY id;
|
|||
|
||||
DROP TABLE IF EXISTS _bkp_DOWN_V0007_oauth2_openid_connect_session;
|
||||
|
||||
COMMIT;
|
||||
|
||||
PRAGMA foreign_keys=on;
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
PRAGMA foreign_keys=off;
|
||||
|
||||
BEGIN TRANSACTION;
|
||||
|
||||
DROP TABLE IF EXISTS _bkp_UP_V0002_totp_configurations;
|
||||
DROP TABLE IF EXISTS _bkp_UP_V0002_u2f_devices;
|
||||
DROP TABLE IF EXISTS totp_secrets;
|
||||
|
@ -662,6 +660,4 @@ ORDER BY id;
|
|||
|
||||
DROP TABLE IF EXISTS _bkp_UP_V0007_oauth2_openid_connect_session;
|
||||
|
||||
COMMIT;
|
||||
|
||||
PRAGMA foreign_keys=on;
|
||||
|
|
|
@@ -79,8 +79,8 @@ type Provider interface {
SchemaMigrationsUp(ctx context.Context, version int) (migrations []model.SchemaMigration, err error)
SchemaMigrationsDown(ctx context.Context, version int) (migrations []model.SchemaMigration, err error)

SchemaEncryptionChangeKey(ctx context.Context, encryptionKey string) (err error)
SchemaEncryptionCheckKey(ctx context.Context, verbose bool) (err error)
SchemaEncryptionChangeKey(ctx context.Context, key string) (err error)
SchemaEncryptionCheckKey(ctx context.Context, verbose bool) (result EncryptionValidationResult, err error)

Close() (err error)
}

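With the interface change above, callers of SchemaEncryptionCheckKey now receive a structured result rather than only an error. The sketch below shows one way such a result could be interpreted; the field names mirror what appears elsewhere in this change set (Tables, InvalidCheckValue, Total, Invalid, Error), but the Success logic shown is an assumption for illustration, not the package's actual implementation.

```go
package main

import "fmt"

// These mirror the result types referenced in the diff; they are re-declared
// here only so the sketch compiles on its own.
type EncryptionValidationTableResult struct {
	Error   error
	Total   int
	Invalid int
}

type EncryptionValidationResult struct {
	InvalidCheckValue bool
	Tables            map[string]EncryptionValidationTableResult
}

// Success is an assumed interpretation: the check value decrypted correctly
// and no table contained rows the configured key could not decrypt.
func (r EncryptionValidationResult) Success() bool {
	if r.InvalidCheckValue {
		return false
	}

	for _, table := range r.Tables {
		if table.Error != nil || table.Invalid != 0 {
			return false
		}
	}

	return true
}

func main() {
	result := EncryptionValidationResult{
		Tables: map[string]EncryptionValidationTableResult{
			"totp_configurations": {Total: 10, Invalid: 2},
		},
	}

	if !result.Success() {
		fmt.Println("the configured encryption key could not decrypt every row")
	}
}
```
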
@ -43,8 +43,6 @@ func NewSQLProvider(config *schema.Configuration, name, driverName, dataSourceNa
|
|||
sqlSelectTOTPConfig: fmt.Sprintf(queryFmtSelectTOTPConfiguration, tableTOTPConfigurations),
|
||||
sqlSelectTOTPConfigs: fmt.Sprintf(queryFmtSelectTOTPConfigurations, tableTOTPConfigurations),
|
||||
|
||||
sqlUpdateTOTPConfigSecret: fmt.Sprintf(queryFmtUpdateTOTPConfigurationSecret, tableTOTPConfigurations),
|
||||
sqlUpdateTOTPConfigSecretByUsername: fmt.Sprintf(queryFmtUpdateTOTPConfigurationSecretByUsername, tableTOTPConfigurations),
|
||||
sqlUpdateTOTPConfigRecordSignIn: fmt.Sprintf(queryFmtUpdateTOTPConfigRecordSignIn, tableTOTPConfigurations),
|
||||
sqlUpdateTOTPConfigRecordSignInByUsername: fmt.Sprintf(queryFmtUpdateTOTPConfigRecordSignInByUsername, tableTOTPConfigurations),
|
||||
|
||||
|
@ -161,8 +159,6 @@ type SQLProvider struct {
|
|||
sqlSelectTOTPConfig string
|
||||
sqlSelectTOTPConfigs string
|
||||
|
||||
sqlUpdateTOTPConfigSecret string
|
||||
sqlUpdateTOTPConfigSecretByUsername string
|
||||
sqlUpdateTOTPConfigRecordSignIn string
|
||||
sqlUpdateTOTPConfigRecordSignInByUsername string
|
||||
|
||||
|
@ -172,7 +168,6 @@ type SQLProvider struct {
|
|||
sqlSelectWebauthnDevicesByUsername string
|
||||
|
||||
sqlUpdateWebauthnDeviceDescriptionByUsernameAndID string
|
||||
|
||||
sqlUpdateWebauthnDevicePublicKey string
|
||||
sqlUpdateWebauthnDevicePublicKeyByUsername string
|
||||
sqlUpdateWebauthnDeviceRecordSignIn string
|
||||
|
@@ -295,13 +290,17 @@ func (p *SQLProvider) StartupCheck() (err error) {

ctx := context.Background()

if err = p.SchemaEncryptionCheckKey(ctx, false); err != nil && !errors.Is(err, ErrSchemaEncryptionVersionUnsupported) {
var result EncryptionValidationResult

if result, err = p.SchemaEncryptionCheckKey(ctx, false); err != nil && !errors.Is(err, ErrSchemaEncryptionVersionUnsupported) {
return err
}

err = p.SchemaMigrate(ctx, true, SchemaLatest)
if !result.Success() {
return ErrSchemaEncryptionInvalidKey
}

switch err {
switch err = p.SchemaMigrate(ctx, true, SchemaLatest); err {
case ErrSchemaAlreadyUpToDate:
p.log.Infof("Storage schema is already up to date")
return nil

@ -840,21 +839,6 @@ func (p *SQLProvider) LoadTOTPConfigurations(ctx context.Context, limit, page in
|
|||
return configs, nil
|
||||
}
|
||||
|
||||
func (p *SQLProvider) updateTOTPConfigurationSecret(ctx context.Context, config model.TOTPConfiguration) (err error) {
|
||||
switch config.ID {
|
||||
case 0:
|
||||
_, err = p.db.ExecContext(ctx, p.sqlUpdateTOTPConfigSecretByUsername, config.Secret, config.Username)
|
||||
default:
|
||||
_, err = p.db.ExecContext(ctx, p.sqlUpdateTOTPConfigSecret, config.Secret, config.ID)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("error updating TOTP configuration secret for user '%s': %w", config.Username, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SaveWebauthnDevice saves a registered Webauthn device.
|
||||
func (p *SQLProvider) SaveWebauthnDevice(ctx context.Context, device model.WebauthnDevice) (err error) {
|
||||
if device.PublicKey, err = p.encrypt(device.PublicKey); err != nil {
|
||||
|
@ -972,21 +956,6 @@ func (p *SQLProvider) LoadWebauthnDevicesByUsername(ctx context.Context, usernam
|
|||
return devices, nil
|
||||
}
|
||||
|
||||
func (p *SQLProvider) updateWebauthnDevicePublicKey(ctx context.Context, device model.WebauthnDevice) (err error) {
|
||||
switch device.ID {
|
||||
case 0:
|
||||
_, err = p.db.ExecContext(ctx, p.sqlUpdateWebauthnDevicePublicKeyByUsername, device.PublicKey, device.Username, device.KID)
|
||||
default:
|
||||
_, err = p.db.ExecContext(ctx, p.sqlUpdateWebauthnDevicePublicKey, device.PublicKey, device.ID)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("error updating Webauthn public key for user '%s' kid '%x': %w", device.Username, device.KID, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SavePreferredDuoDevice saves a Duo device.
|
||||
func (p *SQLProvider) SavePreferredDuoDevice(ctx context.Context, device model.DuoDevice) (err error) {
|
||||
if _, err = p.db.ExecContext(ctx, p.sqlUpsertDuoDevice, device.Username, device.Device, device.Method); err != nil {
|
||||
|
|
|
@ -58,8 +58,6 @@ func NewPostgreSQLProvider(config *schema.Configuration, caCertPool *x509.CertPo
|
|||
provider.sqlUpdateTOTPConfigRecordSignInByUsername = provider.db.Rebind(provider.sqlUpdateTOTPConfigRecordSignInByUsername)
|
||||
provider.sqlDeleteTOTPConfig = provider.db.Rebind(provider.sqlDeleteTOTPConfig)
|
||||
provider.sqlSelectTOTPConfigs = provider.db.Rebind(provider.sqlSelectTOTPConfigs)
|
||||
provider.sqlUpdateTOTPConfigSecret = provider.db.Rebind(provider.sqlUpdateTOTPConfigSecret)
|
||||
provider.sqlUpdateTOTPConfigSecretByUsername = provider.db.Rebind(provider.sqlUpdateTOTPConfigSecretByUsername)
|
||||
|
||||
provider.sqlSelectWebauthnDevices = provider.db.Rebind(provider.sqlSelectWebauthnDevices)
|
||||
provider.sqlSelectWebauthnDevicesByUsername = provider.db.Rebind(provider.sqlSelectWebauthnDevicesByUsername)
|
||||
|
|
|
@ -1,38 +1,65 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/jmoiron/sqlx"
|
||||
|
||||
"github.com/authelia/authelia/v4/internal/model"
|
||||
"github.com/authelia/authelia/v4/internal/utils"
|
||||
)
|
||||
|
||||
// SchemaEncryptionChangeKey uses the currently configured key to decrypt values in the database and the key provided
|
||||
// by this command to encrypt the values again and update them using a transaction.
|
||||
func (p *SQLProvider) SchemaEncryptionChangeKey(ctx context.Context, encryptionKey string) (err error) {
|
||||
func (p *SQLProvider) SchemaEncryptionChangeKey(ctx context.Context, key string) (err error) {
|
||||
skey := sha256.Sum256([]byte(key))
|
||||
|
||||
if bytes.Equal(skey[:], p.key[:]) {
|
||||
return fmt.Errorf("error changing the storage encryption key: the old key and the new key are the same")
|
||||
}
|
||||
|
||||
if _, err = p.SchemaEncryptionCheckKey(ctx, false); err != nil {
|
||||
return fmt.Errorf("error changing the storage encryption key: %w", err)
|
||||
}
|
||||
|
||||
tx, err := p.db.Beginx()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error beginning transaction to change encryption key: %w", err)
|
||||
}
|
||||
|
||||
key := sha256.Sum256([]byte(encryptionKey))
|
||||
|
||||
if err = p.schemaEncryptionChangeKeyTOTP(ctx, tx, key); err != nil {
|
||||
return err
|
||||
encChangeFuncs := []EncryptionChangeKeyFunc{
|
||||
schemaEncryptionChangeKeyTOTP,
|
||||
schemaEncryptionChangeKeyWebauthn,
|
||||
}
|
||||
|
||||
if err = p.schemaEncryptionChangeKeyWebauthn(ctx, tx, key); err != nil {
|
||||
return err
|
||||
for i := 0; true; i++ {
|
||||
typeOAuth2Session := OAuth2SessionType(i)
|
||||
|
||||
if typeOAuth2Session.Table() == "" {
|
||||
break
|
||||
}
|
||||
|
||||
if err = p.setNewEncryptionCheckValue(ctx, &key, tx); err != nil {
|
||||
if rollbackErr := tx.Rollback(); rollbackErr != nil {
|
||||
return fmt.Errorf("rollback error %v: rollback due to error: %w", rollbackErr, err)
|
||||
encChangeFuncs = append(encChangeFuncs, schemaEncryptionChangeKeyOpenIDConnect(typeOAuth2Session))
|
||||
}
|
||||
|
||||
for _, encChangeFunc := range encChangeFuncs {
|
||||
if err = encChangeFunc(ctx, p, tx, skey); err != nil {
|
||||
if rerr := tx.Rollback(); rerr != nil {
|
||||
return fmt.Errorf("rollback error %v: rollback due to error: %w", rerr, err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("rollback due to error: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err = p.setNewEncryptionCheckValue(ctx, tx, &skey); err != nil {
|
||||
if rerr := tx.Rollback(); rerr != nil {
|
||||
return fmt.Errorf("rollback error %v: rollback due to error: %w", rerr, err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("rollback due to error: %w", err)
|
||||
|
@ -41,224 +68,264 @@ func (p *SQLProvider) SchemaEncryptionChangeKey(ctx context.Context, encryptionK
|
|||
return tx.Commit()
}
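
The function above follows a familiar key-rotation shape: prove the old key works, then decrypt every secret with it and re-encrypt with the new key so that a failure can roll the whole transaction back. A standalone sketch of that pattern using stdlib AES-GCM; it does not use the project's utils.Encrypt helpers or its table layout and is only meant to illustrate the rotation step.

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

// seal and open are minimal AES-GCM helpers standing in for the project's
// encryption utilities; they are illustrative, not the real implementation.
func seal(key [32]byte, plaintext []byte) ([]byte, error) {
	block, err := aes.NewCipher(key[:])
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	nonce := make([]byte, gcm.NonceSize())
	if _, err = rand.Read(nonce); err != nil {
		return nil, err
	}

	// The nonce is prepended to the ciphertext so open can recover it.
	return gcm.Seal(nonce, nonce, plaintext, nil), nil
}

func open(key [32]byte, ciphertext []byte) ([]byte, error) {
	block, err := aes.NewCipher(key[:])
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	if len(ciphertext) < gcm.NonceSize() {
		return nil, fmt.Errorf("ciphertext too short")
	}

	return gcm.Open(nil, ciphertext[:gcm.NonceSize()], ciphertext[gcm.NonceSize():], nil)
}

// rotate decrypts every value with the old key and re-encrypts it with the
// new one; in the real provider this happens row by row inside a transaction.
func rotate(oldKey, newKey [32]byte, values [][]byte) error {
	for i, v := range values {
		plaintext, err := open(oldKey, v)
		if err != nil {
			return fmt.Errorf("old key failed to decrypt value %d: %w", i, err)
		}

		if values[i], err = seal(newKey, plaintext); err != nil {
			return err
		}
	}

	return nil
}

func main() {
	oldKey := sha256.Sum256([]byte("old passphrase"))
	newKey := sha256.Sum256([]byte("new passphrase"))

	secret, _ := seal(oldKey, []byte("totp secret"))
	values := [][]byte{secret}

	if err := rotate(oldKey, newKey, values); err != nil {
		fmt.Println(err)
		return
	}

	plaintext, _ := open(newKey, values[0])
	fmt.Println(string(plaintext))
}
```
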
func (p *SQLProvider) schemaEncryptionChangeKeyTOTP(ctx context.Context, tx *sqlx.Tx, key [32]byte) (err error) {
|
||||
var configs []model.TOTPConfiguration
|
||||
|
||||
for page := 0; true; page++ {
|
||||
if configs, err = p.LoadTOTPConfigurations(ctx, 10, page); err != nil {
|
||||
if rollbackErr := tx.Rollback(); rollbackErr != nil {
|
||||
return fmt.Errorf("rollback error %v: rollback due to error: %w", rollbackErr, err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("rollback due to error: %w", err)
|
||||
}
|
||||
|
||||
for _, config := range configs {
|
||||
if config.Secret, err = utils.Encrypt(config.Secret, &key); err != nil {
|
||||
if rollbackErr := tx.Rollback(); rollbackErr != nil {
|
||||
return fmt.Errorf("rollback error %v: rollback due to error: %w", rollbackErr, err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("rollback due to error: %w", err)
|
||||
}
|
||||
|
||||
if err = p.updateTOTPConfigurationSecret(ctx, config); err != nil {
|
||||
if rollbackErr := tx.Rollback(); rollbackErr != nil {
|
||||
return fmt.Errorf("rollback error %v: rollback due to error: %w", rollbackErr, err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("rollback due to error: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(configs) != 10 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaEncryptionChangeKeyWebauthn(ctx context.Context, tx *sqlx.Tx, key [32]byte) (err error) {
|
||||
var devices []model.WebauthnDevice
|
||||
|
||||
for page := 0; true; page++ {
|
||||
if devices, err = p.LoadWebauthnDevices(ctx, 10, page); err != nil {
|
||||
if rollbackErr := tx.Rollback(); rollbackErr != nil {
|
||||
return fmt.Errorf("rollback error %v: rollback due to error: %w", rollbackErr, err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("rollback due to error: %w", err)
|
||||
}
|
||||
|
||||
for _, device := range devices {
|
||||
if device.PublicKey, err = utils.Encrypt(device.PublicKey, &key); err != nil {
|
||||
if rollbackErr := tx.Rollback(); rollbackErr != nil {
|
||||
return fmt.Errorf("rollback error %v: rollback due to error: %w", rollbackErr, err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("rollback due to error: %w", err)
|
||||
}
|
||||
|
||||
if err = p.updateWebauthnDevicePublicKey(ctx, device); err != nil {
|
||||
if rollbackErr := tx.Rollback(); rollbackErr != nil {
|
||||
return fmt.Errorf("rollback error %v: rollback due to error: %w", rollbackErr, err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("rollback due to error: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(devices) != 10 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SchemaEncryptionCheckKey checks the encryption key configured is valid for the database.
|
||||
func (p *SQLProvider) SchemaEncryptionCheckKey(ctx context.Context, verbose bool) (err error) {
|
||||
func (p *SQLProvider) SchemaEncryptionCheckKey(ctx context.Context, verbose bool) (result EncryptionValidationResult, err error) {
|
||||
version, err := p.SchemaVersion(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
return result, err
|
||||
}
|
||||
|
||||
if version < 1 {
|
||||
return ErrSchemaEncryptionVersionUnsupported
|
||||
return result, ErrSchemaEncryptionVersionUnsupported
|
||||
}
|
||||
|
||||
var errs []error
|
||||
result = EncryptionValidationResult{
|
||||
Tables: map[string]EncryptionValidationTableResult{},
|
||||
}
|
||||
|
||||
if _, err = p.getEncryptionValue(ctx, encryptionNameCheck); err != nil {
|
||||
errs = append(errs, ErrSchemaEncryptionInvalidKey)
|
||||
result.InvalidCheckValue = true
|
||||
}
|
||||
|
||||
if verbose {
|
||||
if err = p.schemaEncryptionCheckTOTP(ctx); err != nil {
|
||||
errs = append(errs, err)
|
||||
encCheckFuncs := []EncryptionCheckKeyFunc{
|
||||
schemaEncryptionCheckKeyTOTP,
|
||||
schemaEncryptionCheckKeyWebauthn,
|
||||
}
|
||||
|
||||
if err = p.schemaEncryptionCheckWebauthn(ctx); err != nil {
|
||||
errs = append(errs, err)
|
||||
for i := 0; true; i++ {
|
||||
typeOAuth2Session := OAuth2SessionType(i)
|
||||
|
||||
if typeOAuth2Session.Table() == "" {
|
||||
break
|
||||
}
|
||||
|
||||
encCheckFuncs = append(encCheckFuncs, schemaEncryptionCheckKeyOpenIDConnect(typeOAuth2Session))
|
||||
}
|
||||
|
||||
for _, encCheckFunc := range encCheckFuncs {
|
||||
table, tableResult := encCheckFunc(ctx, p)
|
||||
|
||||
result.Tables[table] = tableResult
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs) != 0 {
|
||||
for i, e := range errs {
|
||||
if i == 0 {
|
||||
err = e
|
||||
|
||||
continue
|
||||
return result, nil
|
||||
}
|
||||
|
||||
err = fmt.Errorf("%w, %v", err, e)
|
||||
}
|
||||
func schemaEncryptionChangeKeyTOTP(ctx context.Context, provider *SQLProvider, tx *sqlx.Tx, key [32]byte) (err error) {
|
||||
var count int
|
||||
|
||||
if err = tx.GetContext(ctx, &count, fmt.Sprintf(queryFmtSelectRowCount, tableTOTPConfigurations)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaEncryptionCheckTOTP(ctx context.Context) (err error) {
|
||||
var (
|
||||
config model.TOTPConfiguration
|
||||
row int
|
||||
invalid int
|
||||
total int
|
||||
)
|
||||
configs := make([]encTOTPConfiguration, 0, count)
|
||||
|
||||
pageSize := 10
|
||||
|
||||
var rows *sqlx.Rows
|
||||
|
||||
for page := 0; true; page++ {
|
||||
if rows, err = p.db.QueryxContext(ctx, p.sqlSelectTOTPConfigs, pageSize, pageSize*page); err != nil {
|
||||
_ = rows.Close()
|
||||
if err = tx.SelectContext(ctx, &configs, fmt.Sprintf(queryFmtSelectTOTPConfigurationsEncryptedData, tableTOTPConfigurations)); err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("error selecting TOTP configurations: %w", err)
|
||||
}
|
||||
|
||||
row = 0
|
||||
query := provider.db.Rebind(fmt.Sprintf(queryFmtUpdateTOTPConfigurationSecret, tableTOTPConfigurations))
|
||||
|
||||
for rows.Next() {
|
||||
total++
|
||||
row++
|
||||
|
||||
if err = rows.StructScan(&config); err != nil {
|
||||
_ = rows.Close()
|
||||
return fmt.Errorf("error scanning TOTP configuration to struct: %w", err)
|
||||
for _, c := range configs {
|
||||
if c.Secret, err = provider.decrypt(c.Secret); err != nil {
|
||||
return fmt.Errorf("error decrypting TOTP configuration secret with id '%d': %w", c.ID, err)
|
||||
}
|
||||
|
||||
if _, err = p.decrypt(config.Secret); err != nil {
|
||||
invalid++
|
||||
}
|
||||
if c.Secret, err = utils.Encrypt(c.Secret, &key); err != nil {
|
||||
return fmt.Errorf("error encrypting TOTP configuration secret with id '%d': %w", c.ID, err)
|
||||
}
|
||||
|
||||
_ = rows.Close()
|
||||
|
||||
if row < pageSize {
|
||||
break
|
||||
if _, err = tx.ExecContext(ctx, query, c.Secret, c.ID); err != nil {
|
||||
return fmt.Errorf("error updating TOTP configuration secret with id '%d': %w", c.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if invalid != 0 {
|
||||
return fmt.Errorf("%d of %d total TOTP secrets were invalid", invalid, total)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaEncryptionCheckWebauthn(ctx context.Context) (err error) {
|
||||
var (
|
||||
device model.WebauthnDevice
|
||||
row int
|
||||
invalid int
|
||||
total int
|
||||
)
|
||||
func schemaEncryptionChangeKeyWebauthn(ctx context.Context, provider *SQLProvider, tx *sqlx.Tx, key [32]byte) (err error) {
|
||||
var count int
|
||||
|
||||
pageSize := 10
|
||||
if err = tx.GetContext(ctx, &count, fmt.Sprintf(queryFmtSelectRowCount, tableWebauthnDevices)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var rows *sqlx.Rows
|
||||
if count == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for page := 0; true; page++ {
|
||||
if rows, err = p.db.QueryxContext(ctx, p.sqlSelectWebauthnDevices, pageSize, pageSize*page); err != nil {
|
||||
_ = rows.Close()
|
||||
devices := make([]encWebauthnDevice, 0, count)
|
||||
|
||||
if err = tx.SelectContext(ctx, &devices, fmt.Sprintf(queryFmtSelectWebauthnDevicesEncryptedData, tableWebauthnDevices)); err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("error selecting Webauthn devices: %w", err)
|
||||
}
|
||||
|
||||
row = 0
|
||||
query := provider.db.Rebind(fmt.Sprintf(queryFmtUpdateWebauthnDevicePublicKey, tableWebauthnDevices))
|
||||
|
||||
for rows.Next() {
|
||||
total++
|
||||
row++
|
||||
|
||||
if err = rows.StructScan(&device); err != nil {
|
||||
_ = rows.Close()
|
||||
return fmt.Errorf("error scanning Webauthn device to struct: %w", err)
|
||||
for _, d := range devices {
|
||||
if d.PublicKey, err = provider.decrypt(d.PublicKey); err != nil {
|
||||
return fmt.Errorf("error decrypting Webauthn device public key with id '%d': %w", d.ID, err)
|
||||
}
|
||||
|
||||
if _, err = p.decrypt(device.PublicKey); err != nil {
|
||||
invalid++
|
||||
}
|
||||
if d.PublicKey, err = utils.Encrypt(d.PublicKey, &key); err != nil {
|
||||
return fmt.Errorf("error encrypting Webauthn device public key with id '%d': %w", d.ID, err)
|
||||
}
|
||||
|
||||
_ = rows.Close()
|
||||
|
||||
if row < pageSize {
|
||||
break
|
||||
if _, err = tx.ExecContext(ctx, query, d.PublicKey, d.ID); err != nil {
|
||||
return fmt.Errorf("error updating Webauthn device public key with id '%d': %w", d.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if invalid != 0 {
|
||||
return fmt.Errorf("%d of %d total Webauthn devices were invalid", invalid, total)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func schemaEncryptionChangeKeyOpenIDConnect(typeOAuth2Session OAuth2SessionType) EncryptionChangeKeyFunc {
|
||||
return func(ctx context.Context, provider *SQLProvider, tx *sqlx.Tx, key [32]byte) (err error) {
|
||||
var count int
|
||||
|
||||
if err = tx.GetContext(ctx, &count, fmt.Sprintf(queryFmtSelectRowCount, typeOAuth2Session.Table())); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sessions := make([]encOAuth2Session, 0, count)
|
||||
|
||||
if err = tx.SelectContext(ctx, &sessions, fmt.Sprintf(queryFmtSelectOAuth2SessionEncryptedData, typeOAuth2Session.Table())); err != nil {
|
||||
return fmt.Errorf("error selecting oauth2 %s sessions: %w", typeOAuth2Session.String(), err)
|
||||
}
|
||||
|
||||
query := provider.db.Rebind(fmt.Sprintf(queryFmtUpdateOAuth2ConsentSessionSessionData, typeOAuth2Session.Table()))
|
||||
|
||||
for _, s := range sessions {
|
||||
if s.Session, err = provider.decrypt(s.Session); err != nil {
|
||||
return fmt.Errorf("error decrypting oauth2 %s session data with id '%d': %w", typeOAuth2Session.String(), s.ID, err)
|
||||
}
|
||||
|
||||
if s.Session, err = utils.Encrypt(s.Session, &key); err != nil {
|
||||
return fmt.Errorf("error encrypting oauth2 %s session data with id '%d': %w", typeOAuth2Session.String(), s.ID, err)
|
||||
}
|
||||
|
||||
if _, err = tx.ExecContext(ctx, query, s.Session, s.ID); err != nil {
|
||||
return fmt.Errorf("error updating oauth2 %s session data with id '%d': %w", typeOAuth2Session.String(), s.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func schemaEncryptionCheckKeyTOTP(ctx context.Context, provider *SQLProvider) (table string, result EncryptionValidationTableResult) {
|
||||
var (
|
||||
rows *sqlx.Rows
|
||||
err error
|
||||
)
|
||||
|
||||
if rows, err = provider.db.QueryxContext(ctx, fmt.Sprintf(queryFmtSelectTOTPConfigurationsEncryptedData, tableTOTPConfigurations)); err != nil {
|
||||
return tableTOTPConfigurations, EncryptionValidationTableResult{Error: fmt.Errorf("error selecting TOTP configurations: %w", err)}
|
||||
}
|
||||
|
||||
var config encTOTPConfiguration
|
||||
|
||||
for rows.Next() {
|
||||
result.Total++
|
||||
|
||||
if err = rows.StructScan(&config); err != nil {
|
||||
_ = rows.Close()
|
||||
|
||||
return tableTOTPConfigurations, EncryptionValidationTableResult{Error: fmt.Errorf("error scanning TOTP configuration to struct: %w", err)}
|
||||
}
|
||||
|
||||
if _, err = provider.decrypt(config.Secret); err != nil {
|
||||
result.Invalid++
|
||||
}
|
||||
}
|
||||
|
||||
_ = rows.Close()
|
||||
|
||||
return tableTOTPConfigurations, result
|
||||
}
|
||||
|
||||
func schemaEncryptionCheckKeyWebauthn(ctx context.Context, provider *SQLProvider) (table string, result EncryptionValidationTableResult) {
|
||||
var (
|
||||
rows *sqlx.Rows
|
||||
err error
|
||||
)
|
||||
|
||||
if rows, err = provider.db.QueryxContext(ctx, fmt.Sprintf(queryFmtSelectWebauthnDevicesEncryptedData, tableWebauthnDevices)); err != nil {
|
||||
return tableWebauthnDevices, EncryptionValidationTableResult{Error: fmt.Errorf("error selecting Webauthn devices: %w", err)}
|
||||
}
|
||||
|
||||
var device encWebauthnDevice
|
||||
|
||||
for rows.Next() {
|
||||
result.Total++
|
||||
|
||||
if err = rows.StructScan(&device); err != nil {
|
||||
_ = rows.Close()
|
||||
|
||||
return tableWebauthnDevices, EncryptionValidationTableResult{Error: fmt.Errorf("error scanning Webauthn device to struct: %w", err)}
|
||||
}
|
||||
|
||||
if _, err = provider.decrypt(device.PublicKey); err != nil {
|
||||
result.Invalid++
|
||||
}
|
||||
}
|
||||
|
||||
_ = rows.Close()
|
||||
|
||||
return tableWebauthnDevices, result
|
||||
}
|
||||
|
||||
func schemaEncryptionCheckKeyOpenIDConnect(typeOAuth2Session OAuth2SessionType) EncryptionCheckKeyFunc {
|
||||
return func(ctx context.Context, provider *SQLProvider) (table string, result EncryptionValidationTableResult) {
|
||||
var (
|
||||
rows *sqlx.Rows
|
||||
err error
|
||||
)
|
||||
|
||||
if rows, err = provider.db.QueryxContext(ctx, fmt.Sprintf(queryFmtSelectOAuth2SessionEncryptedData, typeOAuth2Session.Table())); err != nil {
|
||||
return typeOAuth2Session.Table(), EncryptionValidationTableResult{Error: fmt.Errorf("error selecting oauth2 %s sessions: %w", typeOAuth2Session.String(), err)}
|
||||
}
|
||||
|
||||
var session encOAuth2Session
|
||||
|
||||
for rows.Next() {
|
||||
result.Total++
|
||||
|
||||
if err = rows.StructScan(&session); err != nil {
|
||||
_ = rows.Close()
|
||||
|
||||
return typeOAuth2Session.Table(), EncryptionValidationTableResult{Error: fmt.Errorf("error scanning oauth2 %s session to struct: %w", typeOAuth2Session.String(), err)}
|
||||
}
|
||||
|
||||
if _, err = provider.decrypt(session.Session); err != nil {
|
||||
result.Invalid++
|
||||
}
|
||||
}
|
||||
|
||||
_ = rows.Close()
|
||||
|
||||
return typeOAuth2Session.Table(), result
|
||||
}
|
||||
}
|
||||
|
||||
func (p *SQLProvider) encrypt(clearText []byte) (cipherText []byte, err error) {
|
||||
return utils.Encrypt(clearText, &p.key)
|
||||
}
|
||||
|
@@ -278,7 +345,7 @@ func (p *SQLProvider) getEncryptionValue(ctx context.Context, name string) (valu
return p.decrypt(encryptedValue)
}

func (p *SQLProvider) setNewEncryptionCheckValue(ctx context.Context, key *[32]byte, e sqlx.ExecerContext) (err error) {
func (p *SQLProvider) setNewEncryptionCheckValue(ctx context.Context, conn SQLXConnection, key *[32]byte) (err error) {
valueClearText, err := uuid.NewRandom()
if err != nil {
return err

@@ -289,11 +356,7 @@ func (p *SQLProvider) setNewEncryptionCheckValue(ctx context.Context, key *[32]b
return err
}

if e != nil {
_, err = e.ExecContext(ctx, p.sqlUpsertEncryptionValue, encryptionNameCheck, value)
} else {
_, err = p.db.ExecContext(ctx, p.sqlUpsertEncryptionValue, encryptionNameCheck, value)
}
_, err = conn.ExecContext(ctx, p.sqlUpsertEncryptionValue, encryptionNameCheck, value)

return err
}
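
setNewEncryptionCheckValue stores a freshly generated random value encrypted under the active key, and the check routine later proves the configured key is correct simply by decrypting that single row. A compact sketch of that check-value idea, reusing the same stdlib AES-GCM construction as the rotation sketch earlier; it keeps the value in memory rather than in the encryption table.

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

// newCheckValue encrypts a random sentinel under the active key; the returned
// ciphertext is what would be stored as the check row.
func newCheckValue(key [32]byte) ([]byte, error) {
	// A 32 byte key always yields a valid AES-256 cipher, so the errors from
	// aes.NewCipher and cipher.NewGCM are ignored in this sketch.
	block, _ := aes.NewCipher(key[:])
	gcm, _ := cipher.NewGCM(block)

	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}

	sentinel := make([]byte, 16)
	if _, err := rand.Read(sentinel); err != nil {
		return nil, err
	}

	return gcm.Seal(nonce, nonce, sentinel, nil), nil
}

// checkKey reports whether the candidate key decrypts the stored check value,
// which detects a misconfigured key without touching any user data.
func checkKey(key [32]byte, stored []byte) bool {
	block, _ := aes.NewCipher(key[:])
	gcm, _ := cipher.NewGCM(block)

	if len(stored) < gcm.NonceSize() {
		return false
	}

	_, err := gcm.Open(nil, stored[:gcm.NonceSize()], stored[gcm.NonceSize():], nil)

	return err == nil
}

func main() {
	configured := sha256.Sum256([]byte("the key in the configuration"))
	other := sha256.Sum256([]byte("a different key"))

	stored, _ := newCheckValue(configured)

	fmt.Println(checkKey(configured, stored)) // true
	fmt.Println(checkKey(other, stored))      // false
}
```
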
@ -83,18 +83,16 @@ const (
|
|||
LIMIT ?
|
||||
OFFSET ?;`
|
||||
|
||||
queryFmtSelectTOTPConfigurationsEncryptedData = `
|
||||
SELECT id, secret
|
||||
FROM %s;`
|
||||
|
||||
//nolint:gosec // These are not hardcoded credentials it's a query to obtain credentials.
|
||||
queryFmtUpdateTOTPConfigurationSecret = `
|
||||
UPDATE %s
|
||||
SET secret = ?
|
||||
WHERE id = ?;`
|
||||
|
||||
//nolint:gosec // These are not hardcoded credentials it's a query to obtain credentials.
|
||||
queryFmtUpdateTOTPConfigurationSecretByUsername = `
|
||||
UPDATE %s
|
||||
SET secret = ?
|
||||
WHERE username = ?;`
|
||||
|
||||
queryFmtUpsertTOTPConfiguration = `
|
||||
REPLACE INTO %s (created_at, last_used_at, username, issuer, algorithm, digits, period, secret)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?);`
|
||||
|
@ -127,6 +125,10 @@ const (
|
|||
LIMIT ?
|
||||
OFFSET ?;`
|
||||
|
||||
queryFmtSelectWebauthnDevicesEncryptedData = `
|
||||
SELECT id, public_key
|
||||
FROM %s;`
|
||||
|
||||
queryFmtSelectWebauthnDevicesByUsername = `
|
||||
SELECT id, created_at, last_used_at, rpid, username, description, kid, public_key, attestation_type, transport, aaguid, sign_count, clone_warning
|
||||
FROM %s
|
||||
|
@ -274,6 +276,11 @@ const (
|
|||
SET subject = ?
|
||||
WHERE id = ?;`
|
||||
|
||||
queryFmtUpdateOAuth2ConsentSessionSessionData = `
|
||||
UPDATE %s
|
||||
SET session_data = ?
|
||||
WHERE id = ?;`
|
||||
|
||||
queryFmtUpdateOAuth2ConsentSessionResponse = `
|
||||
UPDATE %s
|
||||
SET authorized = ?, responded_at = CURRENT_TIMESTAMP, granted_scopes = ?, granted_audience = ?, preconfiguration = ?
|
||||
|
@ -291,6 +298,10 @@ const (
|
|||
FROM %s
|
||||
WHERE signature = ? AND revoked = FALSE;`
|
||||
|
||||
queryFmtSelectOAuth2SessionEncryptedData = `
|
||||
SELECT id, session_data
|
||||
FROM %s;`
|
||||
|
||||
queryFmtInsertOAuth2Session = `
|
||||
INSERT INTO %s (challenge_id, request_id, client_id, signature, subject, requested_at,
|
||||
requested_scopes, granted_scopes, requested_audience, granted_audience,
|
||||
|
|
|
@ -1,8 +1,6 @@
|
|||
package storage
|
||||
|
||||
const (
|
||||
queryFmtDropTableIfExists = `DROP TABLE IF EXISTS %s;`
|
||||
|
||||
queryFmtRenameTable = `
|
||||
ALTER TABLE %s
|
||||
RENAME TO %s;`
|
||||
|
@ -10,104 +8,10 @@ const (
|
|||
queryFmtMySQLRenameTable = `
|
||||
ALTER TABLE %s
|
||||
RENAME %s;`
|
||||
)
|
||||
|
||||
// Pre1 migration constants.
|
||||
const (
|
||||
queryFmtPre1To1SelectAuthenticationLogs = `
|
||||
SELECT username, successful, time
|
||||
FROM %s
|
||||
ORDER BY time ASC
|
||||
LIMIT 100 OFFSET ?;`
|
||||
|
||||
queryFmtPre1To1InsertAuthenticationLogs = `
|
||||
INSERT INTO %s (username, successful, time, request_uri)
|
||||
VALUES (?, ?, ?, '');`
|
||||
|
||||
queryFmtPre1InsertUserPreferencesFromSelect = `
|
||||
INSERT INTO %s (username, second_factor_method)
|
||||
SELECT username, second_factor_method
|
||||
FROM %s
|
||||
ORDER BY username ASC;`
|
||||
|
||||
queryFmtPre1SelectTOTPConfigurations = `
|
||||
SELECT username, secret
|
||||
FROM %s
|
||||
ORDER BY username ASC;`
|
||||
|
||||
queryFmtPre1To1InsertTOTPConfiguration = `
|
||||
INSERT INTO %s (username, issuer, period, secret)
|
||||
VALUES (?, ?, ?, ?);`
|
||||
|
||||
queryFmt1ToPre1InsertTOTPConfiguration = `
|
||||
INSERT INTO %s (username, secret)
|
||||
VALUES (?, ?);`
|
||||
|
||||
queryFmtPre1To1SelectU2FDevices = `
|
||||
SELECT username, keyHandle, publicKey
|
||||
FROM %s
|
||||
ORDER BY username ASC;`
|
||||
|
||||
queryFmtPre1To1InsertU2FDevice = `
|
||||
INSERT INTO %s (username, key_handle, public_key)
|
||||
VALUES (?, ?, ?);`
|
||||
|
||||
queryFmt1ToPre1InsertAuthenticationLogs = `
|
||||
INSERT INTO %s (username, successful, time)
|
||||
VALUES (?, ?, ?);`
|
||||
|
||||
queryFmt1ToPre1SelectAuthenticationLogs = `
|
||||
SELECT username, successful, time
|
||||
FROM %s
|
||||
ORDER BY id ASC
|
||||
LIMIT 100 OFFSET ?;`
|
||||
|
||||
queryFmt1ToPre1SelectU2FDevices = `
|
||||
SELECT username, key_handle, public_key
|
||||
FROM %s
|
||||
ORDER BY username ASC;`
|
||||
|
||||
queryFmt1ToPre1InsertU2FDevice = `
|
||||
INSERT INTO %s (username, keyHandle, publicKey)
|
||||
VALUES (?, ?, ?);`
|
||||
|
||||
queryCreatePre1 = `
|
||||
CREATE TABLE user_preferences (
|
||||
username VARCHAR(100),
|
||||
second_factor_method VARCHAR(11),
|
||||
PRIMARY KEY (username)
|
||||
);
|
||||
|
||||
CREATE TABLE identity_verification_tokens (
|
||||
token VARCHAR(512)
|
||||
);
|
||||
|
||||
CREATE TABLE totp_secrets (
|
||||
username VARCHAR(100),
|
||||
secret VARCHAR(64),
|
||||
PRIMARY KEY (username)
|
||||
);
|
||||
|
||||
CREATE TABLE u2f_devices (
|
||||
username VARCHAR(100),
|
||||
keyHandle TEXT,
|
||||
publicKey TEXT,
|
||||
PRIMARY KEY (username)
|
||||
);
|
||||
|
||||
CREATE TABLE authentication_logs (
|
||||
username VARCHAR(100),
|
||||
successful BOOL,
|
||||
time INTEGER
|
||||
);
|
||||
|
||||
CREATE TABLE config (
|
||||
category VARCHAR(32) NOT NULL,
|
||||
key_name VARCHAR(32) NOT NULL,
|
||||
value TEXT,
|
||||
PRIMARY KEY (category, key_name)
|
||||
);
|
||||
|
||||
INSERT INTO config (category, key_name, value)
|
||||
VALUES ('schema', 'version', '1');`
|
||||
|
||||
queryFmtPostgreSQLLockTable = `LOCK TABLE %s IN %s MODE;`
|
||||
|
||||
queryFmtSelectRowCount = `
|
||||
SELECT COUNT(id)
|
||||
FROM %s;`
|
||||
)
|
||||
|
|
|
@ -81,184 +81,9 @@ func (p *SQLProvider) SchemaVersion(ctx context.Context) (version int, err error
|
|||
return 0, nil
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaLatestMigration(ctx context.Context) (migration *model.Migration, err error) {
|
||||
migration = &model.Migration{}
|
||||
|
||||
err = p.db.QueryRowxContext(ctx, p.sqlSelectLatestMigration).StructScan(migration)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return migration, nil
|
||||
}
|
||||
|
||||
// SchemaMigrationHistory returns migration history rows.
|
||||
func (p *SQLProvider) SchemaMigrationHistory(ctx context.Context) (migrations []model.Migration, err error) {
|
||||
rows, err := p.db.QueryxContext(ctx, p.sqlSelectMigrations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := rows.Close(); err != nil {
|
||||
p.log.Errorf(logFmtErrClosingConn, err)
|
||||
}
|
||||
}()
|
||||
|
||||
var migration model.Migration
|
||||
|
||||
for rows.Next() {
|
||||
err = rows.StructScan(&migration)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migrations = append(migrations, migration)
|
||||
}
|
||||
|
||||
return migrations, nil
|
||||
}
|
||||
|
||||
// SchemaMigrate migrates from the current version to the provided version.
|
||||
func (p *SQLProvider) SchemaMigrate(ctx context.Context, up bool, version int) (err error) {
|
||||
currentVersion, err := p.SchemaVersion(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = schemaMigrateChecks(p.name, up, version, currentVersion); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return p.schemaMigrate(ctx, currentVersion, version)
|
||||
}
|
||||
|
||||
//nolint:gocyclo // TODO: Consider refactoring time permitting.
|
||||
func (p *SQLProvider) schemaMigrate(ctx context.Context, prior, target int) (err error) {
|
||||
migrations, err := loadMigrations(p.name, prior, target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(migrations) == 0 && (prior != 1 || target != -1) {
|
||||
return ErrNoMigrationsFound
|
||||
}
|
||||
|
||||
switch {
|
||||
case prior == -1:
|
||||
p.log.Infof(logFmtMigrationFromTo, "pre1", strconv.Itoa(migrations[len(migrations)-1].After()))
|
||||
|
||||
err = p.schemaMigratePre1To1(ctx)
|
||||
if err != nil {
|
||||
if errRollback := p.schemaMigratePre1To1Rollback(ctx, true); errRollback != nil {
|
||||
return fmt.Errorf(errFmtFailedMigrationPre1, err)
|
||||
}
|
||||
|
||||
return fmt.Errorf(errFmtFailedMigrationPre1, err)
|
||||
}
|
||||
case target == -1:
|
||||
p.log.Infof(logFmtMigrationFromTo, strconv.Itoa(prior), "pre1")
|
||||
default:
|
||||
p.log.Infof(logFmtMigrationFromTo, strconv.Itoa(prior), strconv.Itoa(migrations[len(migrations)-1].After()))
|
||||
}
|
||||
|
||||
for _, migration := range migrations {
|
||||
if prior == -1 && migration.Version == 1 {
|
||||
// Skip migration version 1 when upgrading from pre1 as it's applied as part of the pre1 upgrade.
|
||||
continue
|
||||
}
|
||||
|
||||
err = p.schemaMigrateApply(ctx, migration)
|
||||
if err != nil {
|
||||
return p.schemaMigrateRollback(ctx, prior, migration.After(), err)
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case prior == -1:
|
||||
p.log.Infof(logFmtMigrationComplete, "pre1", strconv.Itoa(migrations[len(migrations)-1].After()))
|
||||
case target == -1:
|
||||
err = p.schemaMigrate1ToPre1(ctx)
|
||||
if err != nil {
|
||||
if errRollback := p.schemaMigratePre1To1Rollback(ctx, false); errRollback != nil {
|
||||
return fmt.Errorf(errFmtFailedMigrationPre1, err)
|
||||
}
|
||||
|
||||
return fmt.Errorf(errFmtFailedMigrationPre1, err)
|
||||
}
|
||||
|
||||
p.log.Infof(logFmtMigrationComplete, strconv.Itoa(prior), "pre1")
|
||||
default:
|
||||
p.log.Infof(logFmtMigrationComplete, strconv.Itoa(prior), strconv.Itoa(migrations[len(migrations)-1].After()))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaMigrateRollback(ctx context.Context, prior, after int, migrateErr error) (err error) {
|
||||
migrations, err := loadMigrations(p.name, after, prior)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading migrations from version %d to version %d for rollback: %+v. rollback caused by: %+v", prior, after, err, migrateErr)
|
||||
}
|
||||
|
||||
for _, migration := range migrations {
|
||||
if prior == -1 && !migration.Up && migration.Version == 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
err = p.schemaMigrateApply(ctx, migration)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error applying migration version %d to version %d for rollback: %+v. rollback caused by: %+v", migration.Before(), migration.After(), err, migrateErr)
|
||||
}
|
||||
}
|
||||
|
||||
if prior == -1 {
|
||||
if err = p.schemaMigrate1ToPre1(ctx); err != nil {
|
||||
return fmt.Errorf("error applying migration version 1 to version pre1 for rollback: %+v. rollback caused by: %+v", err, migrateErr)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("migration rollback complete. rollback caused by: %+v", migrateErr)
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaMigrateApply(ctx context.Context, migration model.SchemaMigration) (err error) {
|
||||
_, err = p.db.ExecContext(ctx, migration.Query)
|
||||
if err != nil {
|
||||
return fmt.Errorf(errFmtFailedMigration, migration.Version, migration.Name, err)
|
||||
}
|
||||
|
||||
if migration.Version == 1 {
|
||||
// Skip the migration history insertion in a migration to v0.
|
||||
if !migration.Up {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add the schema encryption value if upgrading to v1.
|
||||
if err = p.setNewEncryptionCheckValue(ctx, &p.key, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if migration.Version == 1 && !migration.Up {
|
||||
return nil
|
||||
}
|
||||
|
||||
return p.schemaMigrateFinalize(ctx, migration)
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaMigrateFinalize(ctx context.Context, migration model.SchemaMigration) (err error) {
|
||||
return p.schemaMigrateFinalizeAdvanced(ctx, migration.Before(), migration.After())
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaMigrateFinalizeAdvanced(ctx context.Context, before, after int) (err error) {
|
||||
_, err = p.db.ExecContext(ctx, p.sqlInsertMigration, time.Now(), before, after, utils.Version())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.log.Debugf("Storage schema migrated from version %d to %d", before, after)
|
||||
|
||||
return nil
|
||||
// SchemaLatestVersion returns the latest version available for migration.
|
||||
func (p *SQLProvider) SchemaLatestVersion() (version int, err error) {
|
||||
return latestMigrationVersion(p.name)
|
||||
}
|
||||
|
||||
// SchemaMigrationsUp returns a list of migrations up available between the current version and the provided version.
|
||||
|
@ -293,12 +118,214 @@ func (p *SQLProvider) SchemaMigrationsDown(ctx context.Context, version int) (mi
|
|||
return loadMigrations(p.name, current, version)
|
||||
}
|
||||
|
||||
// SchemaLatestVersion returns the latest version available for migration.
|
||||
func (p *SQLProvider) SchemaLatestVersion() (version int, err error) {
|
||||
return latestMigrationVersion(p.name)
|
||||
// SchemaMigrationHistory returns migration history rows.
|
||||
func (p *SQLProvider) SchemaMigrationHistory(ctx context.Context) (migrations []model.Migration, err error) {
|
||||
rows, err := p.db.QueryxContext(ctx, p.sqlSelectMigrations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := rows.Close(); err != nil {
|
||||
p.log.Errorf(logFmtErrClosingConn, err)
|
||||
}
|
||||
}()
|
||||
|
||||
var migration model.Migration
|
||||
|
||||
for rows.Next() {
|
||||
err = rows.StructScan(&migration)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migrations = append(migrations, migration)
|
||||
}
|
||||
|
||||
return migrations, nil
|
||||
}
|
||||
|
||||
// SchemaMigrate migrates from the current version to the provided version.
|
||||
func (p *SQLProvider) SchemaMigrate(ctx context.Context, up bool, version int) (err error) {
|
||||
var (
|
||||
tx *sqlx.Tx
|
||||
conn SQLXConnection
|
||||
)
|
||||
|
||||
if p.name != providerMySQL {
|
||||
if tx, err = p.db.BeginTxx(ctx, nil); err != nil {
|
||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||
}
|
||||
|
||||
conn = tx
|
||||
} else {
|
||||
conn = p.db
|
||||
}
|
||||
|
||||
currentVersion, err := p.SchemaVersion(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if currentVersion != 0 {
|
||||
if err = p.schemaMigrateLock(ctx, conn); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = schemaMigrateChecks(p.name, up, version, currentVersion); err != nil {
|
||||
if tx != nil {
|
||||
_ = tx.Rollback()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if err = p.schemaMigrate(ctx, conn, currentVersion, version); err != nil {
|
||||
if tx != nil && err == ErrNoMigrationsFound {
|
||||
_ = tx.Rollback()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if tx != nil {
|
||||
if err = tx.Commit(); err != nil {
|
||||
if rerr := tx.Rollback(); rerr != nil {
|
||||
return fmt.Errorf("failed to commit the transaction with: commit error: %w, rollback error: %+v", err, rerr)
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to commit the transaction but it has been rolled back: commit error: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
}
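
SchemaMigrate now has to issue the same statements either inside a transaction (the default) or directly through the pool (MySQL), which is why the helpers accept a connection interface rather than a concrete *sqlx.Tx; skipping the transaction for MySQL is presumably because its DDL statements commit implicitly. A small database/sql-only sketch of that shape; the interface and function names here are illustrative, not the project's.

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
)

// execer is satisfied by both *sql.DB and *sql.Tx, mirroring the role the
// SQLXConnection interface plays for sqlx in the diff above.
type execer interface {
	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
}

// applyMigration runs a single statement through whichever connection the
// caller chose, so the logic is identical with or without a transaction.
func applyMigration(ctx context.Context, conn execer, query string) error {
	if _, err := conn.ExecContext(ctx, query); err != nil {
		return fmt.Errorf("schema migration failed: %w", err)
	}

	return nil
}

func run(ctx context.Context, db *sql.DB, useTx bool) error {
	var conn execer = db

	var tx *sql.Tx

	if useTx {
		var err error

		if tx, err = db.BeginTx(ctx, nil); err != nil {
			return err
		}

		conn = tx
	}

	if err := applyMigration(ctx, conn, `CREATE TABLE IF NOT EXISTS example (id INTEGER PRIMARY KEY);`); err != nil {
		if tx != nil {
			_ = tx.Rollback()
		}

		return err
	}

	if tx != nil {
		return tx.Commit()
	}

	return nil
}

func main() {
	// Wiring up a real driver is out of scope for this sketch; run would be
	// invoked with an opened *sql.DB, for example from sql.Open.
	fmt.Println("see run() for the pattern")
}
```
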
func (p *SQLProvider) schemaMigrate(ctx context.Context, conn SQLXConnection, prior, target int) (err error) {
|
||||
migrations, err := loadMigrations(p.name, prior, target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(migrations) == 0 {
|
||||
return ErrNoMigrationsFound
|
||||
}
|
||||
|
||||
p.log.Infof(logFmtMigrationFromTo, strconv.Itoa(prior), strconv.Itoa(migrations[len(migrations)-1].After()))
|
||||
|
||||
for i, migration := range migrations {
|
||||
if migration.Up && prior == 0 && i == 1 {
|
||||
if err = p.schemaMigrateLock(ctx, conn); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = p.schemaMigrateApply(ctx, conn, migration); err != nil {
|
||||
return p.schemaMigrateRollback(ctx, conn, prior, migration.After(), err)
|
||||
}
|
||||
}
|
||||
|
||||
p.log.Infof(logFmtMigrationComplete, strconv.Itoa(prior), strconv.Itoa(migrations[len(migrations)-1].After()))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaMigrateLock(ctx context.Context, conn SQLXConnection) (err error) {
|
||||
if p.name != providerPostgres {
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err = conn.ExecContext(ctx, fmt.Sprintf(queryFmtPostgreSQLLockTable, tableMigrations, "ACCESS EXCLUSIVE")); err != nil {
|
||||
return fmt.Errorf("failed to lock tables: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaMigrateApply(ctx context.Context, conn SQLXConnection, migration model.SchemaMigration) (err error) {
|
||||
if _, err = conn.ExecContext(ctx, migration.Query); err != nil {
|
||||
return fmt.Errorf(errFmtFailedMigration, migration.Version, migration.Name, err)
|
||||
}
|
||||
|
||||
if migration.Version == 1 && migration.Up {
|
||||
// Add the schema encryption value if upgrading to v1.
|
||||
if err = p.setNewEncryptionCheckValue(ctx, conn, &p.key); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = p.schemaMigrateFinalize(ctx, conn, migration); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaMigrateFinalize(ctx context.Context, conn SQLXConnection, migration model.SchemaMigration) (err error) {
|
||||
if migration.Version == 1 && !migration.Up {
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err = conn.ExecContext(ctx, p.sqlInsertMigration, time.Now(), migration.Before(), migration.After(), utils.Version()); err != nil {
|
||||
return fmt.Errorf("failed inserting migration record: %w", err)
|
||||
}
|
||||
|
||||
p.log.Debugf("Storage schema migrated from version %d to %d", migration.Before(), migration.After())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaMigrateRollback(ctx context.Context, conn SQLXConnection, prior, after int, merr error) (err error) {
|
||||
switch tx := conn.(type) {
|
||||
case *sqlx.Tx:
|
||||
return p.schemaMigrateRollbackWithTx(ctx, tx, merr)
|
||||
default:
|
||||
return p.schemaMigrateRollbackWithoutTx(ctx, prior, after, merr)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaMigrateRollbackWithTx(_ context.Context, tx *sqlx.Tx, merr error) (err error) {
|
||||
if err = tx.Rollback(); err != nil {
|
||||
return fmt.Errorf("error applying rollback %+v. rollback caused by: %w", err, merr)
|
||||
}
|
||||
|
||||
return fmt.Errorf("migration rollback complete. rollback caused by: %w", merr)
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaMigrateRollbackWithoutTx(ctx context.Context, prior, after int, merr error) (err error) {
|
||||
migrations, err := loadMigrations(p.name, after, prior)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading migrations from version %d to version %d for rollback: %+v. rollback caused by: %w", prior, after, err, merr)
|
||||
}
|
||||
|
||||
for _, migration := range migrations {
|
||||
if err = p.schemaMigrateApply(ctx, p.db, migration); err != nil {
|
||||
return fmt.Errorf("error applying migration version %d to version %d for rollback: %+v. rollback caused by: %w", migration.Before(), migration.After(), err, merr)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("migration rollback complete. rollback caused by: %w", merr)
|
||||
}
|
||||
|
||||
func (p *SQLProvider) schemaLatestMigration(ctx context.Context) (migration *model.Migration, err error) {
|
||||
migration = &model.Migration{}
|
||||
|
||||
if err = p.db.QueryRowxContext(ctx, p.sqlSelectLatestMigration).StructScan(migration); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return migration, nil
|
||||
}
|
||||
|
||||
func schemaMigrateChecks(providerName string, up bool, targetVersion, currentVersion int) (err error) {
switch {
case currentVersion == -1:
return fmt.Errorf(errFmtMigrationPre1, "up from", errFmtMigrationPre1SuggestedVersion)
case targetVersion == -1:
return fmt.Errorf(errFmtMigrationPre1, "down to", fmt.Sprintf("you should downgrade to schema version 1 using the current authelia version then use the suggested authelia version to downgrade to pre1: %s", errFmtMigrationPre1SuggestedVersion))
}

if targetVersion == currentVersion {
return fmt.Errorf(ErrFmtMigrateAlreadyOnTargetVersion, targetVersion, currentVersion)
}

@@ -325,7 +352,7 @@ func schemaMigrateChecks(providerName string, up bool, targetVersion, currentVer
return fmt.Errorf(ErrFmtMigrateUpTargetGreaterThanLatest, targetVersion, latest)
}
} else {
if targetVersion < -1 {
if targetVersion < 0 {
return fmt.Errorf(ErrFmtMigrateDownTargetLessThanMinimum, targetVersion)
}

@@ -345,7 +372,7 @@ func SchemaVersionToString(version int) (versionStr string) {
case -1:
return "pre1"
case 0:
return "N/A"
return na
default:
return strconv.Itoa(version)
}

|
|
|
@ -1,470 +0,0 @@
package storage

import (
	"context"
	"database/sql"
	"encoding/base64"
	"fmt"
	"strings"
	"time"

	"github.com/authelia/authelia/v4/internal/model"
	"github.com/authelia/authelia/v4/internal/utils"
)

// schemaMigratePre1To1 takes the v1 migration and migrates to this version.
func (p *SQLProvider) schemaMigratePre1To1(ctx context.Context) (err error) {
	migration, err := loadMigration(p.name, 1, true)
	if err != nil {
		return err
	}

	// Get Tables list.
	tables, err := p.SchemaTables(ctx)
	if err != nil {
		return err
	}

	tablesRename := []string{
		tablePre1Config,
		tablePre1TOTPSecrets,
		tablePre1IdentityVerificationTokens,
		tablePre1U2FDevices,
		tableUserPreferences,
		tableAuthenticationLogs,
		tableAlphaPreferences,
		tableAlphaIdentityVerificationTokens,
		tableAlphaAuthenticationLogs,
		tableAlphaPreferencesTableName,
		tableAlphaSecondFactorPreferences,
		tableAlphaTOTPSecrets,
		tableAlphaU2FDeviceHandles,
	}

	if err = p.schemaMigratePre1Rename(ctx, tables, tablesRename); err != nil {
		return err
	}

	if _, err = p.db.ExecContext(ctx, migration.Query); err != nil {
		return fmt.Errorf(errFmtFailedMigration, migration.Version, migration.Name, err)
	}

	if err = p.setNewEncryptionCheckValue(ctx, &p.key, nil); err != nil {
		return err
	}

	if _, err = p.db.ExecContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmtPre1InsertUserPreferencesFromSelect),
		tableUserPreferences, tablePrefixBackup+tableUserPreferences)); err != nil {
		return err
	}

	if err = p.schemaMigratePre1To1AuthenticationLogs(ctx); err != nil {
		return err
	}

	if err = p.schemaMigratePre1To1U2F(ctx); err != nil {
		return err
	}

	if err = p.schemaMigratePre1To1TOTP(ctx); err != nil {
		return err
	}

	for _, table := range tablesRename {
		if _, err = p.db.Exec(fmt.Sprintf(p.db.Rebind(queryFmtDropTableIfExists), tablePrefixBackup+table)); err != nil {
			return err
		}
	}

	return p.schemaMigrateFinalizeAdvanced(ctx, -1, 1)
}

func (p *SQLProvider) schemaMigratePre1Rename(ctx context.Context, tables, tablesRename []string) (err error) {
	// Rename Tables and Indexes.
	for _, table := range tables {
		if !utils.IsStringInSlice(table, tablesRename) {
			continue
		}

		tableNew := tablePrefixBackup + table

		if _, err = p.db.ExecContext(ctx, fmt.Sprintf(p.sqlFmtRenameTable, table, tableNew)); err != nil {
			return err
		}

		if p.name == providerPostgres {
			if table == tablePre1U2FDevices || table == tableUserPreferences {
				if _, err = p.db.ExecContext(ctx, fmt.Sprintf(`ALTER TABLE %s RENAME CONSTRAINT %s_pkey TO %s_pkey;`,
					tableNew, table, tableNew)); err != nil {
					continue
				}
			}
		}
	}

	return nil
}

func (p *SQLProvider) schemaMigratePre1To1Rollback(ctx context.Context, up bool) (err error) {
	if up {
		migration, err := loadMigration(p.name, 1, false)
		if err != nil {
			return err
		}

		if _, err = p.db.ExecContext(ctx, migration.Query); err != nil {
			return fmt.Errorf(errFmtFailedMigration, migration.Version, migration.Name, err)
		}
	}

	tables, err := p.SchemaTables(ctx)
	if err != nil {
		return err
	}

	for _, table := range tables {
		if !strings.HasPrefix(table, tablePrefixBackup) {
			continue
		}

		tableNew := strings.Replace(table, tablePrefixBackup, "", 1)
		if _, err = p.db.ExecContext(ctx, fmt.Sprintf(p.sqlFmtRenameTable, table, tableNew)); err != nil {
			return err
		}

		if p.name == providerPostgres && (tableNew == tablePre1U2FDevices || tableNew == tableUserPreferences) {
			if _, err = p.db.ExecContext(ctx, fmt.Sprintf(`ALTER TABLE %s RENAME CONSTRAINT %s_pkey TO %s_pkey;`,
				tableNew, table, tableNew)); err != nil {
				continue
			}
		}
	}

	return nil
}

func (p *SQLProvider) schemaMigratePre1To1AuthenticationLogs(ctx context.Context) (err error) {
	for page := 0; true; page++ {
		attempts, err := p.schemaMigratePre1To1AuthenticationLogsGetRows(ctx, page)
		if err != nil {
			if err == sql.ErrNoRows {
				break
			}

			return err
		}

		for _, attempt := range attempts {
			_, err = p.db.ExecContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmtPre1To1InsertAuthenticationLogs), tableAuthenticationLogs), attempt.Username, attempt.Successful, attempt.Time)
			if err != nil {
				return err
			}
		}

		if len(attempts) != 100 {
			break
		}
	}

	return nil
}

func (p *SQLProvider) schemaMigratePre1To1AuthenticationLogsGetRows(ctx context.Context, page int) (attempts []model.AuthenticationAttempt, err error) {
	rows, err := p.db.QueryxContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmtPre1To1SelectAuthenticationLogs), tablePrefixBackup+tableAuthenticationLogs), page*100)
	if err != nil {
		return nil, err
	}

	attempts = make([]model.AuthenticationAttempt, 0, 100)

	for rows.Next() {
		var (
			username   string
			successful bool
			timestamp  int64
		)

		err = rows.Scan(&username, &successful, &timestamp)
		if err != nil {
			return nil, err
		}

		attempts = append(attempts, model.AuthenticationAttempt{Username: username, Successful: successful, Time: time.Unix(timestamp, 0)})
	}

	return attempts, nil
}

func (p *SQLProvider) schemaMigratePre1To1TOTP(ctx context.Context) (err error) {
	rows, err := p.db.QueryxContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmtPre1SelectTOTPConfigurations), tablePrefixBackup+tablePre1TOTPSecrets))
	if err != nil {
		return err
	}

	var totpConfigs []model.TOTPConfiguration

	defer func() {
		if err := rows.Close(); err != nil {
			p.log.Errorf(logFmtErrClosingConn, err)
		}
	}()

	for rows.Next() {
		var username, secret string

		err = rows.Scan(&username, &secret)
		if err != nil {
			return err
		}

		encryptedSecret, err := p.encrypt([]byte(secret))
		if err != nil {
			return err
		}

		totpConfigs = append(totpConfigs, model.TOTPConfiguration{Username: username, Secret: encryptedSecret})
	}

	for _, config := range totpConfigs {
		_, err = p.db.ExecContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmtPre1To1InsertTOTPConfiguration), tableTOTPConfigurations), config.Username, p.config.TOTP.Issuer, p.config.TOTP.Period, config.Secret)
		if err != nil {
			return err
		}
	}

	return nil
}

func (p *SQLProvider) schemaMigratePre1To1U2F(ctx context.Context) (err error) {
	rows, err := p.db.Queryx(fmt.Sprintf(p.db.Rebind(queryFmtPre1To1SelectU2FDevices), tablePrefixBackup+tablePre1U2FDevices))
	if err != nil {
		return err
	}

	defer func() {
		if err := rows.Close(); err != nil {
			p.log.Errorf(logFmtErrClosingConn, err)
		}
	}()

	var devices []model.U2FDevice

	for rows.Next() {
		var username, keyHandleBase64, publicKeyBase64 string

		err = rows.Scan(&username, &keyHandleBase64, &publicKeyBase64)
		if err != nil {
			return err
		}

		keyHandle, err := base64.StdEncoding.DecodeString(keyHandleBase64)
		if err != nil {
			return err
		}

		publicKey, err := base64.StdEncoding.DecodeString(publicKeyBase64)
		if err != nil {
			return err
		}

		encryptedPublicKey, err := p.encrypt(publicKey)
		if err != nil {
			return err
		}

		devices = append(devices, model.U2FDevice{Username: username, KeyHandle: keyHandle, PublicKey: encryptedPublicKey})
	}

	for _, device := range devices {
		_, err = p.db.ExecContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmtPre1To1InsertU2FDevice), tablePre1U2FDevices), device.Username, device.KeyHandle, device.PublicKey)
		if err != nil {
			return err
		}
	}

	return nil
}

func (p *SQLProvider) schemaMigrate1ToPre1(ctx context.Context) (err error) {
	tables, err := p.SchemaTables(ctx)
	if err != nil {
		return err
	}

	tablesRename := []string{
		tableMigrations,
		tableTOTPConfigurations,
		tableIdentityVerification,
		tablePre1U2FDevices,
		tableDuoDevices,
		tableUserPreferences,
		tableAuthenticationLogs,
		tableEncryption,
	}

	if err = p.schemaMigratePre1Rename(ctx, tables, tablesRename); err != nil {
		return err
	}

	if _, err := p.db.ExecContext(ctx, queryCreatePre1); err != nil {
		return err
	}

	if _, err = p.db.ExecContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmtPre1InsertUserPreferencesFromSelect),
		tableUserPreferences, tablePrefixBackup+tableUserPreferences)); err != nil {
		return err
	}

	if err = p.schemaMigrate1ToPre1AuthenticationLogs(ctx); err != nil {
		return err
	}

	if err = p.schemaMigrate1ToPre1U2F(ctx); err != nil {
		return err
	}

	if err = p.schemaMigrate1ToPre1TOTP(ctx); err != nil {
		return err
	}

	queryFmtDropTableRebound := p.db.Rebind(queryFmtDropTableIfExists)

	for _, table := range tablesRename {
		if _, err = p.db.Exec(fmt.Sprintf(queryFmtDropTableRebound, tablePrefixBackup+table)); err != nil {
			return err
		}
	}

	return nil
}

func (p *SQLProvider) schemaMigrate1ToPre1AuthenticationLogs(ctx context.Context) (err error) {
	for page := 0; true; page++ {
		attempts, err := p.schemaMigrate1ToPre1AuthenticationLogsGetRows(ctx, page)
		if err != nil {
			if err == sql.ErrNoRows {
				break
			}

			return err
		}

		for _, attempt := range attempts {
			_, err = p.db.ExecContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmt1ToPre1InsertAuthenticationLogs), tableAuthenticationLogs), attempt.Username, attempt.Successful, attempt.Time.Unix())
			if err != nil {
				return err
			}
		}

		if len(attempts) != 100 {
			break
		}
	}

	return nil
}

func (p *SQLProvider) schemaMigrate1ToPre1AuthenticationLogsGetRows(ctx context.Context, page int) (attempts []model.AuthenticationAttempt, err error) {
	rows, err := p.db.QueryxContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmt1ToPre1SelectAuthenticationLogs), tablePrefixBackup+tableAuthenticationLogs), page*100)
	if err != nil {
		return nil, err
	}

	attempts = make([]model.AuthenticationAttempt, 0, 100)

	var attempt model.AuthenticationAttempt
	for rows.Next() {
		err = rows.StructScan(&attempt)
		if err != nil {
			return nil, err
		}

		attempts = append(attempts, attempt)
	}

	return attempts, nil
}

func (p *SQLProvider) schemaMigrate1ToPre1TOTP(ctx context.Context) (err error) {
	rows, err := p.db.QueryxContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmtPre1SelectTOTPConfigurations), tablePrefixBackup+tableTOTPConfigurations))
	if err != nil {
		return err
	}

	var totpConfigs []model.TOTPConfiguration

	defer func() {
		if err := rows.Close(); err != nil {
			p.log.Errorf(logFmtErrClosingConn, err)
		}
	}()

	for rows.Next() {
		var (
			username         string
			secretCipherText []byte
		)

		err = rows.Scan(&username, &secretCipherText)
		if err != nil {
			return err
		}

		secretClearText, err := p.decrypt(secretCipherText)
		if err != nil {
			return err
		}

		totpConfigs = append(totpConfigs, model.TOTPConfiguration{Username: username, Secret: secretClearText})
	}

	for _, config := range totpConfigs {
		_, err = p.db.ExecContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmt1ToPre1InsertTOTPConfiguration), tablePre1TOTPSecrets), config.Username, config.Secret)
		if err != nil {
			return err
		}
	}

	return nil
}

func (p *SQLProvider) schemaMigrate1ToPre1U2F(ctx context.Context) (err error) {
	rows, err := p.db.QueryxContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmt1ToPre1SelectU2FDevices), tablePrefixBackup+tablePre1U2FDevices))
	if err != nil {
		return err
	}

	defer func() {
		if err := rows.Close(); err != nil {
			p.log.Errorf(logFmtErrClosingConn, err)
		}
	}()

	var (
		devices []model.U2FDevice
		device  model.U2FDevice
	)

	for rows.Next() {
		err = rows.StructScan(&device)
		if err != nil {
			return err
		}

		device.PublicKey, err = p.decrypt(device.PublicKey)
		if err != nil {
			return err
		}

		devices = append(devices, device)
	}

	for _, device := range devices {
		_, err = p.db.ExecContext(ctx, fmt.Sprintf(p.db.Rebind(queryFmt1ToPre1InsertU2FDevice), tablePre1U2FDevices), device.Username, base64.StdEncoding.EncodeToString(device.KeyHandle), base64.StdEncoding.EncodeToString(device.PublicKey))
		if err != nil {
			return err
		}
	}

	return nil
}

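The file deleted above contained the pre1/1 migration helpers. One pattern worth noting from it is the paged copy of authentication logs in batches of 100 rows. A stripped-down, self-contained sketch of that loop, with a hypothetical fetchPage function standing in for the SQL query:

```go
// Stripped-down sketch of the 100-row paging loop used by the removed
// migration helpers; fetchPage is a hypothetical stand-in for the SQL query.
package main

import "fmt"

type attempt struct {
	Username   string
	Successful bool
}

// fetchPage pretends to return up to 100 rows for the given page.
func fetchPage(page int) []attempt {
	if page >= 2 {
		return nil
	}

	rows := make([]attempt, 0, 100)
	for i := 0; i < 100; i++ {
		rows = append(rows, attempt{Username: fmt.Sprintf("user-%d-%d", page, i), Successful: true})
	}

	return rows
}

func main() {
	total := 0

	for page := 0; true; page++ {
		attempts := fetchPage(page)

		total += len(attempts)

		// A short page (fewer than 100 rows) means the table is exhausted.
		if len(attempts) != 100 {
			break
		}
	}

	fmt.Println("copied", total, "rows") // copied 200 rows
}
```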
@ -29,7 +29,7 @@ func TestShouldReturnErrOnTargetSameAsCurrent(t *testing.T) {
		fmt.Sprintf(ErrFmtMigrateAlreadyOnTargetVersion, 1, 1))
}

func TestShouldReturnErrOnUpMigrationTargetVersionLessTHanCurrent(t *testing.T) {
func TestShouldReturnErrOnUpMigrationTargetVersionLessThanCurrent(t *testing.T) {
	assert.EqualError(t,
		schemaMigrateChecks(providerPostgres, true, 0, LatestVersion),
		fmt.Sprintf(ErrFmtMigrateUpTargetLessThanCurrent, 0, LatestVersion))

@ -80,7 +80,7 @@ func TestShouldReturnErrOnVersionDoesntExits(t *testing.T) {
		fmt.Sprintf(ErrFmtMigrateUpTargetGreaterThanLatest, SchemaLatest-1, LatestVersion))
}

func TestMigrationDownShouldReturnErrOnTargetLessThanPre1(t *testing.T) {
func TestMigrationDownShouldReturnErrOnTargetLessThan1(t *testing.T) {
	assert.EqualError(t,
		schemaMigrateChecks(providerSQLite, false, -4, LatestVersion),
		fmt.Sprintf(ErrFmtMigrateDownTargetLessThanMinimum, -4))

@ -93,8 +93,15 @@ func TestMigrationDownShouldReturnErrOnTargetLessThanPre1(t *testing.T) {
		schemaMigrateChecks(providerPostgres, false, -2, LatestVersion),
		fmt.Sprintf(ErrFmtMigrateDownTargetLessThanMinimum, -2))

	assert.NoError(t,
		schemaMigrateChecks(providerPostgres, false, -1, LatestVersion))
	assert.EqualError(t,
		schemaMigrateChecks(providerPostgres, false, -1, LatestVersion),
		"schema migration down to pre1 is no longer supported: you must use an older version of authelia to perform this migration: you should downgrade to schema version 1 using the current authelia version then use the suggested authelia version to downgrade to pre1: the suggested authelia version is 4.37.2")
}

func TestMigrationDownShouldReturnErrOnCurrentLessThan0(t *testing.T) {
	assert.EqualError(t,
		schemaMigrateChecks(providerPostgres, true, LatestVersion, -1),
		"schema migration up from pre1 is no longer supported: you must use an older version of authelia to perform this migration: the suggested authelia version is 4.37.2")
}

func TestMigrationDownShouldReturnErrOnTargetVersionGreaterThanCurrent(t *testing.T) {

@ -0,0 +1,95 @@
package storage

import (
	"context"

	"github.com/jmoiron/sqlx"
)

// SQLXConnection is a *sqlx.DB or *sqlx.Tx.
type SQLXConnection interface {
	sqlx.Execer
	sqlx.ExecerContext

	sqlx.Preparer
	sqlx.PreparerContext

	sqlx.Queryer
	sqlx.QueryerContext

	sqlx.Ext
	sqlx.ExtContext
}

// EncryptionChangeKeyFunc handles encryption key changes for a specific table or tables.
type EncryptionChangeKeyFunc func(ctx context.Context, provider *SQLProvider, tx *sqlx.Tx, key [32]byte) (err error)

// EncryptionCheckKeyFunc handles encryption key checking for a specific table or tables.
type EncryptionCheckKeyFunc func(ctx context.Context, provider *SQLProvider) (table string, result EncryptionValidationTableResult)

type encOAuth2Session struct {
	ID      int    `db:"id"`
	Session []byte `db:"session_data"`
}

type encWebauthnDevice struct {
	ID        int    `db:"id"`
	PublicKey []byte `db:"public_key"`
}

type encTOTPConfiguration struct {
	ID     int    `db:"id" json:"-"`
	Secret []byte `db:"secret" json:"-"`
}

// EncryptionValidationResult contains information about the success of a schema encryption validation.
type EncryptionValidationResult struct {
	InvalidCheckValue bool
	Tables            map[string]EncryptionValidationTableResult
}

// Success returns true if no validation errors occurred.
func (r EncryptionValidationResult) Success() bool {
	if r.InvalidCheckValue {
		return false
	}

	for _, table := range r.Tables {
		if table.Invalid != 0 || table.Error != nil {
			return false
		}
	}

	return true
}

// Checked returns true if the validation completed all phases even if there were errors.
func (r EncryptionValidationResult) Checked() bool {
	for _, table := range r.Tables {
		if table.Error != nil {
			return false
		}
	}

	return true
}

// EncryptionValidationTableResult contains information about the success of a table schema encryption validation.
type EncryptionValidationTableResult struct {
	Error   error
	Total   int
	Invalid int
}

// ResultDescriptor returns a string representing the result.
func (r EncryptionValidationTableResult) ResultDescriptor() string {
	if r.Total == 0 {
		return na
	}

	if r.Error != nil || r.Invalid != 0 {
		return "FAILURE"
	}

	return "SUCCESS"
}

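The validation result types added above are plain value types, so their behaviour is easy to show in isolation. A hedged usage sketch follows; the table names and row counts are illustrative only, and the type is reproduced locally so the example compiles on its own:

```go
// Usage sketch for the validation result type introduced above; the table
// names and counts are illustrative, not taken from a real database.
package main

import "fmt"

// EncryptionValidationTableResult mirrors the type added in the new file,
// reproduced here only so the example is self-contained.
type EncryptionValidationTableResult struct {
	Error   error
	Total   int
	Invalid int
}

func (r EncryptionValidationTableResult) ResultDescriptor() string {
	if r.Total == 0 {
		return "N/A"
	}

	if r.Error != nil || r.Invalid != 0 {
		return "FAILURE"
	}

	return "SUCCESS"
}

func main() {
	// Illustrative counts only: four TOTP rows that failed decryption and an
	// empty WebAuthn table.
	tables := map[string]EncryptionValidationTableResult{
		"totp_configurations": {Total: 4, Invalid: 4},
		"webauthn_devices":    {},
	}

	for name, table := range tables {
		fmt.Printf("Table (%s): %s\n", name, table.ResultDescriptor())
	}
}
```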
@ -1,4 +1,4 @@
FROM alpine:3.16.3
FROM alpine:3.17.0

WORKDIR /kind

@ -1,4 +1,4 @@
FROM alpine:3.16.3
FROM alpine:3.17.0

RUN \
	apk add --no-cache \

@ -816,7 +816,7 @@ func (s *CLISuite) TestStorage00ShouldShowCorrectPreInitInformation() {

	output, err = s.Exec("authelia-backend", []string{"authelia", s.testArg, s.coverageArg, "storage", "encryption", "check", "--config=/config/configuration.storage.yml"})
	s.Assert().NoError(err)
	s.Assert().Contains(output, "Could not check encryption key for validity. The schema version doesn't support encryption.")
	s.Assert().Contains(output, "Storage Encryption Key Validation: FAILURE\n\n\tCause: The schema version doesn't support encryption.\n")

	output, err = s.Exec("authelia-backend", []string{"authelia", s.testArg, s.coverageArg, "storage", "migrate", "down", "--target=0", "--destroy-data", "--config=/config/configuration.storage.yml"})
	s.Assert().EqualError(err, "exit status 1")

@ -1136,27 +1136,27 @@ func (s *CLISuite) TestStorage05ShouldChangeEncryptionKey() {
	output, err = s.Exec("authelia-backend", []string{"authelia", s.testArg, s.coverageArg, "storage", "encryption", "check", "--config=/config/configuration.storage.yml"})
	s.Assert().NoError(err)

	s.Assert().Contains(output, "Encryption key validation: failed.\n\nError: the encryption key is not valid against the schema check value.\n")
	s.Assert().Contains(output, "Storage Encryption Key Validation: FAILURE\n\n\tCause: the configured encryption key does not appear to be valid for this database which may occur if the encryption key was changed in the configuration without using the cli to change it in the database.\n")

	output, err = s.Exec("authelia-backend", []string{"authelia", s.testArg, s.coverageArg, "storage", "encryption", "check", "--verbose", "--config=/config/configuration.storage.yml"})
	s.Assert().NoError(err)

	s.Assert().Contains(output, "Encryption key validation: failed.\n\nError: the encryption key is not valid against the schema check value, 4 of 4 total TOTP secrets were invalid.\n")
	s.Assert().Contains(output, "Storage Encryption Key Validation: FAILURE\n\n\tCause: the configured encryption key does not appear to be valid for this database which may occur if the encryption key was changed in the configuration without using the cli to change it in the database.\n\nTables:\n\n\tTable (oauth2_access_token_session): N/A\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 0\n\n\tTable (oauth2_authorization_code_session): N/A\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 0\n\n\tTable (oauth2_openid_connect_session): N/A\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 0\n\n\tTable (oauth2_pkce_request_session): N/A\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 0\n\n\tTable (oauth2_refresh_token_session): N/A\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 0\n\n\tTable (totp_configurations): FAILURE\n\t\tInvalid Rows: 4\n\t\tTotal Rows: 4\n\n\tTable (webauthn_devices): N/A\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 0\n")

	output, err = s.Exec("authelia-backend", []string{"authelia", s.testArg, s.coverageArg, "storage", "encryption", "check", "--encryption-key=apple-apple-apple-apple", "--config=/config/configuration.storage.yml"})
	s.Assert().NoError(err)

	s.Assert().Contains(output, "Encryption key validation: success.\n")
	s.Assert().Contains(output, "Storage Encryption Key Validation: SUCCESS\n")

	output, err = s.Exec("authelia-backend", []string{"authelia", s.testArg, s.coverageArg, "storage", "encryption", "check", "--verbose", "--encryption-key=apple-apple-apple-apple", "--config=/config/configuration.storage.yml"})
	s.Assert().NoError(err)

	s.Assert().Contains(output, "Encryption key validation: success.\n")
	s.Assert().Contains(output, "Storage Encryption Key Validation: SUCCESS\n\nTables:\n\n\tTable (oauth2_access_token_session): N/A\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 0\n\n\tTable (oauth2_authorization_code_session): N/A\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 0\n\n\tTable (oauth2_openid_connect_session): N/A\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 0\n\n\tTable (oauth2_pkce_request_session): N/A\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 0\n\n\tTable (oauth2_refresh_token_session): N/A\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 0\n\n\tTable (totp_configurations): SUCCESS\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 4\n\n\tTable (webauthn_devices): N/A\n\t\tInvalid Rows: 0\n\t\tTotal Rows: 0\n")

	output, err = s.Exec("authelia-backend", []string{"authelia", s.testArg, s.coverageArg, "storage", "encryption", "change-key", "--encryption-key=apple-apple-apple-apple", "--config=/config/configuration.storage.yml"})
	s.Assert().EqualError(err, "exit status 1")

	s.Assert().Contains(output, "Error: you must set the --new-encryption-key flag\n")
	s.Assert().Contains(output, "Error: you must either use an interactive terminal or use the --new-encryption-key flag\n")

	output, err = s.Exec("authelia-backend", []string{"authelia", s.testArg, s.coverageArg, "storage", "encryption", "change-key", "--encryption-key=apple-apple-apple-apple", "--new-encryption-key=abc", "--config=/config/configuration.storage.yml"})
	s.Assert().EqualError(err, "exit status 1")

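These assertions exercise the `storage encryption check` and `storage encryption change-key` subcommands. A hedged sketch of driving the same commands from Go; the config path and key values are placeholders taken from the test above, and the new key shown is a made-up example:

```go
// Sketch of invoking the storage encryption subcommands exercised by the
// suite above; the config path and keys are placeholders from the test.
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	check := exec.Command("authelia", "storage", "encryption", "check",
		"--verbose", "--config=/config/configuration.storage.yml")

	out, err := check.CombinedOutput()
	fmt.Println(string(out), err)

	change := exec.Command("authelia", "storage", "encryption", "change-key",
		"--encryption-key=apple-apple-apple-apple",
		"--new-encryption-key=example-key-of-more-than-twenty-chars", // hypothetical value
		"--config=/config/configuration.storage.yml")

	out, err = change.CombinedOutput()
	fmt.Println(string(out), err)
}
```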
@ -235,10 +235,10 @@ func IsX509PrivateKey(i any) bool {
}

// NewTLSConfig generates a tls.Config from a schema.TLSConfig and a x509.CertPool.
func NewTLSConfig(config *schema.TLSConfig, caCertPool *x509.CertPool) (tlsConfig *tls.Config) {
func NewTLSConfig(config *schema.TLSConfig, rootCAs *x509.CertPool) (tlsConfig *tls.Config) {
	var certificates []tls.Certificate

	if config.CertificateChain.HasCertificates() && config.PrivateKey != nil {
	if config.PrivateKey != nil && config.CertificateChain.HasCertificates() {
		certificates = []tls.Certificate{
			{
				Certificate: config.CertificateChain.CertificatesRaw(),

@ -252,8 +252,8 @@ func NewTLSConfig(config *schema.TLSConfig, caCertPool *x509.CertPool) (tlsConfi
		ServerName:         config.ServerName,
		InsecureSkipVerify: config.SkipVerify, //nolint:gosec // Informed choice by user. Off by default.
		MinVersion:         config.MinimumVersion.MinVersion(),
		MaxVersion:         config.MinimumVersion.MaxVersion(),
		RootCAs:            caCertPool,
		MaxVersion:         config.MaximumVersion.MaxVersion(),
		RootCAs:            rootCAs,
		Certificates:       certificates,
	}
}

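The notable fix here is that MaxVersion is now derived from MaximumVersion rather than from MinimumVersion. A small standard-library sketch of building an equivalent tls.Config by hand; the server name, version bounds, and empty root pool are illustrative values only:

```go
// Standalone sketch of the kind of tls.Config the fixed helper produces;
// all concrete values here are illustrative, not Authelia defaults.
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

func main() {
	rootCAs := x509.NewCertPool()

	cfg := &tls.Config{
		ServerName:         "auth.example.com",
		InsecureSkipVerify: false,
		MinVersion:         tls.VersionTLS12, // from the minimum_version option
		MaxVersion:         tls.VersionTLS13, // from maximum_version, no longer derived from the minimum
		RootCAs:            rootCAs,
	}

	fmt.Println(cfg.MinVersion, cfg.MaxVersion)
}
```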
@ -9,8 +9,10 @@ module.exports = {
            typescript: {},
        },
    },
    plugins: ["@limegrass/import-alias"],
    extends: ["react-app", "plugin:import/errors", "plugin:import/warnings", "plugin:prettier/recommended", "prettier"],
    rules: {
        "@limegrass/import-alias/import-alias": "error",
        "import/order": [
            "error",
            {

@ -25,15 +25,15 @@
        "@fortawesome/free-regular-svg-icons": "6.2.1",
        "@fortawesome/free-solid-svg-icons": "6.2.1",
        "@fortawesome/react-fontawesome": "0.2.0",
        "@mui/icons-material": "5.10.14",
        "@mui/material": "5.10.14",
        "@mui/styles": "5.10.14",
        "axios": "1.1.3",
        "@mui/icons-material": "5.10.16",
        "@mui/material": "5.10.16",
        "@mui/styles": "5.10.16",
        "axios": "1.2.0",
        "broadcast-channel": "4.18.1",
        "classnames": "2.3.2",
        "i18next": "22.0.6",
        "i18next-browser-languagedetector": "7.0.1",
        "i18next-http-backend": "2.0.1",
        "i18next-http-backend": "2.0.2",
        "qrcode.react": "3.1.0",
        "query-string": "7.1.1",
        "react": "18.2.0",

@ -143,8 +143,9 @@
        ]
    },
    "devDependencies": {
        "@commitlint/cli": "17.2.0",
        "@commitlint/config-conventional": "17.2.0",
        "@commitlint/cli": "17.3.0",
        "@commitlint/config-conventional": "17.3.0",
        "@limegrass/eslint-plugin-import-alias": "1.0.6",
        "@testing-library/jest-dom": "5.16.5",
        "@testing-library/react": "13.4.0",
        "@types/jest": "29.2.3",

@ -153,10 +154,10 @@
        "@types/react": "18.0.25",
        "@types/react-dom": "18.0.9",
        "@types/zxcvbn": "4.4.1",
        "@typescript-eslint/eslint-plugin": "5.43.0",
        "@typescript-eslint/parser": "5.43.0",
        "@typescript-eslint/eslint-plugin": "5.45.0",
        "@typescript-eslint/parser": "5.45.0",
        "@vitejs/plugin-react": "2.2.0",
        "esbuild": "0.15.14",
        "esbuild": "0.15.16",
        "esbuild-jest": "0.5.0",
        "eslint": "8.28.0",
        "eslint-config-prettier": "8.5.0",

@ -173,13 +174,13 @@
        "jest-environment-jsdom": "29.3.1",
        "jest-transform-stub": "2.0.0",
        "jest-watch-typeahead": "2.2.1",
        "prettier": "2.7.1",
        "prettier": "2.8.0",
        "react-test-renderer": "18.2.0",
        "typescript": "4.9.3",
        "vite": "3.2.4",
        "vite-plugin-eslint": "1.8.1",
        "vite-plugin-istanbul": "3.0.2",
        "vite-plugin-svgr": "2.2.2",
        "vite-tsconfig-paths": "3.5.2"
        "vite-tsconfig-paths": "3.6.0"
    }
}

File diff suppressed because it is too large
@ -9,7 +9,7 @@ import { useNavigate } from "react-router-dom";
import { ReactComponent as UserSvg } from "@assets/images/user.svg";
import Brand from "@components/Brand";
import TypographyWithTooltip from "@components/TypographyWithTootip";
import { SettingsRoute } from "@root/constants/Routes";
import { SettingsRoute } from "@constants/Routes";
import { getLogoOverride } from "@utils/Configuration";

export interface Props {

@ -1,7 +1,6 @@
import { WebauthnDevice } from "@root/models/Webauthn";

import { WebauthnDevicesPath } from "./Api";
import { Get } from "./Client";
import { WebauthnDevice } from "@models/Webauthn";
import { WebauthnDevicesPath } from "@services/Api";
import { Get } from "@services/Client";

// getWebauthnDevices returns the list of webauthn devices for the authenticated user.
export async function getWebauthnDevices(): Promise<WebauthnDevice[]> {

@ -3,8 +3,7 @@ import React from "react";
import { Grid } from "@mui/material";

import { AutheliaState } from "@services/State";

import WebauthnDevices from "./WebauthnDevices";
import WebauthnDevices from "@views/Settings/TwoFactorAuthentication/WebauthnDevices";

interface Props {
    state: AutheliaState;

@ -11,11 +11,10 @@ import { initiateWebauthnRegistrationProcess } from "@services/RegisterDevice";
import { AutheliaState, AuthenticationLevel } from "@services/State";
import { getWebauthnDevices } from "@services/UserWebauthnDevices";
import { deleteDevice, updateDevice } from "@services/Webauthn";

import WebauthnDeviceDeleteDialog from "./WebauthnDeviceDeleteDialog";
import WebauthnDeviceDetailsDialog from "./WebauthnDeviceDetailsDialog";
import WebauthnDeviceEditDialog from "./WebauthnDeviceEditDialog";
import WebauthnDeviceItem from "./WebauthnDeviceItem";
import WebauthnDeviceDeleteDialog from "@views/Settings/TwoFactorAuthentication/WebauthnDeviceDeleteDialog";
import WebauthnDeviceDetailsDialog from "@views/Settings/TwoFactorAuthentication/WebauthnDeviceDetailsDialog";
import WebauthnDeviceEditDialog from "@views/Settings/TwoFactorAuthentication/WebauthnDeviceEditDialog";
import WebauthnDeviceItem from "@views/Settings/TwoFactorAuthentication/WebauthnDeviceItem";

interface Props {
    state: AutheliaState;
