Mirror of https://github.com/michaelthomson0797/fleet-infra.git, synced 2026-02-04 13:09:53 +00:00
oauth2 proxy longhorn ui maybe?
longhorn/helmrelease-oauth2-proxy.yaml (new file, 456 lines)
@@ -0,0 +1,456 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: oauth2-proxy
  namespace: longhorn-system
spec:
  chart:
    spec:
      chart: oauth2-proxy
      version: 6.19.x # auto-update to semver bugfixes only
      sourceRef:
        kind: HelmRepository
        name: oauth2-proxy
        namespace: flux-system
  interval: 15m
  timeout: 5m
  releaseName: oauth2-proxy
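  # The sourceRef above assumes a HelmRepository named oauth2-proxy already
  # exists in flux-system. A minimal sketch of that object, not part of this
  # commit (the apiVersion and chart URL here are assumptions):
  #
  #   apiVersion: source.toolkit.fluxcd.io/v1beta2
  #   kind: HelmRepository
  #   metadata:
  #     name: oauth2-proxy
  #     namespace: flux-system
  #   spec:
  #     interval: 1h
  #     url: https://oauth2-proxy.github.io/manifests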
  values: # paste contents of upstream values.yaml below, indented 4 spaces
    ## Override the deployment namespace
    ##
    namespaceOverride: ""

    # Force the target Kubernetes version (it uses Helm `.Capabilities` if not set).
    # This is especially useful for `helm template`, as capabilities are always
    # empty because it doesn't query an actual cluster
    kubeVersion:

    # OAuth client configuration specifics
    config:
      # Add config annotations
      annotations: {}
      # OAuth client ID
      clientID: "kube-apiserver"
      # OAuth client secret
      clientSecret: "iVdGhQUc8k4bK7rT3HAiuOjctxUW7T8bDxz4hm6aPagZkdkoCDvmnLjC5mMF6ooR0obGrezYdyt9nAjVbC0RVZsOIdSsfrSYbe0kEWGfk21FazXXApqNTIrsqBF4mSJH"
      # Create a new secret with the following command
      # openssl rand -base64 32 | head -c 32 | base64
      # Use an existing secret for OAuth2 credentials (see secret.yaml for required fields)
      # Example:
      # existingSecret: secret
      cookieSecret: "2W87TuxTRkTFVkwxbz3MuJxWE3EAJe"
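      # Note: oauth2-proxy only accepts cookie secrets of 16, 24, or 32 bytes
      # (raw or base64-encoded); the 30-character value above may therefore be
      # rejected at startup. Regenerating it with the openssl command shown
      # above would produce a valid 32-byte secret.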
      # The name of the cookie that oauth2-proxy will create
      # If left empty, it will default to the release name
      cookieName: ""
      google: {}
        # adminEmail: xxxx
        # useApplicationDefaultCredentials: true
        # targetPrincipal: xxxx
        # serviceAccountJson: xxxx
        # Alternatively, use an existing secret (see google-secret.yaml for required fields)
        # Example:
        # existingSecret: google-secret
        # groups: []
        # Example:
        # - group1@example.com
        # - group2@example.com
      # Default configuration, to be overridden
      configFile: |-
        email_domains = [ "*" ]
        upstreams = [ "http://longhorn-frontend" ]
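      # The upstream points at longhorn-frontend, the ClusterIP service that
      # serves the Longhorn UI in this namespace; oauth2-proxy only forwards
      # requests there after a successful login.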
      # Custom configuration file: oauth2_proxy.cfg
      # configFile: |-
      #   pass_basic_auth = false
      #   pass_access_token = true
      # Use an existing config map (see configmap.yaml for required fields)
      # Example:
      # existingConfig: config

    alphaConfig:
      enabled: false
      # Add config annotations
      annotations: {}
      # Arbitrary configuration data to append to the server section
      serverConfigData: {}
      # Arbitrary configuration data to append to the metrics section
      metricsConfigData: {}
      # Arbitrary configuration data to append
      configData: {}
      # Arbitrary configuration to append
      # This is treated as a Go template and rendered with the root context
      configFile: ""
      # Use an existing config map (see secret-alpha.yaml for required fields)
      existingConfig: ~
      # Use an existing secret
      existingSecret: ~

    image:
      repository: "quay.io/oauth2-proxy/oauth2-proxy"
      # appVersion is used by default
      tag: ""
      pullPolicy: "IfNotPresent"

    # Optionally specify an array of imagePullSecrets.
    # Secrets must be manually created in the namespace.
    # ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
    # imagePullSecrets:
    #   - name: myRegistryKeySecretName

    # Set a custom containerPort if required.
    # This will default to 4180 if this value is not set and the httpScheme is set to http
    # This will default to 4443 if this value is not set and the httpScheme is set to https
    # containerPort: 4180

    extraArgs:
      provider: oidc
      provider-display-name: "Authentik"
      skip-provider-button: "true"
      pass-authorization-header: "true"
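      # The oidc provider normally also needs an issuer and a redirect URL.
      # A hypothetical example for an Authentik setup (both values below are
      # placeholder assumptions, not taken from this commit):
      # oidc-issuer-url: "https://authentik.example.com/application/o/longhorn/"
      # redirect-url: "https://longhorn.michaelthomson.dev/oauth2/callback"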
    extraEnv: []

    # -- Custom labels to add into metadata
    customLabels: {}

    # To authorize individual email addresses
    # This is part of extraArgs, but since it needs special treatment it gets its own section
    authenticatedEmailsFile:
      enabled: false
      # Defines how the email addresses file will be projected, via a configmap or secret
      persistence: configmap
      # template is the name of a configmap that contains the email user list but is managed outside of this chart.
      # It's a simpler way to maintain only one configmap (user list) instead of changing it for each oauth2-proxy service.
      # Be aware that the key in the external config map's data needs to be named "restricted_user_access", or match the
      # value provided in the restrictedUserAccessKey field.
      template: ""
      # The configmap/secret key under which the list of email access is stored
      # Defaults to "restricted_user_access" if not filled in, but can be overridden to allow flexibility
      restrictedUserAccessKey: ""
      # One email per line
      # example:
      # restricted_access: |-
      #   name1@domain
      #   name2@domain
      # If you override the config with restricted_access, it will configure a user list within this chart, which takes
      # care of the config map resource.
      restricted_access: ""
      annotations: {}
      # helm.sh/resource-policy: keep

    service:
      type: ClusterIP
      # when service.type is ClusterIP ...
      # clusterIP: 192.0.2.20
      # when service.type is LoadBalancer ...
      # loadBalancerIP: 198.51.100.40
      # loadBalancerSourceRanges: 203.0.113.0/24
      # when service.type is NodePort ...
      # nodePort: 80
      portNumber: 80
      # Protocol set on the service
      appProtocol: http
      annotations: {}
      # foo.io/bar: "true"

    ## Create or use ServiceAccount
    serviceAccount:
      ## Specifies whether a ServiceAccount should be created
      enabled: true
      ## The name of the ServiceAccount to use.
      ## If not set and create is true, a name is generated using the fullname template
      name:
      automountServiceAccountToken: true
      annotations: {}

    ingress:
      enabled: true
      # className: nginx
      path: /
      # Only used if API capabilities (networking.k8s.io/v1) allow it
      pathType: ImplementationSpecific
      # Used to create an Ingress record.
      hosts:
        - longhorn.michaelthomson.dev
      # Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
      # Warning! The configuration is dependent on your current k8s API version capabilities (networking.k8s.io/v1)
      # extraPaths:
      # - path: /*
      #   pathType: ImplementationSpecific
      #   backend:
      #     service:
      #       name: ssl-redirect
      #       port:
      #         name: use-annotation
      labels: {}
      # annotations:
      #   kubernetes.io/ingress.class: nginx
      #   kubernetes.io/tls-acme: "true"
      tls:
        - secretName: letsencrypt-wildcard-cert-michaelthomson.dev
          hosts:
            - longhorn.michaelthomson.dev
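    # Note that this Ingress protects the UI only if longhorn-frontend is not
    # exposed by an Ingress of its own; a separate route to the service would
    # bypass oauth2-proxy entirely.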

    resources: {}
    # limits:
    #   cpu: 100m
    #   memory: 300Mi
    # requests:
    #   cpu: 100m
    #   memory: 300Mi

    extraVolumes: []
    # - name: ca-bundle-cert
    #   secret:
    #     secretName: <secret-name>

    extraVolumeMounts: []
    # - mountPath: /etc/ssl/certs/
    #   name: ca-bundle-cert

    # Additional containers to be added to the pod.
    extraContainers: []
    # - name: my-sidecar
    #   image: nginx:latest

    priorityClassName: ""

    # Host aliases are useful when working "on premise", where the (public) DNS resolver does not know about the hosts.
    hostAlias:
      enabled: false
      # ip: "10.xxx.xxx.xxx"
      # hostname: "auth.example.com"

    # [TopologySpreadConstraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) configuration.
    # Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling
    # topologySpreadConstraints: []

    # Affinity for pod assignment
    # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
    # affinity: {}

    # Tolerations for pod assignment
    # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
    tolerations: []

    # Node labels for pod assignment
    # Ref: https://kubernetes.io/docs/user-guide/node-selection/
    nodeSelector: {}

    # Whether to use secrets instead of environment values for setting up OAUTH2_PROXY variables
    proxyVarsAsSecrets: true

    # Configure Kubernetes liveness and readiness probes.
    # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
    # Disable both when deploying with Istio 1.0 mTLS. https://istio.io/help/faq/security/#k8s-health-checks
    livenessProbe:
      enabled: true
      initialDelaySeconds: 0
      timeoutSeconds: 1

    readinessProbe:
      enabled: true
      initialDelaySeconds: 0
      timeoutSeconds: 5
      periodSeconds: 10
      successThreshold: 1

    # Configure Kubernetes security context for container
    # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
    securityContext:
      enabled: true
      allowPrivilegeEscalation: false
      capabilities:
        drop:
          - ALL
      readOnlyRootFilesystem: true
      runAsNonRoot: true
      runAsUser: 2000
      runAsGroup: 2000
      seccompProfile:
        type: RuntimeDefault

    deploymentAnnotations: {}
    podAnnotations: {}
    podLabels: {}
    replicaCount: 1
    revisionHistoryLimit: 10

    ## PodDisruptionBudget settings
    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
    podDisruptionBudget:
      enabled: true
      minAvailable: 1
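    # Caveat: with replicaCount: 1, minAvailable: 1 means the budget never
    # permits a voluntary eviction, so node drains can hang on this pod.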

    # Configure Kubernetes security context for pod
    # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
    podSecurityContext: {}

    # whether to use http or https
    httpScheme: http

    initContainers:
      # if the redis sub-chart is enabled, wait for it to be ready
      # before starting the proxy
      # creates a role binding to get, list, watch, the redis master pod
      # if service account is enabled
      waitForRedis:
        enabled: true
        image:
          repository: "docker.io/bitnami/kubectl"
          pullPolicy: "IfNotPresent"
        # uses the kubernetes version of the cluster
        # the chart is deployed on, if not set
        kubectlVersion: ""
        securityContext:
          enabled: true
          allowPrivilegeEscalation: false
          capabilities:
            drop:
              - ALL
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 65534
          runAsGroup: 65534
          seccompProfile:
            type: RuntimeDefault
        timeout: 180

    # Additionally authenticate against a htpasswd file. Entries must be created with "htpasswd -B" for bcrypt hashing.
    # Alternatively, supply an existing secret which contains the required information.
    htpasswdFile:
      enabled: false
      existingSecret: ""
      entries: []
      # One row for each user
      # example:
      # entries:
      #   - testuser:$2y$05$gY6dgXqjuzFhwdhsiFe7seM9q9Tile4Y3E.CBpAZJffkeiLaC21Gy
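      # An entry like the example above can be generated locally, assuming
      # Apache's htpasswd tool is available:
      # htpasswd -nbB testuser 'chosen-password'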

    # Configure the session storage type, between cookie and redis
    sessionStorage:
      # Can be one of the supported session storage types: cookie|redis
      type: cookie
      redis:
        # Name of the Kubernetes secret containing the redis & redis sentinel password values (see also `sessionStorage.redis.passwordKey`)
        existingSecret: ""
        # Redis password value. Applicable for all Redis configurations. Taken from the redis subchart secret if not set. `sessionStorage.redis.existingSecret` takes precedence
        password: ""
        # Key of the Kubernetes secret data containing the redis password value
        passwordKey: "redis-password"
        # Can be one of standalone|cluster|sentinel
        clientType: "standalone"
        standalone:
          # URL of redis standalone server for redis session storage (e.g. `redis://HOST[:PORT]`). Automatically generated if not set
          connectionUrl: ""
        cluster:
          # List of Redis cluster connection URLs (e.g. `["redis://127.0.0.1:8000", "redis://127.0.0.1:8000"]`)
          connectionUrls: []
        sentinel:
          # Name of the Kubernetes secret containing the redis sentinel password value (see also `sessionStorage.redis.sentinel.passwordKey`). Default: `sessionStorage.redis.existingSecret`
          existingSecret: ""
          # Redis sentinel password. Used only for sentinel connection; any redis node passwords need to use `sessionStorage.redis.password`
          password: ""
          # Key of the Kubernetes secret data containing the redis sentinel password value
          passwordKey: "redis-sentinel-password"
          # Redis sentinel master name
          masterName: ""
          # List of Redis sentinel connection URLs (e.g. `["redis://127.0.0.1:8000", "redis://127.0.0.1:8000"]`)
          connectionUrls: []

    # Enables and configures the automatic deployment of the redis subchart
    redis:
      # provision an instance of the redis sub-chart
      enabled: false
      # Redis-specific helm chart settings; please see:
      # https://github.com/bitnami/charts/tree/master/bitnami/redis#parameters
      # redisPort: 6379
      # cluster:
      #   enabled: false
      #   slaveCount: 1

    # Enables apiVersion deprecation checks
    checkDeprecation: true

    metrics:
      # Enable Prometheus metrics endpoint
      enabled: true
      # Serve Prometheus metrics on this port
      port: 44180
      # when service.type is NodePort ...
      # nodePort: 44180
      # Protocol set on the service for the metrics port
      service:
        appProtocol: http
      serviceMonitor:
        # Enable Prometheus Operator ServiceMonitor
        enabled: false
        # Define the namespace where to deploy the ServiceMonitor resource
        namespace: ""
        # Prometheus Instance definition
        prometheusInstance: default
        # Prometheus scrape interval
        interval: 60s
        # Prometheus scrape timeout
        scrapeTimeout: 30s
        # Add custom labels to the ServiceMonitor resource
        labels: {}

        ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig`, for example, if using istio mTLS.
        scheme: ""

        ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
        ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
        tlsConfig: {}

        ## bearerTokenFile: Path to bearer token file.
        bearerTokenFile: ""

        ## Used to pass annotations that are used by the Prometheus installed in your cluster to select Service Monitors to work with
        ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
        annotations: {}

        ## Metric relabel configs to apply to samples before ingestion.
        ## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
        metricRelabelings: []
        # - action: keep
        #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
        #   sourceLabels: [__name__]

        ## Relabel configs to apply to samples before ingestion.
        ## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
        relabelings: []
        # - sourceLabels: [__meta_kubernetes_pod_node_name]
        #   separator: ;
        #   regex: ^(.*)$
        #   targetLabel: nodename
        #   replacement: $1
        #   action: replace

    # Extra K8s manifests to deploy
    extraObjects: []
    # - apiVersion: secrets-store.csi.x-k8s.io/v1
    #   kind: SecretProviderClass
    #   metadata:
    #     name: oauth2-proxy-secrets-store
    #   spec:
    #     provider: aws
    #     parameters:
    #       objects: |
    #         - objectName: "oauth2-proxy"
    #           objectType: "secretsmanager"
    #           jmesPath:
    #             - path: "client_id"
    #               objectAlias: "client-id"
    #             - path: "client_secret"
    #               objectAlias: "client-secret"
    #             - path: "cookie_secret"
    #               objectAlias: "cookie-secret"
    #     secretObjects:
    #       - data:
    #           - key: client-id
    #             objectName: client-id
    #           - key: client-secret
    #             objectName: client-secret
    #           - key: cookie-secret
    #             objectName: cookie-secret
    #         secretName: oauth2-proxy-secrets-store
    #         type: Opaque