# charts/oauth2-proxy/values-goldilocks.yaml

## Override the deployment namespace
##
namespaceOverride: ""
# Force the target Kubernetes version (it uses Helm `.Capabilities` if not set).
# This is especially useful for `helm template`, where capabilities are always
# empty because no actual cluster is queried.
kubeVersion:
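# Example (the version shown is an arbitrary choice):
# kubeVersion: "1.27"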
# OAuth client configuration specifics
config:
  # Add config annotations
  annotations: {}
  # OAuth client ID
  clientID: "600570707325-9uu9q3ibu83vioffub5is8ggv4ukh7b0.apps.googleusercontent.com"
  # OAuth client secret
  clientSecret: "E_fNyIORAblD6sZQFinlbHid"
  # Create a new secret with the following command
  # openssl rand -base64 32 | head -c 32 | base64
  # Use an existing secret for OAuth2 credentials (see secret.yaml for required fields)
  # Example:
  # existingSecret: secret
  cookieSecret: "d0c0WXdLMFdzM1R1QXFsYkFJMWlISHFTYVdDUlhGTVo="
  # The name of the cookie that oauth2-proxy will create
  # If left empty, it will default to the release name
  cookieName: "goldilocks"
  google: {}
    # adminEmail: xxxx
    # useApplicationDefaultCredentials: true
    # targetPrincipal: xxxx
    # serviceAccountJson: xxxx
    # Alternatively, use an existing secret (see google-secret.yaml for required fields)
    # Example:
    # existingSecret: google-secret
    # groups: []
    # Example:
    #   - group1@example.com
    #   - group2@example.com
  # Default configuration, to be overridden
  configFile: |-
    email_domains = [ "ervine.org", "ervine.dev", "ervine.cloud" ]
    upstreams = [ "file:///dev/null" ]
    google_admin_email = "sa-admin@ervine.org"
    google_group = "ervine-nzbget@ervine.org"
    google_service_account_json = "/etc/google/sa.json"
  # Custom configuration file: oauth2_proxy.cfg
  # configFile: |-
  #   pass_basic_auth = false
  #   pass_access_token = true
  # Use an existing config map (see configmap.yaml for required fields)
  # Example:
  # existingConfig: config
alphaConfig:
  enabled: false
  # Add config annotations
  annotations: {}
  # Arbitrary configuration data to append to the server section
  serverConfigData: {}
  # Arbitrary configuration data to append to the metrics section
  metricsConfigData: {}
  # Arbitrary configuration data to append
  configData: {}
  # Arbitrary configuration to append
  # This is treated as a Go template and rendered with the root context
  configFile: ""
  # Use an existing config map (see secret-alpha.yaml for required fields)
  existingConfig: ~
  # Use an existing secret
  existingSecret: ~
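# An illustrative sketch only (schema per the oauth2-proxy alpha-config docs;
# the injected header is an arbitrary example, not something this deployment sets):
# alphaConfig:
#   enabled: true
#   configData:
#     injectRequestHeaders:
#       - name: X-Forwarded-Email
#         values:
#           - claim: email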
image:
  repository: "quay.io/oauth2-proxy/oauth2-proxy"
  # appVersion is used by default
  tag: ""
  pullPolicy: "IfNotPresent"

# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# imagePullSecrets:
#   - name: myRegistryKeySecretName

# Set a custom containerPort if required.
# This will default to 4180 if not set and httpScheme is set to http
# This will default to 4443 if not set and httpScheme is set to https
# containerPort: 4180
extraArgs: {}
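# A hedged sketch (keys are oauth2-proxy CLI flags without the leading "--";
# both flags exist upstream, but the values are assumptions for this deployment):
# extraArgs:
#   provider: google
#   whitelist-domain: .ervine.cloud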
extraEnv: []
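# A hedged sketch (oauth2-proxy also reads OAUTH2_PROXY_* environment
# variables; the refresh interval here is an assumption):
# extraEnv:
#   - name: OAUTH2_PROXY_COOKIE_REFRESH
#     value: "1h"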
# -- Custom labels to add into metadata
customLabels: {}
# To authorize individual email addresses
# This is technically part of extraArgs, but since it needs special treatment
# it gets its own section
authenticatedEmailsFile:
  enabled: false
  # Defines how the email addresses file will be projected: via a configmap or secret
  persistence: configmap
  # template is the name of an existing configmap that contains the email user
  # list and is managed outside of this chart. It's a simpler way to maintain a
  # single configmap (user list) instead of changing it for each oauth2-proxy
  # service. Be aware that the key in the external configmap's data needs to be
  # named "restricted_user_access", or match the value provided in the
  # restrictedUserAccessKey field.
  template: ""
  # The configmap/secret key under which the list of email access is stored
  # Defaults to "restricted_user_access" if not filled in, but can be overridden for flexibility
  restrictedUserAccessKey: ""
  # One email per line
  # example:
  # restricted_access: |-
  #   name1@domain
  #   name2@domain
  # If you set restricted_access instead, this chart configures the user list
  # itself and takes care of the configmap resource.
  restricted_access: ""
  annotations: {}
  # helm.sh/resource-policy: keep
service:
  type: ClusterIP
  # when service.type is ClusterIP ...
  # clusterIP: 192.0.2.20
  # when service.type is LoadBalancer ...
  # loadBalancerIP: 198.51.100.40
  # loadBalancerSourceRanges: 203.0.113.0/24
  # when service.type is NodePort ...
  # nodePort: 80
  portNumber: 80
  # Protocol set on the service
  appProtocol: http
  annotations: {}
  # foo.io/bar: "true"
## Create or use ServiceAccount
serviceAccount:
  ## Specifies whether a ServiceAccount should be created
  enabled: true
  ## The name of the ServiceAccount to use.
  ## If not set and create is true, a name is generated using the fullname template
  name:
  automountServiceAccountToken: true
  annotations: {}
ingress:
  enabled: true
  # className: nginx
  path: /oauth2
  # Only used if API capabilities (networking.k8s.io/v1) allow it
  pathType: ImplementationSpecific
  # Used to create an Ingress record.
  hosts:
    - goldilocks.ervine.cloud
  # Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
  # Warning! The configuration is dependent on your current k8s API version capabilities (networking.k8s.io/v1)
  # extraPaths:
  #   - path: /*
  #     pathType: ImplementationSpecific
  #     backend:
  #       service:
  #         name: ssl-redirect
  #         port:
  #           name: use-annotation
  labels: {}
  # annotations:
  #   kubernetes.io/ingress.class: nginx
  #   kubernetes.io/tls-acme: "true"
  tls:
    # Secrets must be manually created in the namespace.
    - secretName: goldilocks-ervine-cloud-tls
      hosts:
        - goldilocks.ervine.cloud
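  # A hedged sketch: with cert-manager installed, the TLS secret above can be
  # issued automatically instead of created by hand (the issuer name is an
  # assumption):
  # annotations:
  #   cert-manager.io/cluster-issuer: letsencrypt-prod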
resources:
  limits:
    # cpu: 100m
    memory: 300Mi
  requests:
    cpu: 100m
    memory: 300Mi
extraVolumes:
  - name: google-sa
    secret:
      secretName: oauth2-proxy-sa-json
  # - name: ca-bundle-cert
  #   secret:
  #     secretName: <secret-name>
extraVolumeMounts:
  - mountPath: /etc/google/sa.json
    name: google-sa
    subPath: sa.json
  # - mountPath: /etc/ssl/certs/
  #   name: ca-bundle-cert
# Additional containers to be added to the pod.
extraContainers: []
  # - name: my-sidecar
  #   image: nginx:latest
priorityClassName: ""
# Host aliases: useful when working on premises, where the (public) DNS resolver does not know about your hosts.
hostAlias:
  enabled: false
  # ip: "10.xxx.xxx.xxx"
  # hostname: "auth.example.com"
# [TopologySpreadConstraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) configuration.
# Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling
# topologySpreadConstraints: []
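# A minimal sketch (the labelSelector must match the labels on the rendered
# pods; the values here are assumptions):
# topologySpreadConstraints:
#   - maxSkew: 1
#     topologyKey: kubernetes.io/hostname
#     whenUnsatisfiable: ScheduleAnyway
#     labelSelector:
#       matchLabels:
#         app.kubernetes.io/name: oauth2-proxy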
# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
# affinity: {}
# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
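# Example sketch (the taint key/value here are assumptions):
# tolerations:
#   - key: "dedicated"
#     operator: "Equal"
#     value: "auth"
#     effect: "NoSchedule"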
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# Whether to use secrets instead of environment values for setting up OAUTH2_PROXY variables
proxyVarsAsSecrets: true
# Configure Kubernetes liveness and readiness probes.
# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
# Disable both when deploying with Istio 1.0 mTLS. https://istio.io/help/faq/security/#k8s-health-checks
livenessProbe:
  enabled: true
  initialDelaySeconds: 0
  timeoutSeconds: 1
readinessProbe:
  enabled: true
  initialDelaySeconds: 0
  timeoutSeconds: 5
  periodSeconds: 10
  successThreshold: 1
# Configure Kubernetes security context for container
# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
  enabled: true
  allowPrivilegeEscalation: false
  capabilities:
    drop:
      - ALL
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 2000
  runAsGroup: 2000
  seccompProfile:
    type: RuntimeDefault
deploymentAnnotations: {}
podAnnotations: {}
podLabels: {}
replicaCount: 1
revisionHistoryLimit: 10
## PodDisruptionBudget settings
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
podDisruptionBudget:
  enabled: true
  minAvailable: 1
# Configure Kubernetes security context for pod
# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
podSecurityContext: {}
# whether to use http or https
httpScheme: http
initContainers:
  # if the redis sub-chart is enabled, wait for it to be ready
  # before starting the proxy
  # creates a role binding to get, list, and watch the redis master pod
  # if the service account is enabled
  waitForRedis:
    enabled: true
    # uses the kubernetes version of the cluster the chart is deployed on,
    # if not set
    kubectlVersion: ""
    securityContext:
      enabled: true
      allowPrivilegeEscalation: false
      capabilities:
        drop:
          - ALL
      readOnlyRootFilesystem: true
      runAsNonRoot: true
      runAsUser: 65534
      runAsGroup: 65534
      seccompProfile:
        type: RuntimeDefault
    timeout: 180
# Additionally authenticate against a htpasswd file. Entries must be created with "htpasswd -B" for bcrypt hashing.
# Alternatively, supply an existing secret which contains the required information.
htpasswdFile:
  enabled: false
  existingSecret: ""
  entries: []
  # One row for each user
  # example:
  # entries:
  #   - testuser:$2y$05$gY6dgXqjuzFhwdhsiFe7seM9q9Tile4Y3E.CBpAZJffkeiLaC21Gy
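  # A quick sketch for generating such an entry (placeholder credentials):
  #   htpasswd -nbB testuser changeme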
# Configure the session storage type, between cookie and redis
sessionStorage:
  # Can be one of the supported session storage types: cookie|redis
  type: cookie
  redis:
    # Name of the Kubernetes secret containing the redis & redis sentinel password values (see also `sessionStorage.redis.passwordKey`)
    existingSecret: ""
    # Redis password value. Applicable for all Redis configurations. Taken from the redis subchart secret if not set. `sessionStorage.redis.existingSecret` takes precedence
    password: ""
    # Key of the Kubernetes secret data containing the redis password value
    passwordKey: "redis-password"
    # Can be one of standalone|cluster|sentinel
    clientType: "standalone"
    standalone:
      # URL of redis standalone server for redis session storage (e.g. `redis://HOST[:PORT]`). Automatically generated if not set
      connectionUrl: ""
    cluster:
      # List of Redis cluster connection URLs (e.g. `["redis://127.0.0.1:8000", "redis://127.0.0.1:8000"]`)
      connectionUrls: []
    sentinel:
      # Name of the Kubernetes secret containing the redis sentinel password value (see also `sessionStorage.redis.sentinel.passwordKey`). Default: `sessionStorage.redis.existingSecret`
      existingSecret: ""
      # Redis sentinel password. Used only for the sentinel connection; any redis node passwords need to use `sessionStorage.redis.password`
      password: ""
      # Key of the Kubernetes secret data containing the redis sentinel password value
      passwordKey: "redis-sentinel-password"
      # Redis sentinel master name
      masterName: ""
      # List of Redis sentinel connection URLs (e.g. `["redis://127.0.0.1:8000", "redis://127.0.0.1:8000"]`)
      connectionUrls: []
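# A hedged sketch for switching to redis-backed sessions using the bundled
# subchart (assumes redis.enabled below is flipped to true so the chart can
# generate the connection URL):
# sessionStorage:
#   type: redis
#   redis:
#     clientType: "standalone"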
# Enables and configures the automatic deployment of the redis subchart
redis:
  # provision an instance of the redis sub-chart
  enabled: false
  # Redis specific helm chart settings, please see:
  # https://github.com/bitnami/charts/tree/master/bitnami/redis#parameters
  # redisPort: 6379
  # cluster:
  #   enabled: false
  #   slaveCount: 1
# Enables apiVersion deprecation checks
checkDeprecation: true
metrics:
  # Enable Prometheus metrics endpoint
  enabled: true
  # Serve Prometheus metrics on this port
  port: 44180
  # when service.type is NodePort ...
  # nodePort: 44180
  # Protocol set on the service for the metrics port
  service:
    appProtocol: http
  servicemonitor:
    # Enable Prometheus Operator ServiceMonitor
    enabled: true
    # Define the namespace where to deploy the ServiceMonitor resource
    namespace: "monitoring"
    # Prometheus Instance definition
    prometheusInstance: k8s
    # Prometheus scrape interval
    interval: 60s
    # Prometheus scrape timeout
    scrapeTimeout: 30s
    # Add custom labels to the ServiceMonitor resource
    labels: {}
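    # A common sketch: if your Prometheus Operator only selects ServiceMonitors
    # carrying a release label (as with kube-prometheus-stack), something like
    # this is typically needed (the release name is an assumption):
    # labels:
    #   release: kube-prometheus-stack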
# Extra K8s manifests to deploy
extraObjects: []
# - apiVersion: secrets-store.csi.x-k8s.io/v1
#   kind: SecretProviderClass
#   metadata:
#     name: oauth2-proxy-secrets-store
#   spec:
#     provider: aws
#     parameters:
#       objects: |
#         - objectName: "oauth2-proxy"
#           objectType: "secretsmanager"
#           jmesPath:
#             - path: "client_id"
#               objectAlias: "client-id"
#             - path: "client_secret"
#               objectAlias: "client-secret"
#             - path: "cookie_secret"
#               objectAlias: "cookie-secret"
#     secretObjects:
#       - secretName: oauth2-proxy-secrets-store
#         type: Opaque
#         data:
#           - key: client-id
#             objectName: client-id
#           - key: client-secret
#             objectName: client-secret
#           - key: cookie-secret
#             objectName: cookie-secret