# charts/ingress-nginx/values.homeK8s.yaml
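# A minimal sketch of how this values file might be applied (the release and
# namespace names here are assumptions, not taken from this file):
#   helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx \
#     --namespace ingress-nginx --create-namespace \
#     --values charts/ingress-nginx/values.homeK8s.yaml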

controller:
  image:
    ## Keep false as default for now!
    chroot: false
    registry: registry.k8s.io
    image: ingress-nginx/controller
    ## For backwards compatibility, consider setting the full image URL via the repository value below.
    ## Use *either* the default registry/image pair *or* the repository format; mixing both will make the chart install fail.
    ## repository:
    tag: "v1.9.6"
    digest: sha256:1405cc613bd95b2c6edd8b2a152510ae91c7e62aea4698500d23b2145960ab9c
    digestChroot: sha256:7eb46ff733429e0e46892903c7394aff149ac6d284d92b3946f3baf7ff26a096
    pullPolicy: IfNotPresent
    runAsNonRoot: true
    # www-data -> uid 101
    runAsUser: 101
    allowPrivilegeEscalation: false
    seccompProfile:
      type: RuntimeDefault
    readOnlyRootFilesystem: false
  containerName: controller
  containerPort:
    http: 80
    https: 443
  ingressClassResource:
    name: nginx
    enabled: true
    default: true
    controllerValue: "k8s.io/ingress-nginx"
    parameters: {}
  ingressClass: nginx
  publishService:
    enabled: true
    pathOverride: ""
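  # With publishService enabled, the controller publishes the external address
  # of its LoadBalancer Service in the status of the Ingress objects it manages.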
  configMapNamespace: ""
  # Do not commit a real MaxMind license key to version control; leave this
  # empty and supply the key at deploy time.
  maxmindLicenseKey: ""
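  # A hedged example of injecting the key at install time instead of committing
  # it (the environment variable name is an assumption):
  #   helm upgrade ... --set controller.maxmindLicenseKey="$MAXMIND_LICENSE_KEY"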
  kind: Deployment
  annotations: {}
  minReadySeconds: 0
  tolerations: []
  #  - key: "key"
  #    operator: "Equal|Exists"
  #    value: "value"
  #    effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
  # -- Affinity and anti-affinity rules for server scheduling to nodes
  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ##
  affinity: {}
  # # An example of preferred pod anti-affinity, weight is in the range 1-100
  # podAntiAffinity:
  #   preferredDuringSchedulingIgnoredDuringExecution:
  #   - weight: 100
  #     podAffinityTerm:
  #       labelSelector:
  #         matchExpressions:
  #         - key: app.kubernetes.io/name
  #           operator: In
  #           values:
  #           - ingress-nginx
  #         - key: app.kubernetes.io/instance
  #           operator: In
  #           values:
  #           - ingress-nginx
  #         - key: app.kubernetes.io/component
  #           operator: In
  #           values:
  #           - controller
  #       topologyKey: kubernetes.io/hostname
  # # An example of required pod anti-affinity
  # podAntiAffinity:
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #   - labelSelector:
  #       matchExpressions:
  #       - key: app.kubernetes.io/name
  #         operator: In
  #         values:
  #         - ingress-nginx
  #       - key: app.kubernetes.io/instance
  #         operator: In
  #         values:
  #         - ingress-nginx
  #       - key: app.kubernetes.io/component
  #         operator: In
  #         values:
  #         - controller
  #     topologyKey: "kubernetes.io/hostname"
  # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  ##
  topologySpreadConstraints: []
  #  - labelSelector:
  #      matchLabels:
  #        app.kubernetes.io/name: '{{ include "ingress-nginx.name" . }}'
  #        app.kubernetes.io/instance: '{{ .Release.Name }}'
  #        app.kubernetes.io/component: controller
  #    topologyKey: topology.kubernetes.io/zone
  #    maxSkew: 1
  #    whenUnsatisfiable: ScheduleAnyway
  #  - labelSelector:
  #      matchLabels:
  #        app.kubernetes.io/name: '{{ include "ingress-nginx.name" . }}'
  #        app.kubernetes.io/instance: '{{ .Release.Name }}'
  #        app.kubernetes.io/component: controller
  #    topologyKey: kubernetes.io/hostname
  #    maxSkew: 1
  #    whenUnsatisfiable: ScheduleAnyway
  nodeSelector:
    kubernetes.io/os: linux
    location: livingRoom
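  # `location` is a custom node label specific to this home cluster; a node can
  # be made to match with, for example:
  #   kubectl label node <node-name> location=livingRoom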
  livenessProbe:
    httpGet:
      # should match container.healthCheckPath
      path: "/healthz"
      port: 10254
      scheme: HTTP
    initialDelaySeconds: 10
    periodSeconds: 10
    timeoutSeconds: 1
    successThreshold: 1
    failureThreshold: 5
  readinessProbe:
    httpGet:
      # should match container.healthCheckPath
      path: "/healthz"
      port: 10254
      scheme: HTTP
    initialDelaySeconds: 10
    periodSeconds: 10
    timeoutSeconds: 1
    successThreshold: 1
    failureThreshold: 3
  healthCheckPath: "/healthz"
  replicaCount: 1
  # -- Minimum available pods set in PodDisruptionBudget.
  # Define either 'minAvailable' or 'maxUnavailable', never both.
  minAvailable: 1
  # -- Maximum unavailable pods set in PodDisruptionBudget. If set, 'minAvailable' is ignored.
  # maxUnavailable: 1
  ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes
  ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903
  ## Ideally, there should be no limits.
  ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
  resources:
    ##  limits:
    ##    cpu: 100m
    ##    memory: 90Mi
    requests:
      cpu: 100m
      memory: 280Mi
  # Mutually exclusive with KEDA autoscaling
  autoscaling:
    enabled: true
    annotations: {}
    minReplicas: 2
    maxReplicas: 4
    targetCPUUtilizationPercentage: 80
    targetMemoryUtilizationPercentage: 80
    behavior: {}
    #  scaleDown:
    #    stabilizationWindowSeconds: 300
    #    policies:
    #      - type: Pods
    #        value: 1
    #        periodSeconds: 180
    #  scaleUp:
    #    stabilizationWindowSeconds: 300
    #    policies:
    #      - type: Pods
    #        value: 2
    #        periodSeconds: 60
  autoscalingTemplate: []
  # Custom or additional autoscaling metrics
  # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
  # - type: Pods
  #   pods:
  #     metric:
  #       name: nginx_ingress_controller_nginx_process_requests_total
  #     target:
  #       type: AverageValue
  #       averageValue: 10000m
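  # When several metric targets are set, the HorizontalPodAutoscaler computes a
  # desired replica count per metric and scales to the highest of them, so the
  # 80% CPU and 80% memory targets above act as independent scale-up triggers.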
  enableMimalloc: true
  service:
    enabled: true
    external:
      enabled: true
    annotations: {}
    type: LoadBalancer
    enableHttp: true
    enableHttps: true
    ports:
      http: 80
      https: 443
    targetPorts:
      http: http
      https: https
    appProtocol: true
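  # On a bare-metal home cluster, a Service of type LoadBalancer needs an
  # external implementation (e.g. MetalLB) to actually be assigned an address;
  # that is an assumption about this environment, not something the chart provides.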
  opentelemetry:
    enabled: true
    name: opentelemetry
    image:
      registry: registry.k8s.io
      image: ingress-nginx/opentelemetry
      tag: "v20230721-3e2062ee5"
      digest: sha256:13bee3f5223883d3ca62fee7309ad02d22ec00ff0d7033e3e9aca7a9f60fd472
      distroless: true
    containerSecurityContext:
      runAsNonRoot: true
      runAsUser: 65532
      allowPrivilegeEscalation: false
      seccompProfile:
        type: RuntimeDefault
      capabilities:
        drop:
          - ALL
      readOnlyRootFilesystem: true
    resources: {}
  admissionWebhooks:
    name: admission
    annotations: {}
    enabled: true
    failurePolicy: Fail
    # timeoutSeconds: 10
    port: 8443
    certificate: "/usr/local/certificates/cert"
    key: "/usr/local/certificates/key"
    namespaceSelector: {}
    objectSelector: {}
    labels: {}
    existingPsp: ""
    service:
      annotations: {}
      externalIPs: []
      loadBalancerSourceRanges: []
      servicePort: 443
      type: ClusterIP
    createSecretJob:
      name: create
      securityContext:
        runAsNonRoot: true
        runAsUser: 65532
        allowPrivilegeEscalation: false
        seccompProfile:
          type: RuntimeDefault
        capabilities:
          drop:
            - ALL
        readOnlyRootFilesystem: true
      resources: {}
    patchWebhookJob:
      name: patch
      securityContext:
        runAsNonRoot: true
        runAsUser: 65532
        allowPrivilegeEscalation: false
        seccompProfile:
          type: RuntimeDefault
        capabilities:
          drop:
            - ALL
        readOnlyRootFilesystem: true
      resources: {}
    patch:
      enabled: true
      image:
        registry: registry.k8s.io
        image: ingress-nginx/kube-webhook-certgen
        tag: v20231226-1a7112e06
        digest: sha256:25d6a5f11211cc5c3f9f2bf552b585374af287b4debf693cacbe2da47daa5084
        pullPolicy: IfNotPresent
      priorityClassName: ""
      podAnnotations: {}
      networkPolicy:
        enabled: false
      nodeSelector:
        kubernetes.io/os: linux
      tolerations: []
      labels: {}
      securityContext: {}
  metrics:
    port: 10254
    portName: metrics
    # if this port is changed, change healthz-port: in extraArgs: accordingly
    enabled: true
    service:
      annotations: {}
      labels: {}
      externalIPs: []
      loadBalancerSourceRanges: []
      servicePort: 10254
      type: ClusterIP
    serviceMonitor:
      enabled: true
      additionalLabels: {}
      annotations: {}
      namespace: ""
      namespaceSelector: {}
      scrapeInterval: 30s
      targetLabels: []
      relabelings: []
      metricRelabelings: []
    prometheusRule:
      enabled: true
      additionalLabels: {}
      # namespace: ""
      rules:
        # These are just example rules; adapt them to your needs.
        - alert: NGINXConfigFailed
          expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
          for: 1s
          labels:
            severity: critical
          annotations:
            description: bad ingress config - nginx config test failed
            summary: uninstall the latest ingress changes to allow config reloads to resume
        - alert: NGINXCertificateExpiry
          expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds{host!="_"}) by (host) - time()) < 604800
          for: 1s
          labels:
            severity: critical
          annotations:
            description: ssl certificate(s) will expire in less than a week
            summary: renew expiring certificates to avoid downtime
        - alert: NGINXTooMany500s
          expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
          for: 1m
          labels:
            severity: warning
          annotations:
            description: Too many 5XXs
            summary: More than 5% of all requests returned 5XX; this requires your attention
        - alert: NGINXTooMany400s
          expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
          for: 1m
          labels:
            severity: warning
          annotations:
            description: Too many 4XXs
            summary: More than 5% of all requests returned 4XX; this requires your attention
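    # Note: serviceMonitor and prometheusRule above create Prometheus Operator
    # custom resources (ServiceMonitor/PrometheusRule); the operator and its
    # CRDs must already be installed in the cluster or the install will fail.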
  lifecycle:
    preStop:
      exec:
        command:
          - /wait-shutdown
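  # /wait-shutdown ships in the controller image; running it as a preStop hook
  # lets NGINX drain in-flight connections before the pod is terminated.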
  priorityClassName: ""
revisionHistoryLimit: 10
defaultBackend:
  enabled: true
  name: defaultbackend
  image:
    registry: registry.k8s.io
    image: defaultbackend-amd64
    tag: "1.5"
    pullPolicy: IfNotPresent
    runAsNonRoot: true
    runAsUser: 65534
    allowPrivilegeEscalation: false
    seccompProfile:
      type: RuntimeDefault
    readOnlyRootFilesystem: true
  existingPsp: ""
  extraArgs: {}
  serviceAccount:
    create: true
    name: ""
    automountServiceAccountToken: true
  extraEnvs: []
  port: 8080
  livenessProbe:
    failureThreshold: 3
    initialDelaySeconds: 30
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 5
  readinessProbe:
    failureThreshold: 6
    initialDelaySeconds: 0
    periodSeconds: 5
    successThreshold: 1
    timeoutSeconds: 5
  updateStrategy: {}
  minReadySeconds: 0
  tolerations: []
  #  - key: "key"
  #    operator: "Equal|Exists"
  #    value: "value"
  #    effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
  affinity: {}
  podSecurityContext: {}
  containerSecurityContext: {}
  podLabels: {}
  nodeSelector:
    kubernetes.io/os: linux
    location: livingRoom
  replicaCount: 1
  minAvailable: 1
  service:
    servicePort: 80
    type: ClusterIP
rbac:
  create: true
  scope: false
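  # With scope: false the chart creates cluster-wide RBAC (a ClusterRole);
  # setting it to true restricts permissions to namespaced Roles instead.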
podSecurityPolicy:
  enabled: false
serviceAccount:
  create: true
  name: ""
  automountServiceAccountToken: true
  annotations: {}
imagePullSecrets: []
# -- TCP service key-value pairs
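# Each entry maps an exposed port on the controller to a backend Service,
# using the format "<namespace>/<service name>:<service port>".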
tcp:
  '22': "git/gitea:22"
  '3306': "db/mariadb:3306"
  '9084': "ingress/vmware-updates-repo:9084"
  '56451': "media/torrent-transmission:56451"
udp:
  '56451': "media/torrent-transmission-udp:56451"
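# A quick post-deploy check that the extra TCP/UDP ports above are exposed on
# the controller Service (release and namespace names are assumptions):
#   kubectl -n ingress-nginx get svc ingress-nginx-controller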