Add teleport

commit 9467c59c8d (parent ec83286fc7)
teleport-kube-agent/.lint/affinity.yaml (new file)
@@ -0,0 +1,24 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: gravitational.io/dedicated
|
||||
operator: In
|
||||
values:
|
||||
- teleport
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- podAffinityTerm:
|
||||
labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- teleport
|
||||
topologyKey: kubernetes.io/hostname
|
||||
weight: 1
|
||||
teleport-kube-agent/.lint/all-v6.yaml (new file)
@@ -0,0 +1,27 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube,app,db
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
labels:
|
||||
cluster: testing
|
||||
apps:
|
||||
- name: grafana
|
||||
uri: http://localhost:3000
|
||||
labels:
|
||||
environment: test
|
||||
databases:
|
||||
- name: aurora
|
||||
uri: "postgres-aurora-instance-1.xxx.us-east-1.rds.amazonaws.com:5432"
|
||||
protocol: "postgres"
|
||||
labels:
|
||||
database: staging
|
||||
annotations:
|
||||
config:
|
||||
kubernetes.io/config: "test-annotation"
|
||||
kubernetes.io/config-different: 2
|
||||
deployment:
|
||||
kubernetes.io/deployment: "test-annotation"
|
||||
kubernetes.io/deployment-different: 3
|
||||
pod:
|
||||
kubernetes.io/pod: "test-annotation"
|
||||
kubernetes.io/pod-different: 4
|
||||
teleport-kube-agent/.lint/annotations.yaml (new file)
@@ -0,0 +1,20 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
annotations:
|
||||
config:
|
||||
kubernetes.io/config: "test-annotation"
|
||||
kubernetes.io/config-different: 2
|
||||
deployment:
|
||||
kubernetes.io/deployment: "test-annotation"
|
||||
kubernetes.io/deployment-different: 3
|
||||
pod:
|
||||
kubernetes.io/pod: "test-annotation"
|
||||
kubernetes.io/pod-different: 4
|
||||
secret:
|
||||
kubernetes.io/secret: "test-annotation"
|
||||
kubernetes.io/secret-different: 6
|
||||
serviceAccount:
|
||||
kubernetes.io/serviceaccount: "test-annotation"
|
||||
kubernetes.io/serviceaccount-different: 5
|
||||
teleport-kube-agent/.lint/aws-databases.yaml (new file)
@@ -0,0 +1,15 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: db
|
||||
awsDatabases:
|
||||
- types: ["rds"]
|
||||
regions: ["us-east-1"]
|
||||
tags:
|
||||
"*": "*"
|
||||
- types: ["rds"]
|
||||
regions: ["us-west-2"]
|
||||
tags:
|
||||
"env": "development"
|
||||
annotations:
|
||||
serviceAccount:
|
||||
eks.amazonaws.com/role-arn: arn:aws:iam::1234567890:role/my-rds-autodiscovery-role
|
||||
teleport-kube-agent/.lint/azure-databases.yaml (new file)
@@ -0,0 +1,26 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: db
|
||||
azureDatabases:
|
||||
- types: ["mysql", "postgres"]
|
||||
tags:
|
||||
"*": "*"
|
||||
- types: ["mysql"]
|
||||
tags:
|
||||
"env": ["dev", "staging"]
|
||||
"origin": "alice"
|
||||
regions: ["eastus", "centralus"]
|
||||
subscriptions: ["subID1", "subID2"]
|
||||
resource_groups: ["group1", "group2"]
|
||||
# environment variables can be used to authenticate as the Azure service principal
|
||||
extraEnv:
|
||||
- name: AZURE_CLIENT_SECRET
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: teleport-azure-client-secret
|
||||
key: client_secret
|
||||
optional: false
|
||||
- name: AZURE_TENANT_ID
|
||||
value: "11111111-2222-3333-4444-555555555555"
|
||||
- name: AZURE_CLIENT_ID
|
||||
value: "11111111-2222-3333-4444-555555555555"
|
||||
teleport-kube-agent/.lint/backwards-compatibility.yaml (new file)
@@ -0,0 +1,3 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
teleport-kube-agent/.lint/ca-pin.yaml (new file)
@@ -0,0 +1,5 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
caPin: ["sha256:7e12c17c20d9cb504bbcb3f0236be3f446861f1396dcbb44425fe28ec1c108f1"]
|
||||
teleport-kube-agent/.lint/clusterrole.yaml (new file)
@@ -0,0 +1,7 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
clusterRoleName: teleport-kube-agent-test
|
||||
clusterRoleBindingName: teleport-kube-agent-test
|
||||
serviceAccountName: teleport-kube-agent-test
|
||||
teleport-kube-agent/.lint/db.yaml (new file)
@@ -0,0 +1,9 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: db
|
||||
databases:
|
||||
- name: aurora
|
||||
uri: "postgres-aurora-instance-1.xxx.us-east-1.rds.amazonaws.com:5432"
|
||||
protocol: "postgres"
|
||||
labels:
|
||||
database: staging
|
||||
teleport-kube-agent/.lint/dnsconfig.yaml (new file)
@@ -0,0 +1,15 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
dnsConfig:
|
||||
nameservers:
|
||||
- 1.2.3.4
|
||||
searches:
|
||||
- ns1.svc.cluster-domain.example
|
||||
- my.dns.search.suffix
|
||||
options:
|
||||
- name: ndots
|
||||
value: "2"
|
||||
- name: edns0
|
||||
teleport-kube-agent/.lint/dynamic-app.yaml (new file)
@@ -0,0 +1,6 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: app
|
||||
appResources:
|
||||
- labels:
|
||||
"*": "*"
|
||||
teleport-kube-agent/.lint/dynamic-db.yaml (new file)
@@ -0,0 +1,6 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: db
|
||||
databaseResources:
|
||||
- labels:
|
||||
"*": "*"
|
||||
teleport-kube-agent/.lint/existing-data-volume.yaml (new file)
@@ -0,0 +1,5 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
existingDataVolume: teleport-kube-agent-data
|
||||
@@ -0,0 +1,6 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
tls:
|
||||
existingCASecretName: "helm-lint-existing-tls-secret-ca"
|
||||
teleport-kube-agent/.lint/extra-args.yaml (new file)
@@ -0,0 +1,5 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
extraArgs: ['--debug']
|
||||
teleport-kube-agent/.lint/extra-env.yaml (new file)
@@ -0,0 +1,7 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
extraEnv:
|
||||
- name: HTTPS_PROXY
|
||||
value: "http://username:password@my.proxy.host:3128"
|
||||
teleport-kube-agent/.lint/extra-labels.yaml (new file)
@@ -0,0 +1,37 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster
|
||||
extraLabels:
|
||||
role:
|
||||
app.kubernetes.io/name: "teleport-kube-agent"
|
||||
resource: "role"
|
||||
roleBinding:
|
||||
app.kubernetes.io/name: "teleport-kube-agent"
|
||||
resource: "rolebinding"
|
||||
clusterRole:
|
||||
app.kubernetes.io/name: "teleport-kube-agent"
|
||||
resource: "clusterrole"
|
||||
clusterRoleBinding:
|
||||
app.kubernetes.io/name: "teleport-kube-agent"
|
||||
resource: "clusterrolebinding"
|
||||
config:
|
||||
app.kubernetes.io/name: "teleport-kube-agent"
|
||||
resource: "config"
|
||||
deployment:
|
||||
app.kubernetes.io/name: "teleport-kube-agent"
|
||||
resource: "deployment"
|
||||
pod:
|
||||
app.kubernetes.io/name: "teleport-kube-agent"
|
||||
resource: "pod"
|
||||
podDisruptionBudget:
|
||||
app.kubernetes.io/name: "teleport-kube-agent"
|
||||
resource: "poddisruptionbudget"
|
||||
podSecurityPolicy:
|
||||
app.kubernetes.io/name: "teleport-kube-agent"
|
||||
resource: "podsecuritypolicy"
|
||||
secret:
|
||||
app.kubernetes.io/name: "teleport-kube-agent"
|
||||
resource: "secret"
|
||||
serviceAccount:
|
||||
app.kubernetes.io/name: "teleport-kube-agent"
|
||||
resource: "serviceaccount"
|
||||
teleport-kube-agent/.lint/host-aliases.yaml (new file)
@@ -0,0 +1,11 @@
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
hostAliases:
|
||||
- ip: "127.0.0.1"
|
||||
hostnames:
|
||||
- "foo.local"
|
||||
- "bar.local"
|
||||
- ip: "10.1.2.3"
|
||||
hostnames:
|
||||
- "foo.remote"
|
||||
- "bar.remote"
|
||||
@@ -0,0 +1,7 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
storage:
|
||||
enabled: true
|
||||
storageClassName: "aws-gp2"
|
||||
imagePullPolicy: Always
|
||||
teleport-kube-agent/.lint/image-pull-policy.yaml (new file)
@@ -0,0 +1,5 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
imagePullPolicy: Always
|
||||
teleport-kube-agent/.lint/imagepullsecrets.yaml (new file)
@@ -0,0 +1,7 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
image: public.ecr.aws/gravitational/teleport
|
||||
imagePullSecrets:
|
||||
- name: myRegistryKeySecretName
|
||||
teleport-kube-agent/.lint/initcontainers.yaml (new file)
@@ -0,0 +1,17 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
initContainers:
|
||||
- name: "teleport-init"
|
||||
image: "alpine"
|
||||
args: ["echo test"]
|
||||
# These are just sample values to test the chart.
|
||||
# They are not intended to be guidelines or suggestions for running teleport.
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: 1
|
||||
memory: 2Gi
|
||||
teleport-kube-agent/.lint/join-params-iam.yaml (new file)
@@ -0,0 +1,5 @@
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
joinParams:
|
||||
tokenName: iam-token
|
||||
method: iam
|
||||
teleport-kube-agent/.lint/join-params-token.yaml (new file)
@@ -0,0 +1,5 @@
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
joinParams:
|
||||
tokenName: xxxxxxx-secret-token-xxxxxxx
|
||||
method: token
|
||||
teleport-kube-agent/.lint/log-basic.yaml (new file)
@@ -0,0 +1,6 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
log:
|
||||
format: json
|
||||
level: INFO
|
||||
teleport-kube-agent/.lint/log-extra.yaml (new file)
@@ -0,0 +1,8 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
log:
|
||||
format: json
|
||||
level: DEBUG
|
||||
output: /var/lib/teleport/test.log
|
||||
extraFields: ["level", "timestamp", "component", "caller"]
|
||||
teleport-kube-agent/.lint/log-legacy.yaml (new file)
@@ -0,0 +1,4 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
logLevel: DEBUG
|
||||
teleport-kube-agent/.lint/node-selector.yaml (new file)
@@ -0,0 +1,5 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
nodeSelector:
|
||||
gravitational.io/k8s-role: node
|
||||
teleport-kube-agent/.lint/pdb.yaml (new file)
@@ -0,0 +1,7 @@
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
highAvailability:
|
||||
replicaCount: 3
|
||||
podDisruptionBudget:
|
||||
enabled: true
|
||||
minAvailable: 2
|
||||
teleport-kube-agent/.lint/podmonitor.yaml (new file)
@@ -0,0 +1,7 @@
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
podMonitor:
|
||||
enabled: true
|
||||
additionalLabels:
|
||||
prometheus: default
|
||||
interval: 30s
|
||||
teleport-kube-agent/.lint/priority-class-name.yaml (new file)
@@ -0,0 +1,4 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster
|
||||
priorityClassName: teleport-kube-agent
|
||||
teleport-kube-agent/.lint/probe-timeout-seconds.yaml (new file)
@@ -0,0 +1,7 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
# These are just sample values to test the chart.
|
||||
# They are not intended to be guidelines or suggestions for running teleport.
|
||||
probeTimeoutSeconds: 5
|
||||
teleport-kube-agent/.lint/resources.yaml (new file)
@@ -0,0 +1,13 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
# These are just sample values to test the chart.
|
||||
# They are not intended to be guidelines or suggestions for running teleport.
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: 1
|
||||
memory: 2Gi
|
||||
teleport-kube-agent/.lint/security-context-empty.yaml (new file)
@@ -0,0 +1,6 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: helm-lint
|
||||
securityContext: null
|
||||
initSecurityContext: null
|
||||
teleport-kube-agent/.lint/service-account-name.yaml (new file)
@@ -0,0 +1,5 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
serviceAccountName: teleport-kube-agent-sa
|
||||
teleport-kube-agent/.lint/stateful.yaml (new file)
@@ -0,0 +1,6 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
storage:
|
||||
enabled: true
|
||||
storageClassName: "aws-gp2"
|
||||
teleport-kube-agent/.lint/tolerations.yaml (new file)
@@ -0,0 +1,13 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
tolerations:
|
||||
- key: "dedicated"
|
||||
operator: "Equal"
|
||||
value: "teleport"
|
||||
effect: "NoExecute"
|
||||
- key: "dedicated"
|
||||
operator: "Equal"
|
||||
value: "teleport"
|
||||
effect: "NoSchedule"
|
||||
teleport-kube-agent/.lint/updater.yaml (new file)
@@ -0,0 +1,6 @@
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: "custom"
|
||||
updater:
|
||||
enabled: true
|
||||
versionServer: https://my-custom-version-server/v1
|
||||
releaseChannel: custom/preview
|
||||
teleport-kube-agent/.lint/v10.yaml (new file)
@@ -0,0 +1,5 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
teleportVersionOverride: "10.3.5-dev"
|
||||
teleport-kube-agent/.lint/v11.yaml (new file)
@@ -0,0 +1,5 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster-name
|
||||
teleportVersionOverride: "11.0.1-dev"
|
||||
teleport-kube-agent/.lint/volumes.yaml (new file)
@@ -0,0 +1,11 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: kube
|
||||
kubeClusterName: test-kube-cluster
|
||||
extraVolumeMounts:
|
||||
- name: "my-mount"
|
||||
mountPath: "/path/to/mount"
|
||||
extraVolumes:
|
||||
- name: "my-mount"
|
||||
secret:
|
||||
secretName: "mySecret"
|
||||
teleport-kube-agent/Chart.yaml (new file)
@@ -0,0 +1,9 @@
|
||||
apiVersion: v2
|
||||
appVersion: 13.3.8
|
||||
description: Teleport provides a secure SSH, Kubernetes, database and application
|
||||
remote access solution that doesn't get in the way.
|
||||
icon: https://goteleport.com/images/logos/logo-teleport-square.svg
|
||||
keywords:
|
||||
- Teleport
|
||||
name: teleport-kube-agent
|
||||
version: 13.3.8
|
||||
teleport-kube-agent/README.md (new file)
@@ -0,0 +1,245 @@
# Teleport Agent chart

This chart is a Teleport agent used to register any or all of the following services
with an existing Teleport cluster:
- Teleport Kubernetes access
- Teleport Application access
- Teleport Database access

To use it, you will need:
- an existing Teleport cluster (at least proxy and auth services)
- a reachable proxy endpoint (`$PROXY_ENDPOINT`, e.g. `teleport.example.com:3080` or `teleport.example.com:443`)
- a reachable reverse tunnel port on the proxy (e.g. `teleport.example.com:3024`). The address is automatically
  retrieved from the Teleport proxy configuration.
- either a static or dynamic join token for the Teleport cluster:
  - a [static join token](https://goteleport.com/docs/setup/admin/adding-nodes/#adding-nodes-to-the-cluster)
    for this Teleport cluster (`$JOIN_TOKEN`) is used by default.
  - optionally, a [dynamic join token](https://goteleport.com/docs/setup/admin/adding-nodes/#short-lived-dynamic-tokens) can
    be used on Kubernetes clusters that support persistent volumes. Set `storage.enabled=true` and
    `storage.storageClassName=<storage class configured in kubernetes>` in the Helm configuration to use persistent
    volumes (see the sketch below).
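
A minimal values sketch of that dynamic-token setup, with placeholder values only (the `aws-gp2` storage class mirrors the chart's own `.lint/stateful.yaml` fixture and must be replaced with a storage class that exists in your cluster):

```yaml
authToken: auth-token
proxyAddr: proxy.example.com:3080
kubeClusterName: example-kube-cluster
storage:
  enabled: true
  storageClassName: "aws-gp2"
```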

## Combining roles

You can combine multiple roles as a comma-separated list: `--set roles=kube\,db\,app`

Note that commas must be escaped if the values are provided on the command line. This is due to the way that
Helm parses arguments.

You must also provide the settings for each individual role that is enabled, as detailed below.
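
As an illustration, a values file enabling all three roles might look like the following sketch. It mirrors the chart's `.lint/all-v6.yaml` fixture, and every name, address and URI is a placeholder:

```yaml
authToken: auth-token
proxyAddr: proxy.example.com:3080
roles: kube,app,db
# Required when the kube role is enabled:
kubeClusterName: example-kube-cluster
# Required when the app role is enabled (either apps or appResources):
apps:
  - name: grafana
    uri: http://localhost:3000
# Required when the db role is enabled (databases, awsDatabases, azureDatabases or databaseResources):
databases:
  - name: aurora
    uri: "postgres-aurora-instance-1.xxx.us-east-1.rds.amazonaws.com:5432"
    protocol: "postgres"
```

In a values file the commas in `roles` do not need escaping; escaping is only required when the value is passed with `--set` on the command line.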

## Backwards compatibility

To provide backwards compatibility with older versions of the `teleport-kube-agent` chart, if you do
not specify any value for `roles`, the chart will run with only the `kube` role enabled.

## Kubernetes access

To use Teleport Kubernetes access, you will also need:
- to choose a name for your Kubernetes cluster, distinct from other registered
  clusters (`$KUBERNETES_CLUSTER_NAME`)

To install the agent, run:

```sh
$ helm install teleport-kube-agent . \
  --create-namespace \
  --namespace teleport \
  --set roles=kube \
  --set proxyAddr=${PROXY_ENDPOINT?} \
  --set authToken=${JOIN_TOKEN?} \
  --set kubeClusterName=${KUBERNETES_CLUSTER_NAME?}
```

Set the values in the above command as appropriate for your setup.

You can also optionally set labels for your Kubernetes cluster using the
format `--set "labels.key=value"`, for example: `--set "labels.env=development,labels.region=us-west-1"`

To avoid specifying the auth token in plain text, it's possible to create a secret containing the token beforehand. To do so, run:

```sh
export TELEPORT_KUBE_TOKEN=$(echo -n '<auth token>' | base64 -w0)
export TELEPORT_NAMESPACE=teleport

cat <<EOF > secrets.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: teleport-kube-agent-join-token
  namespace: ${TELEPORT_NAMESPACE?}
type: Opaque
data:
  auth-token: ${TELEPORT_KUBE_TOKEN?}
EOF

$ kubectl apply -f secrets.yaml

$ helm install teleport-kube-agent . \
  --create-namespace \
  --namespace ${TELEPORT_NAMESPACE?} \
  --set roles=kube \
  --set proxyAddr=${PROXY_ENDPOINT?} \
  --set kubeClusterName=${KUBERNETES_CLUSTER_NAME?}
```

Note that due to backwards compatibility, the `labels` value **only** applies to the Teleport
Kubernetes service. To set labels for applications or databases, use the different formats
detailed below.

## Application access

### Dynamic Registration mode

To use Teleport application access in [dynamic registration mode](https://goteleport.com/docs/application-access/guides/dynamic-registration/),
you will need to know the application resource selector (`$APP_RESOURCE_KEY` and `$APP_RESOURCE_VALUE`).

To listen for all application resources, set both variables to `*`.

To install the agent in dynamic application registration mode, run:
```sh
$ helm install teleport-kube-agent . \
  --create-namespace \
  --namespace teleport \
  --set roles=app \
  --set proxyAddr=${PROXY_ENDPOINT?} \
  --set authToken=${JOIN_TOKEN?} \
  --set "appResources[0].labels.${APP_RESOURCE_KEY?}=${APP_RESOURCE_VALUE?}"
```

### Manual configuration mode

To use Teleport Application access, you will also need:
- the name of an application that you would like to proxy (`$APP_NAME`)
- the URI to connect to the application from the node where this chart is deployed (`$APP_URI`)

To install the agent, run:

```sh
$ helm install teleport-kube-agent . \
  --create-namespace \
  --namespace teleport \
  --set roles=app \
  --set proxyAddr=${PROXY_ENDPOINT?} \
  --set authToken=${JOIN_TOKEN?} \
  --set "apps[0].name=${APP_NAME?}" \
  --set "apps[0].uri=${APP_URI?}"
```

Set the values in the above command as appropriate for your setup.

These are the supported values for the `apps` map:

| Key | Description | Example | Default | Required |
| --- | --- | --- | --- | --- |
| `name` | Name of the app to be accessed | `apps[0].name=grafana` | | Yes |
| `uri` | URI of the app to be accessed | `apps[0].uri=http://localhost:3000` | | Yes |
| `public_addr` | Public address used to access the app | `apps[0].public_addr=grafana.teleport.example.com` | | No |
| `labels.[name]` | Key-value pairs to set against the app for grouping/RBAC | `apps[0].labels.env=local,apps[0].labels.region=us-west-1` | | No |
| `insecure_skip_verify` | Whether to skip validation of TLS certificates presented by backend apps | `apps[0].insecure_skip_verify=true` | `false` | No |
| `rewrite.redirect` | A list of URLs to rewrite to the public address of the app service | `apps[0].rewrite.redirect[0]=https://192.168.1.1` | | No |

You can add multiple apps using `apps[1].name`, `apps[1].uri`, `apps[2].name`, `apps[2].uri` etc.
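
For instance, two apps expressed as a values file (an illustration only; both app names and URIs are placeholders):

```yaml
apps:
  - name: grafana
    uri: http://localhost:3000
  - name: internal-dashboard
    uri: http://localhost:8080
```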

After installing, the new application should show up in `tsh apps ls` after a few minutes.

## Database access

### Dynamic Registration mode

To use Teleport database access in [dynamic registration mode](https://goteleport.com/docs/database-access/guides/dynamic-registration/),
you will need to know the database resource selector (`$DB_RESOURCE_KEY` and `$DB_RESOURCE_VALUE`).

To listen for all database resources, set both variables to `*`.

To install the agent in dynamic database registration mode, run:
```sh
$ helm install teleport-kube-agent . \
  --create-namespace \
  --namespace teleport \
  --set roles=db \
  --set proxyAddr=${PROXY_ENDPOINT?} \
  --set authToken=${JOIN_TOKEN?} \
  --set "databaseResources[0].labels.${DB_RESOURCE_KEY?}=${DB_RESOURCE_VALUE?}"
```

### Auto-discovery mode (AWS)

To use Teleport database access in AWS database auto-discovery mode, you will also need:
- the database types you are attempting to auto-discover (`types`)
- the AWS region(s) you would like to run auto-discovery in (`regions`)
- the AWS resource tags if you want to target only certain databases (`tags`)

See the [AWS databases Helm chart reference](https://goteleport.com/docs/reference/helm-reference/teleport-kube-agent/#awsDatabases)
for an example of installing an agent with AWS database auto-discovery.
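
As a quick sketch, AWS auto-discovery values take the following shape (copied from the chart's `.lint/aws-databases.yaml` fixture; the IAM role annotation is a placeholder and assumes an IRSA-style role grants the agent access to RDS):

```yaml
authToken: auth-token
proxyAddr: proxy.example.com:3080
roles: db
awsDatabases:
  - types: ["rds"]
    regions: ["us-east-1"]
    tags:
      "*": "*"
annotations:
  serviceAccount:
    eks.amazonaws.com/role-arn: arn:aws:iam::1234567890:role/my-rds-autodiscovery-role
```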

### Auto-discovery mode (Azure)

To use Teleport database access in Azure database auto-discovery mode, you will also need:
- the database types you are attempting to auto-discover (`types`)
- the Azure resource tags if you want to target only certain databases (`tags`)

You can optionally specify:
- the Azure subscription(s) to auto-discover in (`subscriptions`)
- the Azure region(s) to auto-discover in (`regions`)
- the Azure resource group(s) to auto-discover in (`resource_groups`)

The default for each of these optional settings is `[*]`, which will auto-discover in all
subscriptions, regions, or resource groups accessible by the Teleport service
principal in Azure.

See the [Azure databases Helm chart reference](https://goteleport.com/docs/reference/helm-reference/teleport-kube-agent/#azureDatabases)
for an example of installing an agent with Azure database auto-discovery.
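
Similarly, a values sketch for Azure auto-discovery (mirroring the chart's `.lint/azure-databases.yaml` fixture; the subscriptions and resource groups are placeholders, and that fixture also shows how to pass the Azure service principal credentials via `extraEnv`):

```yaml
authToken: auth-token
proxyAddr: proxy.example.com:3080
roles: db
azureDatabases:
  - types: ["mysql", "postgres"]
    regions: ["eastus", "centralus"]
    subscriptions: ["subID1", "subID2"]
    resource_groups: ["group1", "group2"]
    tags:
      "*": "*"
```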

### Manual configuration mode

To use Teleport database access, you will also need:
- the name of a database that you would like to proxy (`$DB_NAME`)
- the URI to connect to the database from the node where this chart is deployed (`$DB_URI`)
- the database protocol used for the database (`$DB_PROTOCOL`)

To install the agent in manual database configuration mode, run:

```sh
$ helm install teleport-kube-agent . \
  --create-namespace \
  --namespace teleport \
  --set roles=db \
  --set proxyAddr=${PROXY_ENDPOINT?} \
  --set authToken=${JOIN_TOKEN?} \
  --set "databases[0].name=${DB_NAME?}" \
  --set "databases[0].uri=${DB_URI?}" \
  --set "databases[0].protocol=${DB_PROTOCOL?}"
```

Set the values in the above command as appropriate for your setup.

These are the supported values for the `databases` map:

| Key | Description | Example | Default | Required |
| --- | --- | --- | --- | --- |
| `name` | Name of the database to be accessed | `databases[0].name=aurora` | | Yes |
| `uri` | URI of the database to be accessed | `databases[0].uri=postgres-aurora-instance-1.xxx.us-east-1.rds.amazonaws.com:5432` | | Yes |
| `protocol` | Database protocol | `databases[0].protocol=postgres` | | Yes |
| `description` | Free-form description of the database proxy instance | `databases[0].description='AWS Aurora instance of PostgreSQL 13.0'` | | No |
| `aws.region` | AWS-specific region configuration (only used for RDS/Aurora) | `databases[0].aws.region=us-east-1` | | No |
| `labels.[name]` | Key-value pairs to set against the database for grouping/RBAC | `databases[0].labels.db=postgres-dev,databases[0].labels.region=us-east-1` | | No |

You can add multiple databases using `databases[1].name`, `databases[1].uri`, `databases[1].protocol`,
`databases[2].name`, `databases[2].uri`, `databases[2].protocol` etc.

After installing, the new database should show up in `tsh db ls` after a few minutes.

## Troubleshooting

If the service for a given role doesn't show up, look into the agent logs with:

```sh
$ kubectl logs -n teleport deployment/teleport-kube-agent
```

## Contributing to the chart

Please read [CONTRIBUTING.md](../CONTRIBUTING.md) before raising a pull request to this chart.
teleport-kube-agent/aws-and-manual-db.yaml (new file)
@@ -0,0 +1,21 @@
|
||||
authToken: auth-token
|
||||
proxyAddr: proxy.example.com:3080
|
||||
roles: db
|
||||
awsDatabases:
|
||||
- types: ["rds"]
|
||||
regions: ["us-east-1"]
|
||||
tags:
|
||||
"*": "*"
|
||||
- types: ["rds"]
|
||||
regions: ["us-west-2"]
|
||||
tags:
|
||||
"env": "development"
|
||||
databases:
|
||||
- name: aurora
|
||||
uri: "postgres-aurora-instance-1.xxx.us-east-1.rds.amazonaws.com:5432"
|
||||
protocol: "postgres"
|
||||
labels:
|
||||
database: staging
|
||||
annotations:
|
||||
serviceAccount:
|
||||
eks.amazonaws.com/role-arn: arn:aws:iam::1234567890:role/my-rds-autodiscovery-role
|
||||
teleport-kube-agent/templates/NOTES.txt (new file)
@@ -0,0 +1,53 @@
|
||||
{{- if and .Values.podSecurityPolicy.enabled (semverCompare "<1.23.0-0" .Capabilities.KubeVersion.Version) }}
|
||||
SECURITY WARNING: Kubernetes 1.25 removes PodSecurityPolicy support and Helm
|
||||
doesn't support upgrading from 1.24 to 1.25 with PSPs enabled. Since version 12
|
||||
the `teleport-cluster` chart doesn't deploy PSPs on Kubernetes 1.23 or older.
|
||||
Instead, we recommend configuring Pod Security Admission for
|
||||
the namespace "{{.Release.Namespace}}" by adding the label
|
||||
`pod-security.kubernetes.io/enforce: baseline` on the namespace resource.
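For example (only an illustration; adjust it if the namespace is managed
outside this release), the namespace could carry the label like this:

  apiVersion: v1
  kind: Namespace
  metadata:
    name: {{ .Release.Namespace }}
    labels:
      pod-security.kubernetes.io/enforce: baseline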
|
||||
|
||||
See https://goteleport.com/docs/deploy-a-cluster/helm-deployments/migration-kubernetes-1-25-psp/
|
||||
|
||||
To remove this warning, explicitly set "podSecurityPolicy.enabled=false".
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.teleportVersionOverride }}
|
||||
|
||||
DANGER: `teleportVersionOverride` MUST NOT be used to control the Teleport version.
|
||||
This chart is designed to run Teleport version {{ .Chart.AppVersion }}.
|
||||
You will face compatibility issues trying to run a different Teleport version with it.
|
||||
|
||||
If you want to run Teleport version {{.Values.teleportVersionOverride}},
|
||||
you should install the matching chart version by passing `--version {{.Values.teleportVersionOverride}}` to `helm install`/`helm upgrade` instead.
|
||||
{{- end }}
|
||||
{{- if contains "-gke." .Capabilities.KubeVersion.Version -}}
|
||||
{{- $groupName := (coalesce .Values.adminClusterRoleBinding.name "cluster-admin") }}
|
||||
|
||||
WARNING: GKE Autopilot clusters forbid users from impersonating system-wide identities.
|
||||
This means you won't be able to use the `system:masters` Kubernetes Group in
|
||||
the Teleport Roles for GKE Autopilot clusters.
|
||||
|
||||
Given that you installed Teleport on a GKE cluster, we recommend you use the
|
||||
Kubernetes Group `{{ $groupName }}` instead of `system:masters` in the Teleport Roles
|
||||
for GKE Autopilot clusters.
|
||||
|
||||
To do so, you can use the following Teleport Role resource:
|
||||
|
||||
kind: role
|
||||
metadata:
|
||||
name: gke-kube-access
|
||||
version: v6
|
||||
spec:
|
||||
allow:
|
||||
kubernetes_labels:
|
||||
'*': '*'
|
||||
kubernetes_groups:
|
||||
- "{{ $groupName }}"
|
||||
|
||||
This chart automatically created the `{{ $groupName }}` Kubernetes Group for you and
|
||||
assigned it admin privileges on the Kubernetes cluster.
|
||||
|
||||
Consult the built-in security features that GKE Autopilot enforces:
|
||||
https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-security#built-in-security
|
||||
|
||||
{{- end }}
|
||||
teleport-kube-agent/templates/_config.tpl (new file)
@@ -0,0 +1,116 @@
|
||||
{{- define "teleport-kube-agent.config" -}}
|
||||
{{- $logLevel := (coalesce .Values.logLevel .Values.log.level "INFO") -}}
|
||||
{{- if (ge (include "teleport-kube-agent.version" . | semver).Major 11) }}
|
||||
version: v3
|
||||
{{- end }}
|
||||
teleport:
|
||||
join_params:
|
||||
method: "{{ .Values.joinParams.method }}"
|
||||
token_name: "/etc/teleport-secrets/auth-token"
|
||||
{{- if (ge (include "teleport-kube-agent.version" . | semver).Major 11) }}
|
||||
proxy_server: {{ required "proxyAddr is required in chart values" .Values.proxyAddr }}
|
||||
{{- else }}
|
||||
auth_servers: ["{{ required "proxyAddr is required in chart values" .Values.proxyAddr }}"]
|
||||
{{- end }}
|
||||
{{- if .Values.caPin }}
|
||||
ca_pin: {{- toYaml .Values.caPin | nindent 8 }}
|
||||
{{- end }}
|
||||
log:
|
||||
severity: {{ $logLevel }}
|
||||
output: {{ .Values.log.output }}
|
||||
format:
|
||||
output: {{ .Values.log.format }}
|
||||
extra_fields: {{ .Values.log.extraFields | toJson }}
|
||||
|
||||
kubernetes_service:
|
||||
{{- if or (contains "kube" (.Values.roles | toString)) (empty .Values.roles) }}
|
||||
enabled: true
|
||||
kube_cluster_name: {{ required "kubeClusterName is required in chart values when kube role is enabled, see README" .Values.kubeClusterName }}
|
||||
{{- if .Values.labels }}
|
||||
labels: {{- toYaml .Values.labels | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
enabled: false
|
||||
{{- end }}
|
||||
|
||||
app_service:
|
||||
{{- if contains "app" (.Values.roles | toString) }}
|
||||
enabled: true
|
||||
{{- if not (or (.Values.apps) (.Values.appResources)) }}
|
||||
{{- fail "at least one of 'apps' and 'appResources' is required in chart values when app role is enabled, see README" }}
|
||||
{{- end }}
|
||||
{{- if .Values.apps }}
|
||||
{{- range $app := .Values.apps }}
|
||||
{{- if not (hasKey $app "name") }}
|
||||
{{- fail "'name' is required for all 'apps' in chart values when app role is enabled, see README" }}
|
||||
{{- end }}
|
||||
{{- if not (hasKey $app "uri") }}
|
||||
{{- fail "'uri' is required for all 'apps' in chart values when app role is enabled, see README" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
apps:
|
||||
{{- toYaml .Values.apps | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.appResources }}
|
||||
resources:
|
||||
{{- toYaml .Values.appResources | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
enabled: false
|
||||
{{- end }}
|
||||
|
||||
db_service:
|
||||
{{- if contains "db" (.Values.roles | toString) }}
|
||||
enabled: true
|
||||
{{- if not (or (.Values.awsDatabases) (.Values.azureDatabases) (.Values.databases) (.Values.databaseResources)) }}
|
||||
{{- fail "at least one of 'awsDatabases', 'azureDatabases', 'databases' or 'databaseResources' is required in chart values when db role is enabled, see README" }}
|
||||
{{- end }}
|
||||
{{- if .Values.awsDatabases }}
|
||||
aws:
|
||||
{{- range $awsDb := .Values.awsDatabases }}
|
||||
{{- if not (hasKey $awsDb "types") }}
|
||||
{{- fail "'types' is required for all 'awsDatabases' in chart values when key is set and db role is enabled, see README" }}
|
||||
{{- end }}
|
||||
{{- if not (hasKey $awsDb "regions") }}
|
||||
{{- fail "'regions' is required for all 'awsDatabases' in chart values when key is set and db role is enabled, see README" }}
|
||||
{{- end }}
|
||||
{{- if not (hasKey $awsDb "tags") }}
|
||||
{{- fail "'tags' is required for all 'awsDatabases' in chart values when key is set and db role is enabled, see README" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml .Values.awsDatabases | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.azureDatabases }}
|
||||
azure:
|
||||
{{- toYaml .Values.azureDatabases | nindent 6 }}
|
||||
{{- end}}
|
||||
{{- if .Values.databases }}
|
||||
databases:
|
||||
{{- range $db := .Values.databases }}
|
||||
{{- if not (hasKey $db "name") }}
|
||||
{{- fail "'name' is required for all 'databases' in chart values when db role is enabled, see README" }}
|
||||
{{- end }}
|
||||
{{- if not (hasKey $db "uri") }}
|
||||
{{- fail "'uri' is required for all 'databases' is required in chart values when db role is enabled, see README" }}
|
||||
{{- end }}
|
||||
{{- if not (hasKey $db "protocol") }}
|
||||
{{- fail "'protocol' is required for all 'databases' in chart values when db role is enabled, see README" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- toYaml .Values.databases | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.databaseResources }}
|
||||
resources:
|
||||
{{- toYaml .Values.databaseResources | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
enabled: false
|
||||
{{- end }}
|
||||
|
||||
auth_service:
|
||||
enabled: false
|
||||
ssh_service:
|
||||
enabled: false
|
||||
proxy_service:
|
||||
enabled: false
|
||||
{{- end -}}
|
||||
teleport-kube-agent/templates/_helpers.tpl (new file)
@@ -0,0 +1,46 @@
|
||||
{{- define "teleport.kube.agent.isUpgrade" -}}
|
||||
{{- /* Checks if action is an upgrade from an old release that didn't support Secret storage */}}
|
||||
{{- if .Release.IsUpgrade }}
|
||||
{{- $deployment := (lookup "apps/v1" "Deployment" .Release.Namespace .Release.Name ) -}}
|
||||
{{- if ($deployment) }}
|
||||
true
|
||||
{{- else if .Values.unitTestUpgrade }}
|
||||
true
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
if serviceAccount is not defined or serviceAccount.name is empty, use .Release.Name
|
||||
*/}}
|
||||
{{- define "teleport-kube-agent.serviceAccountName" -}}
|
||||
{{- coalesce .Values.serviceAccount.name .Values.serviceAccountName .Release.Name -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use for the post-delete hook
|
||||
if serviceAccount is not defined or serviceAccount.name is empty, use .Release.Name-delete-hook
|
||||
*/}}
|
||||
{{- define "teleport-kube-agent.deleteHookServiceAccountName" -}}
|
||||
{{- coalesce .Values.serviceAccount.name .Values.serviceAccountName (printf "%s-delete-hook" .Release.Name) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "teleport-kube-agent.version" -}}
|
||||
{{- if .Values.teleportVersionOverride -}}
|
||||
{{- .Values.teleportVersionOverride -}}
|
||||
{{- else -}}
|
||||
{{- .Chart.Version -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "teleport-kube-agent.baseImage" -}}
|
||||
{{- if .Values.enterprise -}}
|
||||
{{- .Values.enterpriseImage -}}
|
||||
{{- else -}}
|
||||
{{- .Values.image -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "teleport-kube-agent.image" -}}
|
||||
{{ include "teleport-kube-agent.baseImage" . }}:{{ include "teleport-kube-agent.version" . }}
|
||||
{{- end -}}
|
||||
teleport-kube-agent/templates/admin_clusterrolebinding.yaml (new file)
@@ -0,0 +1,24 @@
|
||||
{{/* GKE Autopilot clusters forbid users from impersonating system:masters
|
||||
Groups. This is a security measure released under the GKE Warden authz module
|
||||
https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-security#built-in-security
|
||||
Because of this limitation, users are unable to specify kubernetes_groups=["system:masters"]
|
||||
in Teleport, so we create a Kubernetes Group called cluster-admin when we detect
|
||||
that the underlying cluster is a GKE cluster. */}}
|
||||
{{- if or (contains "-gke." .Capabilities.KubeVersion.Version) (.Values.adminClusterRoleBinding.create) -}}
|
||||
{{- $groupName := (coalesce .Values.adminClusterRoleBinding.name "cluster-admin") }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: admin-k8s-cluster-group
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
# This is the built-in cluster-admin role that exists in all K8S clusters.
|
||||
# We are binding the cluster-admin role to the cluster-admin group.
|
||||
# See https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: Group
|
||||
name: {{ $groupName }}
|
||||
{{- end }}
|
||||
teleport-kube-agent/templates/clusterrole.yaml (new file)
@@ -0,0 +1,31 @@
|
||||
{{- if .Values.rbac.create -}}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ .Values.clusterRoleName | default .Release.Name }}
|
||||
{{- if .Values.extraLabels.clusterRole }}
|
||||
labels:
|
||||
{{- toYaml .Values.extraLabels.clusterRole | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- users
|
||||
- groups
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- impersonate
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- "authorization.k8s.io"
|
||||
resources:
|
||||
- selfsubjectaccessreviews
|
||||
verbs:
|
||||
- create
|
||||
{{- end -}}
|
||||
teleport-kube-agent/templates/clusterrolebinding.yaml (new file)
@@ -0,0 +1,18 @@
|
||||
{{- if .Values.rbac.create -}}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ .Values.clusterRoleBindingName | default .Release.Name }}
|
||||
{{- if .Values.extraLabels.clusterRoleBinding }}
|
||||
labels:
|
||||
{{- toYaml .Values.extraLabels.clusterRoleBinding | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ .Values.clusterRoleName | default .Release.Name }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "teleport-kube-agent.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end -}}
|
||||
teleport-kube-agent/templates/config.yaml (new file)
@@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- if .Values.extraLabels.config }}
|
||||
labels:
|
||||
{{- toYaml .Values.extraLabels.config | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.annotations.config }}
|
||||
annotations:
|
||||
{{- toYaml .Values.annotations.config | nindent 4 }}
|
||||
{{- end }}
|
||||
data:
|
||||
teleport.yaml: |
|
||||
{{- mustMergeOverwrite (include "teleport-kube-agent.config" . | fromYaml) .Values.teleportConfig | toYaml | nindent 4 -}}
|
||||
teleport-kube-agent/templates/delete_hook.yaml (new file)
@@ -0,0 +1,95 @@
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "teleport-kube-agent.deleteHookServiceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-delete
|
||||
"helm.sh/hook-weight": "-4"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
---
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.create }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-delete-hook
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-delete
|
||||
"helm.sh/hook-weight": "-3"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets",]
|
||||
verbs: ["get", "delete", "list"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-delete-hook
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-delete
|
||||
"helm.sh/hook-weight": "-2"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ .Release.Name }}-delete-hook
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ .Release.Name }}-delete-hook
|
||||
namespace: {{ .Release.Namespace }}
|
||||
---
|
||||
{{- end }}
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-delete-hook
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-delete
|
||||
"helm.sh/hook-weight": "-1"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-delete-hook
|
||||
spec:
|
||||
{{- if .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml .Values.imagePullSecrets | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "teleport-kube-agent.deleteHookServiceAccountName" . }}
|
||||
restartPolicy: OnFailure
|
||||
{{- if .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml .Values.tolerations | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml .Values.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: post-delete-job
|
||||
env:
|
||||
- name: KUBE_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: RELEASE_NAME
|
||||
value: {{ .Release.Name }}
|
||||
image: {{ include "teleport-kube-agent.image" . | quote }}
|
||||
{{- if .Values.imagePullPolicy }}
|
||||
imagePullPolicy: {{ toYaml .Values.imagePullPolicy }}
|
||||
{{- end }}
|
||||
command: ["teleport"]
|
||||
args: ["kube-state", "delete"]
|
||||
{{- if .Values.securityContext }}
|
||||
securityContext: {{- toYaml .Values.securityContext | nindent 10 }}
|
||||
{{- end }}
|
||||
teleport-kube-agent/templates/deployment.yaml (new file)
@@ -0,0 +1,216 @@
|
||||
#
|
||||
# Warning to maintainers, any changes to this file that are not specific to the Deployment need to also be duplicated
|
||||
# in the statefulset.yaml file.
|
||||
#
|
||||
{{- if and (not .Values.storage.enabled) (include "teleport.kube.agent.isUpgrade" . ) }}
|
||||
{{- $replicaCount := (coalesce .Values.replicaCount .Values.highAvailability.replicaCount "1") }}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app: {{ .Release.Name }}
|
||||
{{- if .Values.extraLabels.deployment }}
|
||||
{{- toYaml .Values.extraLabels.deployment | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.annotations.deployment }}
|
||||
annotations:
|
||||
{{- toYaml .Values.annotations.deployment | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: {{ $replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
# ConfigMap checksum, to recreate the pod on config changes.
|
||||
checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }}
|
||||
{{- if .Values.annotations.pod }}
|
||||
{{- toYaml .Values.annotations.pod | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ .Release.Name }}
|
||||
{{- if .Values.extraLabels.pod }}
|
||||
{{- toYaml .Values.extraLabels.pod | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.dnsConfig }}
|
||||
dnsConfig: {{- toYaml .Values.dnsConfig | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.dnsPolicy }}
|
||||
dnsPolicy: {{ .Values.dnsPolicy | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.hostAliases }}
|
||||
hostAliases: {{- toYaml .Values.hostAliases | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.affinity (gt (int $replicaCount) 1) }}
|
||||
affinity:
|
||||
{{- if .Values.affinity }}
|
||||
{{- if .Values.highAvailability.requireAntiAffinity }}
|
||||
{{- fail "Cannot use highAvailability.requireAntiAffinity when affinity is also set in chart values - unset one or the other" }}
|
||||
{{- end }}
|
||||
{{- toYaml .Values.affinity | nindent 8 }}
|
||||
{{- else }}
|
||||
podAntiAffinity:
|
||||
{{- if .Values.highAvailability.requireAntiAffinity }}
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- {{ .Release.Name }}
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
{{- else if gt (int $replicaCount) 1 }}
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 50
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- {{ .Release.Name }}
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml .Values.tolerations | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml .Values.imagePullSecrets | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.initContainers }}
|
||||
initContainers: {{- toYaml .Values.initContainers | nindent 6 }}
|
||||
{{- if .Values.resources }}
|
||||
resources: {{- toYaml .Values.resources | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.initSecurityContext }}
|
||||
securityContext: {{- toYaml .Values.initSecurityContext | nindent 10 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /etc/teleport
|
||||
name: "config"
|
||||
readOnly: true
|
||||
- mountPath: /etc/teleport-secrets
|
||||
name: "auth-token"
|
||||
readOnly: true
|
||||
- mountPath: /var/lib/teleport
|
||||
name: "data"
|
||||
{{- if .Values.tls.existingCASecretName }}
|
||||
- mountPath: /etc/teleport-tls-ca
|
||||
name: "teleport-tls-ca"
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- if .Values.extraVolumeMounts }}
|
||||
{{- toYaml .Values.extraVolumeMounts | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml .Values.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: "teleport"
|
||||
image: {{ include "teleport-kube-agent.image" . | quote }}
|
||||
{{- if .Values.imagePullPolicy }}
|
||||
imagePullPolicy: {{ toYaml .Values.imagePullPolicy }}
|
||||
{{- end }}
|
||||
env:
|
||||
# This variable is set for telemetry purposes.
|
||||
# Telemetry is opt-in for oss users and controlled at the auth level.
|
||||
- name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT
|
||||
value: "true"
|
||||
{{- if .Values.updater.enabled }}
|
||||
- name: TELEPORT_EXT_UPGRADER
|
||||
value: kube
|
||||
{{- end }}
|
||||
{{- if (gt (len .Values.extraEnv) 0) }}
|
||||
{{- toYaml .Values.extraEnv | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.tls.existingCASecretName }}
|
||||
- name: SSL_CERT_FILE
|
||||
value: /etc/teleport-tls-ca/ca.pem
|
||||
{{- end }}
|
||||
args:
|
||||
- "--diag-addr=0.0.0.0:3000"
|
||||
{{- if .Values.insecureSkipProxyTLSVerify }}
|
||||
- "--insecure"
|
||||
{{- end }}
|
||||
{{- if .Values.extraArgs }}
|
||||
{{- toYaml .Values.extraArgs | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.securityContext }}
|
||||
securityContext: {{- toYaml .Values.securityContext | nindent 10 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: diag
|
||||
containerPort: 3000
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: diag
|
||||
initialDelaySeconds: 5 # wait 5s for agent to start
|
||||
periodSeconds: 5 # poll health every 5s
|
||||
failureThreshold: 6 # consider agent unhealthy after 30s (6 * 5s)
|
||||
timeoutSeconds: {{ .Values.probeTimeoutSeconds }}
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: diag
|
||||
initialDelaySeconds: 5 # wait 5s for agent to register
|
||||
periodSeconds: 5 # poll health every 5s
|
||||
failureThreshold: 12 # consider agent unhealthy after 60s (12 * 5s)
|
||||
timeoutSeconds: {{ .Values.probeTimeoutSeconds }}
|
||||
{{- if .Values.resources }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 10 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /etc/teleport
|
||||
name: "config"
|
||||
readOnly: true
|
||||
- mountPath: /etc/teleport-secrets
|
||||
name: "auth-token"
|
||||
readOnly: true
|
||||
- mountPath: /var/lib/teleport
|
||||
name: {{ default "data" .Values.existingDataVolume }}
|
||||
{{- if .Values.tls.existingCASecretName }}
|
||||
- mountPath: /etc/teleport-tls-ca
|
||||
name: "teleport-tls-ca"
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- if .Values.extraVolumeMounts }}
|
||||
{{- toYaml .Values.extraVolumeMounts | nindent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: "config"
|
||||
configMap:
|
||||
name: {{ .Release.Name }}
|
||||
- name: "auth-token"
|
||||
secret:
|
||||
secretName: {{ coalesce .Values.secretName .Values.joinTokenSecret.name }}
|
||||
{{- if not .Values.existingDataVolume }}
|
||||
- name: "data"
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- if .Values.tls.existingCASecretName }}
|
||||
- name: "teleport-tls-ca"
|
||||
secret:
|
||||
secretName: {{ .Values.tls.existingCASecretName }}
|
||||
{{- end }}
|
||||
{{- if .Values.extraVolumes }}
|
||||
{{- toYaml .Values.extraVolumes | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "teleport-kube-agent.serviceAccountName" . }}
|
||||
{{- end }}
|
||||
teleport-kube-agent/templates/hook.yaml (new file)
@@ -0,0 +1,97 @@
|
||||
{{- $deployment := (lookup "apps/v1" "Deployment" .Release.Namespace .Release.Name ) -}}
|
||||
{{- if $deployment }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-hook
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-upgrade
|
||||
"helm.sh/hook-weight": "-4"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-hook
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-upgrade
|
||||
"helm.sh/hook-weight": "-3"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
rules:
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["statefulsets"]
|
||||
resourceNames: ["{{ .Release.Name }}"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods",]
|
||||
verbs: ["get", "watch"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["deployments",]
|
||||
resourceNames: ["{{ .Release.Name }}"]
|
||||
verbs: ["get", "delete", "list"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-hook
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-upgrade
|
||||
"helm.sh/hook-weight": "-2"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ .Release.Name }}-hook
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ .Release.Name }}-hook
|
||||
namespace: {{ .Release.Namespace }}
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-hook
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-upgrade
|
||||
"helm.sh/hook-weight": "-1"
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-hook
|
||||
spec:
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- if .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml .Values.tolerations | nindent 6 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ .Release.Name }}-hook
|
||||
restartPolicy: OnFailure
|
||||
{{- if .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml .Values.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: post-install-job
|
||||
image: alpine/k8s:1.26.0
|
||||
command:
|
||||
- sh
|
||||
- "-c"
|
||||
- |
|
||||
/bin/sh <<'EOF'
|
||||
set -eu -o pipefail
|
||||
# wait until statefulset is ready
|
||||
kubectl rollout status --watch --timeout=600s statefulset/{{ .Release.Name }}
|
||||
# delete deployment
|
||||
kubectl delete deployment/{{ .Release.Name }}
|
||||
EOF
|
||||
{{- if .Values.securityContext }}
|
||||
securityContext: {{- toYaml .Values.securityContext | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- end}}
|
||||
21
teleport-kube-agent/templates/pdb.yaml
Normal file
@ -0,0 +1,21 @@
|
||||
{{- if .Values.highAvailability.podDisruptionBudget.enabled }}
|
||||
{{- if .Capabilities.APIVersions.Has "policy/v1" }}
|
||||
apiVersion: policy/v1
|
||||
{{- else }}
|
||||
apiVersion: policy/v1beta1
|
||||
{{- end }}
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app: {{ .Release.Name }}
|
||||
{{- if .Values.extraLabels.podDisruptionBudget }}
|
||||
{{- toYaml .Values.extraLabels.podDisruptionBudget | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
minAvailable: {{ .Values.highAvailability.podDisruptionBudget.minAvailable }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
31
teleport-kube-agent/templates/podmonitor.yaml
Normal file
@ -0,0 +1,31 @@
|
||||
{{- if .Values.podMonitor.enabled -}}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PodMonitor
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- with .Values.podMonitor.additionalLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
jobLabel: {{ .Release.Name }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ .Release.Name }}
|
||||
podMetricsEndpoints:
|
||||
- port: diag
|
||||
path: /metrics
|
||||
{{- with .Values.podMonitor.interval }}
|
||||
interval: {{ . | quote }}
|
||||
{{- end }}
|
||||
podTargetLabels:
|
||||
- "app.kubernetes.io/name"
|
||||
- "app.kubernetes.io/instance"
|
||||
- "app.kubernetes.io/component"
|
||||
- "app.kubernetes.io/version"
|
||||
- "teleport.dev/majorVersion"
|
||||
{{- end }}
|
||||
70
teleport-kube-agent/templates/psp.yaml
Normal file
@ -0,0 +1,70 @@
|
||||
{{/* PSPs are deprecated in 1.22 and removed in 1.25. However, Helm doesn't handle their removal properly in 1.25.
|
||||
We must remove them before 1.25 to ensure the Helm state doesn't become corrupted. As this is a breaking change, this
|
||||
only applies to v12+ charts. v11 and below will only show a warning from the NOTES.txt.
|
||||
Users must use PSAs instead (beta in 1.23, GA in 1.25). The "teleport-cluster" chart runs in "baseline" mode */}}
|
||||
{{- if and .Values.podSecurityPolicy.enabled (semverCompare "<1.23.0-0" .Capabilities.KubeVersion.Version) -}}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
{{- if .Values.extraLabels.podSecurityPolicy }}
|
||||
labels:
|
||||
{{- toYaml .Values.extraLabels.podSecurityPolicy | nindent 4 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
|
||||
spec:
|
||||
privileged: false
|
||||
allowPrivilegeEscalation: false
|
||||
requiredDropCapabilities:
|
||||
- ALL
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: MustRunAs
|
||||
ranges:
|
||||
# Forbid adding the root group.
|
||||
- min: 1
|
||||
max: 65535
|
||||
runAsUser:
|
||||
rule: MustRunAsNonRoot
|
||||
fsGroup:
|
||||
rule: MustRunAs
|
||||
ranges:
|
||||
# Forbid adding the root group.
|
||||
- min: 1
|
||||
max: 65535
|
||||
readOnlyRootFilesystem: true
|
||||
volumes:
|
||||
- '*'
|
||||
hostNetwork: false
|
||||
hostIPC: false
|
||||
hostPID: false
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-psp
|
||||
rules:
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
verbs:
|
||||
- use
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-psp
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ .Release.Name }}-psp
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "teleport-kube-agent.serviceAccountName" . }}
|
||||
{{- end -}}
|
||||
14
teleport-kube-agent/templates/role.yaml
Normal file
@ -0,0 +1,14 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ .Values.roleName | default .Release.Name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- if .Values.extraLabels.role }}
|
||||
labels:
|
||||
{{- toYaml .Values.extraLabels.role | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
# the only object type used is "secrets"
|
||||
resources: ["secrets"]
|
||||
verbs: ["create", "get", "update","patch"]
|
||||
17
teleport-kube-agent/templates/rolebinding.yaml
Normal file
@ -0,0 +1,17 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ .Values.roleBindingName | default .Release.Name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- if .Values.extraLabels.roleBinding }}
|
||||
labels:
|
||||
{{- toYaml .Values.extraLabels.roleBinding | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ .Values.roleName | default .Release.Name }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "teleport-kube-agent.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
19
teleport-kube-agent/templates/secret.yaml
Normal file
@ -0,0 +1,19 @@
|
||||
{{- if .Values.joinTokenSecret.create }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ coalesce .Values.secretName .Values.joinTokenSecret.name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- if .Values.extraLabels.secret }}
|
||||
labels:
|
||||
{{- toYaml .Values.extraLabels.secret | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.annotations.secret }}
|
||||
annotations:
|
||||
{{- toYaml .Values.annotations.secret | nindent 4 }}
|
||||
{{- end }}
|
||||
type: Opaque
|
||||
stringData:
|
||||
auth-token: |
|
||||
{{ coalesce .Values.joinParams.tokenName .Values.authToken }}
|
||||
{{- end}}
|
||||
15
teleport-kube-agent/templates/serviceaccount.yaml
Normal file
@ -0,0 +1,15 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "teleport-kube-agent.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- if .Values.extraLabels.serviceAccount }}
|
||||
labels:
|
||||
{{- toYaml .Values.extraLabels.serviceAccount | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.annotations.serviceAccount }}
|
||||
annotations:
|
||||
{{- toYaml .Values.annotations.serviceAccount | nindent 4 }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
239
teleport-kube-agent/templates/statefulset.yaml
Normal file
@ -0,0 +1,239 @@
|
||||
#
|
||||
# Warning to maintainers, any changes to this file that are not specific to the StatefulSet need to also be duplicated
|
||||
# in the deployment.yaml file.
|
||||
#
|
||||
{{- $replicaCount := (coalesce .Values.replicaCount .Values.highAvailability.replicaCount "1") }}
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app: {{ .Release.Name }}
|
||||
{{- if .Values.extraLabels.deployment }}
|
||||
{{- toYaml .Values.extraLabels.deployment | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
serviceName: {{ .Release.Name }}
|
||||
replicas: {{ $replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
# ConfigMap checksum, to recreate the pod on config changes.
|
||||
checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }}
|
||||
{{- if .Values.annotations.pod }}
|
||||
{{- toYaml .Values.annotations.pod | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ .Release.Name }}
|
||||
{{- if .Values.extraLabels.pod }}
|
||||
{{- toYaml .Values.extraLabels.pod | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.dnsConfig }}
|
||||
dnsConfig: {{- toYaml .Values.dnsConfig | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.dnsPolicy }}
|
||||
dnsPolicy: {{ .Values.dnsPolicy | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.hostAliases }}
|
||||
hostAliases: {{- toYaml .Values.hostAliases | nindent 8 }}
|
||||
{{- end }}
|
||||
securityContext:
|
||||
fsGroup: 9807
|
||||
{{- if or .Values.affinity (gt (int $replicaCount) 1) }}
|
||||
affinity:
|
||||
{{- if .Values.affinity }}
|
||||
{{- if .Values.highAvailability.requireAntiAffinity }}
|
||||
{{- fail "Cannot use highAvailability.requireAntiAffinity when affinity is also set in chart values - unset one or the other" }}
|
||||
{{- end }}
|
||||
{{- toYaml .Values.affinity | nindent 8 }}
|
||||
{{- else }}
|
||||
podAntiAffinity:
|
||||
{{- if .Values.highAvailability.requireAntiAffinity }}
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- {{ .Release.Name }}
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
{{- else if gt (int $replicaCount) 1 }}
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 50
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- {{ .Release.Name }}
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml .Values.tolerations | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml .Values.imagePullSecrets | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.initContainers }}
|
||||
initContainers: {{- toYaml .Values.initContainers | nindent 6 }}
|
||||
{{- if .Values.resources }}
|
||||
resources: {{- toYaml .Values.resources | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if .Values.initSecurityContext }}
|
||||
securityContext: {{- toYaml .Values.initSecurityContext | nindent 10 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /etc/teleport
|
||||
name: "config"
|
||||
readOnly: true
|
||||
- mountPath: /etc/teleport-secrets
|
||||
name: "auth-token"
|
||||
readOnly: true
|
||||
- mountPath: /var/lib/teleport
|
||||
name: "{{ .Release.Name }}-teleport-data"
|
||||
{{- if .Values.tls.existingCASecretName }}
|
||||
- mountPath: /etc/teleport-tls-ca
|
||||
name: "teleport-tls-ca"
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- if .Values.extraVolumeMounts }}
|
||||
{{- toYaml .Values.extraVolumeMounts | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "teleport-kube-agent.serviceAccountName" . }}
|
||||
{{- if .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml .Values.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: "teleport"
|
||||
image: {{ include "teleport-kube-agent.image" . | quote }}
|
||||
{{- if .Values.imagePullPolicy }}
|
||||
imagePullPolicy: {{ toYaml .Values.imagePullPolicy }}
|
||||
{{- end }}
|
||||
env:
|
||||
# This variable is set for telemetry purposes.
|
||||
# Telemetry is opt-in and controlled at the auth level.
|
||||
- name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT
|
||||
value: "true"
|
||||
- name: TELEPORT_REPLICA_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: KUBE_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: RELEASE_NAME
|
||||
value: {{ .Release.Name }}
|
||||
{{- if .Values.updater.enabled }}
|
||||
- name: TELEPORT_EXT_UPGRADER
|
||||
value: kube
|
||||
{{- end }}
|
||||
{{- if .Values.tls.existingCASecretName }}
|
||||
- name: SSL_CERT_FILE
|
||||
value: /etc/teleport-tls-ca/ca.pem
|
||||
{{- end }}
|
||||
{{- if .Values.extraEnv }}
|
||||
{{- toYaml .Values.extraEnv | nindent 10 }}
|
||||
{{- end }}
|
||||
args:
|
||||
- "--diag-addr=0.0.0.0:3000"
|
||||
{{- if .Values.insecureSkipProxyTLSVerify }}
|
||||
- "--insecure"
|
||||
{{- end }}
|
||||
{{- if .Values.extraArgs }}
|
||||
{{- toYaml .Values.extraArgs | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.securityContext }}
|
||||
securityContext: {{- toYaml .Values.securityContext | nindent 10 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: diag
|
||||
containerPort: 3000
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: diag
|
||||
initialDelaySeconds: 5 # wait 5s for agent to start
|
||||
periodSeconds: 5 # poll health every 5s
|
||||
failureThreshold: 6 # consider agent unhealthy after 30s (6 * 5s)
|
||||
timeoutSeconds: {{ .Values.probeTimeoutSeconds }}
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: diag
|
||||
initialDelaySeconds: 5 # wait 5s for agent to register
|
||||
periodSeconds: 5 # poll health every 5s
|
||||
failureThreshold: 12 # consider agent unhealthy after 60s (12 * 5s)
|
||||
timeoutSeconds: {{ .Values.probeTimeoutSeconds }}
|
||||
{{- if .Values.resources }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 10 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /etc/teleport
|
||||
name: "config"
|
||||
readOnly: true
|
||||
- mountPath: /etc/teleport-secrets
|
||||
name: "auth-token"
|
||||
readOnly: true
|
||||
{{- if .Values.storage.enabled }}
|
||||
- mountPath: /var/lib/teleport
|
||||
name: "{{ .Release.Name }}-teleport-data"
|
||||
{{- else }}
|
||||
- mountPath: /var/lib/teleport
|
||||
name: "data"
|
||||
{{- end }}
|
||||
{{- if .Values.tls.existingCASecretName }}
|
||||
- mountPath: /etc/teleport-tls-ca
|
||||
name: "teleport-tls-ca"
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- if .Values.extraVolumeMounts }}
|
||||
{{- toYaml .Values.extraVolumeMounts | nindent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: "config"
|
||||
configMap:
|
||||
name: {{ .Release.Name }}
|
||||
- name: "auth-token"
|
||||
secret:
|
||||
secretName: {{ coalesce .Values.secretName .Values.joinTokenSecret.name }}
|
||||
{{- if not .Values.storage.enabled }}
|
||||
- name: "data"
|
||||
emptyDir: {}
|
||||
{{- end}}
|
||||
{{- if .Values.tls.existingCASecretName }}
|
||||
- name: "teleport-tls-ca"
|
||||
secret:
|
||||
secretName: {{ .Values.tls.existingCASecretName }}
|
||||
{{- end }}
|
||||
{{- if .Values.extraVolumes }}
|
||||
{{- toYaml .Values.extraVolumes | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.storage.enabled }}
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: "{{ .Release.Name }}-teleport-data"
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: {{ .Values.storage.storageClassName }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.storage.requests }}
|
||||
{{- end }}
|
||||
7
teleport-kube-agent/templates/updater/_helpers.tpl
Normal file
@ -0,0 +1,7 @@
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
if updater.serviceAccount.name is empty, use the agent service account name with an "-updater" suffix
|
||||
*/}}
|
||||
{{- define "teleport-kube-agent-updater.serviceAccountName" -}}
|
||||
{{- coalesce .Values.updater.serviceAccount.name (include "teleport-kube-agent.serviceAccountName" . | printf "%s-updater") -}}
|
||||
{{- end -}}
|
||||
113
teleport-kube-agent/templates/updater/deployment.yaml
Normal file
@ -0,0 +1,113 @@
|
||||
{{- if .Values.updater.enabled -}}
|
||||
{{- $updater := mustMergeOverwrite (mustDeepCopy .Values) .Values.updater -}}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-updater
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app: {{ .Release.Name }}-updater
|
||||
{{- if $updater.extraLabels.deployment }}
|
||||
{{- toYaml $updater.extraLabels.deployment | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $updater.annotations.deployment }}
|
||||
annotations: {{- toYaml $updater.annotations.deployment | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ .Release.Name }}-updater
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if $updater.annotations.pod }}
|
||||
{{- toYaml $updater.annotations.pod | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ .Release.Name }}-updater
|
||||
{{- if $updater.extraLabels.pod }}
|
||||
{{- toYaml $updater.extraLabels.pod | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if $updater.affinity }}
|
||||
affinity: {{- toYaml $updater.affinity | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if $updater.tolerations }}
|
||||
tolerations: {{- toYaml $updater.tolerations | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if $updater.imagePullSecrets }}
|
||||
imagePullSecrets: {{- toYaml $updater.imagePullSecrets | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if $updater.nodeSelector }}
|
||||
nodeSelector: {{- toYaml $updater.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: "kube-agent-updater"
|
||||
image: "{{ $updater.image }}:{{ include "teleport-kube-agent.version" . }}"
|
||||
{{- if $updater.imagePullPolicy }}
|
||||
imagePullPolicy: {{ toYaml $updater.imagePullPolicy }}
|
||||
{{- end }}
|
||||
{{- if or $updater.extraEnv $updater.tls.existingCASecretName }}
|
||||
env:
|
||||
{{- if (gt (len $updater.extraEnv) 0) }}
|
||||
{{- toYaml $updater.extraEnv | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if $updater.tls.existingCASecretName }}
|
||||
- name: SSL_CERT_FILE
|
||||
value: /etc/teleport-tls-ca/ca.pem
|
||||
# Used to track whether a Teleport agent was installed using this method.
|
||||
- name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT
|
||||
value: "true"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
args:
|
||||
- "--agent-name={{ .Release.Name }}"
|
||||
- "--agent-namespace={{ .Release.Namespace }}"
|
||||
- "--base-image={{ include "teleport-kube-agent.baseImage" . }}"
|
||||
- "--version-server={{ $updater.versionServer }}"
|
||||
- "--version-channel={{ $updater.releaseChannel }}"
|
||||
{{- if $updater.securityContext }}
|
||||
securityContext: {{- toYaml $updater.securityContext | nindent 10 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: metrics
|
||||
containerPort: 8080
|
||||
protocol: TCP
|
||||
- name: healthz
|
||||
containerPort: 8081
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: healthz
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
failureThreshold: 6 # consider agent unhealthy after 30s (6 * 5s)
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: healthz
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
failureThreshold: 6 # consider unready after 30s
|
||||
timeoutSeconds: 5
|
||||
{{- if $updater.resources }}
|
||||
resources: {{- toYaml $updater.resources | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if $updater.tls.existingCASecretName }}
|
||||
volumeMounts:
|
||||
- mountPath: /etc/teleport-tls-ca
|
||||
name: "teleport-tls-ca"
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: "teleport-tls-ca"
|
||||
secret:
|
||||
secretName: {{ $updater.tls.existingCASecretName }}
|
||||
{{- end }}
|
||||
{{- if $updater.priorityClassName }}
|
||||
priorityClassName: {{ $updater.priorityClassName }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "teleport-kube-agent-updater.serviceAccountName" . }}
|
||||
{{- end -}}
|
||||
95
teleport-kube-agent/templates/updater/role.yaml
Normal file
@ -0,0 +1,95 @@
|
||||
{{- if .Values.updater.enabled -}}
|
||||
{{- $updater := mustMergeOverwrite (mustDeepCopy .Values) .Values.updater -}}
|
||||
{{- if $updater.rbac.create -}}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-updater
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- if $updater.extraLabels.role }}
|
||||
labels: {{- toYaml $updater.extraLabels.role | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
# the updater needs to list pods to check their health
|
||||
# it also needs to delete pods to unblock stuck StatefulSet rollouts
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods/status
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
# the updater needs to get the secret created by the agent containing the
|
||||
# maintenance window
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}-shared-state
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
# the controller in the updater must be able to watch deployments and
|
||||
# statefulsets and get the one it should reconcile
|
||||
- apiGroups:
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
- statefulsets
|
||||
- deployments/status
|
||||
- statefulsets/status
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
# However, the updater should only update the agent it is watching
|
||||
- apiGroups:
|
||||
- "apps"
|
||||
resources:
|
||||
- deployments
|
||||
- statefulsets
|
||||
verbs:
|
||||
- update
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}
|
||||
- apiGroups:
|
||||
- coordination.k8s.io
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- coordination.k8s.io
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
22
teleport-kube-agent/templates/updater/rolebinding.yaml
Normal file
@ -0,0 +1,22 @@
|
||||
{{- if .Values.updater.enabled -}}
|
||||
{{- $updater := mustMergeOverwrite (mustDeepCopy .Values) .Values.updater -}}
|
||||
{{- if $updater.rbac.create -}}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-updater
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- if $updater.extraLabels.roleBinding }}
|
||||
labels:
|
||||
{{- toYaml $updater.extraLabels.roleBinding | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ .Release.Name }}-updater
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "teleport-kube-agent-updater.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
16
teleport-kube-agent/templates/updater/serviceaccount.yaml
Normal file
@ -0,0 +1,16 @@
|
||||
{{- if .Values.updater.enabled -}}
|
||||
{{- $updater := mustMergeOverwrite (mustDeepCopy .Values) .Values.updater -}}
|
||||
{{- if $updater.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "teleport-kube-agent-updater.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- if $updater.extraLabels.serviceAccount }}
|
||||
labels: {{- toYaml $updater.extraLabels.serviceAccount | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $updater.annotations.serviceAccount }}
|
||||
annotations: {{- toYaml $updater.annotations.serviceAccount | nindent 4 }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
23
teleport-kube-agent/tests/README.md
Normal file
@ -0,0 +1,23 @@
|
||||
## Unit tests for Helm charts
|
||||
|
||||
Helm chart unit tests run here using the [helm-unittest](https://github.com/quintush/helm-unittest/) Helm plugin.
|
||||
|
||||
*Note: there are multiple forks of the helm-unittest plugin.
|
||||
They are not compatible with each other and don't provide the same feature set (e.g. including templates from sub-directories).
|
||||
Our tests rely on features and bugfixes that are only available on the quintush fork
|
||||
(which seems to be the most maintained at the time of writing)*
|
||||
|
||||
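To run the tests locally instead of through the Make targets, a minimal sketch looks like this (assuming the quintush fork of the plugin, Helm 3, and that you run the command from the directory containing the `teleport-kube-agent` chart):

```bash
# Install the quintush fork of the helm-unittest plugin (one-off).
helm plugin install https://github.com/quintush/helm-unittest

# Run the chart's test suites with Helm 3 semantics.
helm unittest -3 teleport-kube-agent
```
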
If you get a snapshot error during testing, verify that your changes were intended to alter the output, then run
|
||||
this command from the root of your Teleport checkout to update the snapshots:
|
||||
|
||||
```bash
|
||||
make -C build.assets test-helm-update-snapshots
|
||||
```
|
||||
|
||||
After this, re-run the tests to make sure everything is fine:
|
||||
|
||||
```bash
|
||||
make -C build.assets test-helm
|
||||
```
|
||||
|
||||
Commit the updated snapshots along with your changes.
|
||||
@ -0,0 +1,28 @@
|
||||
generate an admin cluster role binding when adminClusterRoleBinding.create is true:
|
||||
1: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: admin-k8s-cluster-group
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: Group
|
||||
name: cluster-admin
|
||||
generate an admin cluster role binding when adminClusterRoleBinding.create is true and adminClusterRoleBinding.name is set:
|
||||
1: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: admin-k8s-cluster-group
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: Group
|
||||
name: my-cluster-admin
|
||||
@ -0,0 +1,57 @@
|
||||
creates a ClusterRole:
|
||||
1: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: RELEASE-NAME
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- users
|
||||
- groups
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- impersonate
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- authorization.k8s.io
|
||||
resources:
|
||||
- selfsubjectaccessreviews
|
||||
verbs:
|
||||
- create
|
||||
sets ClusterRole labels when specified:
|
||||
1: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: teleport-kube-agent
|
||||
resource: clusterrole
|
||||
name: RELEASE-NAME
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- users
|
||||
- groups
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- impersonate
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- authorization.k8s.io
|
||||
resources:
|
||||
- selfsubjectaccessreviews
|
||||
verbs:
|
||||
- create
|
||||
@ -0,0 +1,31 @@
|
||||
creates a ClusterRoleBinding:
|
||||
1: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: RELEASE-NAME
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: RELEASE-NAME
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: RELEASE-NAME
|
||||
namespace: NAMESPACE
|
||||
sets ClusterRoleBinding labels when specified:
|
||||
1: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: teleport-kube-agent
|
||||
resource: clusterrolebinding
|
||||
name: RELEASE-NAME
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: RELEASE-NAME
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: RELEASE-NAME
|
||||
namespace: NAMESPACE
|
||||
1130
teleport-kube-agent/tests/__snapshot__/config_test.yaml.snap
Normal file
File diff suppressed because it is too large
1950
teleport-kube-agent/tests/__snapshot__/deployment_test.yaml.snap
Normal file
File diff suppressed because it is too large
205
teleport-kube-agent/tests/__snapshot__/job_test.yaml.snap
Normal file
@ -0,0 +1,205 @@
|
||||
should create ServiceAccount for post-delete hook by default:
|
||||
1: |
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
annotations:
|
||||
helm.sh/hook: post-delete
|
||||
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
|
||||
helm.sh/hook-weight: "-4"
|
||||
name: RELEASE-NAME-delete-hook
|
||||
namespace: NAMESPACE
|
||||
? should inherit ServiceAccount name from values and not create serviceAccount if
|
||||
serviceAccount.create is false and serviceAccount.name is set
|
||||
: 1: |
|
||||
containers:
|
||||
- args:
|
||||
- kube-state
|
||||
- delete
|
||||
command:
|
||||
- teleport
|
||||
env:
|
||||
- name: KUBE_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: RELEASE_NAME
|
||||
value: RELEASE-NAME
|
||||
image: public.ecr.aws/gravitational/teleport-distroless:13.3.8
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: post-delete-job
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- all
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
runAsUser: 9807
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: lint-serviceaccount
|
||||
should not create ServiceAccount for post-delete hook if serviceAccount.create is false:
|
||||
1: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
annotations:
|
||||
helm.sh/hook: post-delete
|
||||
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
|
||||
helm.sh/hook-weight: "-3"
|
||||
name: RELEASE-NAME-delete-hook
|
||||
namespace: NAMESPACE
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- delete
|
||||
- list
|
||||
2: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
annotations:
|
||||
helm.sh/hook: post-delete
|
||||
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
|
||||
helm.sh/hook-weight: "-2"
|
||||
name: RELEASE-NAME-delete-hook
|
||||
namespace: NAMESPACE
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: RELEASE-NAME-delete-hook
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: RELEASE-NAME-delete-hook
|
||||
namespace: NAMESPACE
|
||||
3: |
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
annotations:
|
||||
helm.sh/hook: post-delete
|
||||
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
|
||||
helm.sh/hook-weight: "-1"
|
||||
name: RELEASE-NAME-delete-hook
|
||||
namespace: NAMESPACE
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: RELEASE-NAME-delete-hook
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- kube-state
|
||||
- delete
|
||||
command:
|
||||
- teleport
|
||||
env:
|
||||
- name: KUBE_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: RELEASE_NAME
|
||||
value: RELEASE-NAME
|
||||
image: public.ecr.aws/gravitational/teleport-distroless:13.3.8
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: post-delete-job
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- all
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
runAsUser: 9807
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: lint-serviceaccount
|
||||
should not create ServiceAccount, Role or RoleBinding for post-delete hook if serviceAccount.create and rbac.create are false:
|
||||
1: |
|
||||
containers:
|
||||
- args:
|
||||
- kube-state
|
||||
- delete
|
||||
command:
|
||||
- teleport
|
||||
env:
|
||||
- name: KUBE_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: RELEASE_NAME
|
||||
value: RELEASE-NAME
|
||||
image: public.ecr.aws/gravitational/teleport-distroless:13.3.8
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: post-delete-job
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- all
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
runAsUser: 9807
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: lint-serviceaccount
|
||||
should set nodeSelector in post-delete hook:
|
||||
1: |
|
||||
containers:
|
||||
- args:
|
||||
- kube-state
|
||||
- delete
|
||||
command:
|
||||
- teleport
|
||||
env:
|
||||
- name: KUBE_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: RELEASE_NAME
|
||||
value: RELEASE-NAME
|
||||
image: public.ecr.aws/gravitational/teleport-distroless:13.3.8
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: post-delete-job
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- all
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
runAsUser: 9807
|
||||
nodeSelector:
|
||||
gravitational.io/k8s-role: node
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: RELEASE-NAME-delete-hook
|
||||
should set securityContext in post-delete hook:
|
||||
1: |
|
||||
containers:
|
||||
- args:
|
||||
- kube-state
|
||||
- delete
|
||||
command:
|
||||
- teleport
|
||||
env:
|
||||
- name: KUBE_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: RELEASE_NAME
|
||||
value: RELEASE-NAME
|
||||
image: public.ecr.aws/gravitational/teleport-distroless:13.3.8
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: post-delete-job
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- all
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
runAsUser: 9807
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: RELEASE-NAME-delete-hook
|
||||
30
teleport-kube-agent/tests/__snapshot__/pdb_test.yaml.snap
Normal file
@ -0,0 +1,30 @@
|
||||
sets PodDisruptionBudget labels when specified:
|
||||
1: |
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
labels:
|
||||
app: RELEASE-NAME
|
||||
app.kubernetes.io/name: teleport-kube-agent
|
||||
resource: poddisruptionbudget
|
||||
name: RELEASE-NAME
|
||||
namespace: NAMESPACE
|
||||
spec:
|
||||
minAvailable: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: RELEASE-NAME
|
||||
should create a PDB when enabled in values (pdb.yaml):
|
||||
1: |
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
labels:
|
||||
app: RELEASE-NAME
|
||||
name: RELEASE-NAME
|
||||
namespace: NAMESPACE
|
||||
spec:
|
||||
minAvailable: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: RELEASE-NAME
|
||||
123
teleport-kube-agent/tests/__snapshot__/psp_test.yaml.snap
Normal file
@ -0,0 +1,123 @@
|
||||
creates a PodSecurityPolicy when enabled in values and supported:
|
||||
1: |
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
|
||||
name: RELEASE-NAME
|
||||
spec:
|
||||
allowPrivilegeEscalation: false
|
||||
fsGroup:
|
||||
ranges:
|
||||
- max: 65535
|
||||
min: 1
|
||||
rule: MustRunAs
|
||||
hostIPC: false
|
||||
hostNetwork: false
|
||||
hostPID: false
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
requiredDropCapabilities:
|
||||
- ALL
|
||||
runAsUser:
|
||||
rule: MustRunAsNonRoot
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
ranges:
|
||||
- max: 65535
|
||||
min: 1
|
||||
rule: MustRunAs
|
||||
volumes:
|
||||
- '*'
|
||||
2: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: RELEASE-NAME-psp
|
||||
rules:
|
||||
- apiGroups:
|
||||
- policy
|
||||
resourceNames:
|
||||
- RELEASE-NAME
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
verbs:
|
||||
- use
|
||||
3: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: RELEASE-NAME-psp
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: RELEASE-NAME-psp
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: RELEASE-NAME
|
||||
sets PodSecurityPolicy labels when specified:
|
||||
1: |
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
|
||||
labels:
|
||||
app.kubernetes.io/name: teleport-kube-agent
|
||||
resource: podsecuritypolicy
|
||||
name: RELEASE-NAME
|
||||
spec:
|
||||
allowPrivilegeEscalation: false
|
||||
fsGroup:
|
||||
ranges:
|
||||
- max: 65535
|
||||
min: 1
|
||||
rule: MustRunAs
|
||||
hostIPC: false
|
||||
hostNetwork: false
|
||||
hostPID: false
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
requiredDropCapabilities:
|
||||
- ALL
|
||||
runAsUser:
|
||||
rule: MustRunAsNonRoot
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
ranges:
|
||||
- max: 65535
|
||||
min: 1
|
||||
rule: MustRunAs
|
||||
volumes:
|
||||
- '*'
|
||||
2: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: RELEASE-NAME-psp
|
||||
rules:
|
||||
- apiGroups:
|
||||
- policy
|
||||
resourceNames:
|
||||
- RELEASE-NAME
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
verbs:
|
||||
- use
|
||||
3: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: RELEASE-NAME-psp
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: RELEASE-NAME-psp
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: RELEASE-NAME
|
||||
37
teleport-kube-agent/tests/__snapshot__/role_test.yaml.snap
Normal file
@ -0,0 +1,37 @@
|
||||
creates a Role:
|
||||
1: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: RELEASE-NAME
|
||||
namespace: NAMESPACE
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
sets Role labels when specified:
|
||||
1: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: teleport-kube-agent
|
||||
resource: role
|
||||
name: RELEASE-NAME
|
||||
namespace: NAMESPACE
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
@ -0,0 +1,33 @@
|
||||
creates a RoleBinding:
|
||||
1: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: RELEASE-NAME
|
||||
namespace: NAMESPACE
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: RELEASE-NAME
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: RELEASE-NAME
|
||||
namespace: NAMESPACE
|
||||
sets RoleBinding labels when specified:
|
||||
1: |
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: teleport-kube-agent
|
||||
resource: rolebinding
|
||||
name: RELEASE-NAME
|
||||
namespace: NAMESPACE
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: RELEASE-NAME
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: RELEASE-NAME
|
||||
namespace: NAMESPACE
|
||||
82
teleport-kube-agent/tests/__snapshot__/secret_test.yaml.snap
Normal file
@ -0,0 +1,82 @@
|
||||
generate a secret when neither authToken nor joinParams.tokenName are provided:
|
||||
1: |
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: teleport-kube-agent-join-token
|
||||
namespace: NAMESPACE
|
||||
stringData:
|
||||
auth-token: ""
|
||||
type: Opaque
|
||||
generates a secret when authToken is provided:
|
||||
1: |
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: teleport-kube-agent-join-token
|
||||
namespace: NAMESPACE
|
||||
stringData:
|
||||
auth-token: |
|
||||
sample-auth-token-dont-use-this
|
||||
type: Opaque
|
||||
generates a secret when joinParams.tokenName is provided:
|
||||
1: |
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: teleport-kube-agent-join-token
|
||||
namespace: NAMESPACE
|
||||
stringData:
|
||||
auth-token: |
|
||||
sample-auth-token-dont-use-this
|
||||
type: Opaque
|
||||
generates a secret with a custom name when authToken and joinTokenSecret.name are provided:
|
||||
1: |
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: some-other-secret-name
|
||||
namespace: NAMESPACE
|
||||
stringData:
|
||||
auth-token: |
|
||||
sample-auth-token-dont-use-this
|
||||
type: Opaque
|
||||
generates a secret with a custom name when authToken and secretName are provided:
|
||||
1: |
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: some-other-secret-name
|
||||
namespace: NAMESPACE
|
||||
stringData:
|
||||
auth-token: |
|
||||
sample-auth-token-dont-use-this
|
||||
type: Opaque
|
||||
sets Secret annotations when specified:
|
||||
1: |
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
annotations:
|
||||
kubernetes.io/secret: test-annotation
|
||||
kubernetes.io/secret-different: 6
|
||||
name: teleport-kube-agent-join-token
|
||||
namespace: NAMESPACE
|
||||
stringData:
|
||||
auth-token: |
|
||||
auth-token
|
||||
type: Opaque
|
||||
sets Secret labels when specified:
|
||||
1: |
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: teleport-kube-agent
|
||||
resource: secret
|
||||
name: teleport-kube-agent-join-token
|
||||
namespace: NAMESPACE
|
||||
stringData:
|
||||
auth-token: |
|
||||
auth-token
|
||||
type: Opaque
|
||||
@ -0,0 +1,20 @@
|
||||
sets ServiceAccount annotations when specified:
|
||||
1: |
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
annotations:
|
||||
kubernetes.io/serviceaccount: test-annotation
|
||||
kubernetes.io/serviceaccount-different: 5
|
||||
name: RELEASE-NAME
|
||||
namespace: NAMESPACE
|
||||
sets ServiceAccount labels when specified:
|
||||
1: |
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: teleport-kube-agent
|
||||
resource: serviceaccount
|
||||
name: RELEASE-NAME
|
||||
namespace: NAMESPACE
|
||||
2490
teleport-kube-agent/tests/__snapshot__/statefulset_test.yaml.snap
Normal file
File diff suppressed because it is too large
@ -0,0 +1,117 @@
|
||||
sets the affinity:
|
||||
1: |
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: gravitational.io/dedicated
|
||||
operator: In
|
||||
values:
|
||||
- teleport
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- podAffinityTerm:
|
||||
labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- teleport
|
||||
topologyKey: kubernetes.io/hostname
|
||||
weight: 1
|
||||
containers:
|
||||
- args:
|
||||
- --agent-name=RELEASE-NAME
|
||||
- --agent-namespace=NAMESPACE
|
||||
- --base-image=public.ecr.aws/gravitational/teleport-distroless
|
||||
- --version-server=https://my-custom-version-server/v1
|
||||
- --version-channel=custom/preview
|
||||
image: public.ecr.aws/gravitational/teleport-kube-agent-updater:13.3.8
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
failureThreshold: 6
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: healthz
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 5
|
||||
name: kube-agent-updater
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
- containerPort: 8081
|
||||
name: healthz
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
failureThreshold: 6
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: healthz
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 5
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- all
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
runAsUser: 9807
|
||||
serviceAccountName: RELEASE-NAME-updater
|
||||
sets the tolerations:
|
||||
1: |
|
||||
containers:
|
||||
- args:
|
||||
- --agent-name=RELEASE-NAME
|
||||
- --agent-namespace=NAMESPACE
|
||||
- --base-image=public.ecr.aws/gravitational/teleport-distroless
|
||||
- --version-server=https://my-custom-version-server/v1
|
||||
- --version-channel=custom/preview
|
||||
image: public.ecr.aws/gravitational/teleport-kube-agent-updater:13.3.8
|
||||
imagePullPolicy: IfNotPresent
|
||||
livenessProbe:
|
||||
failureThreshold: 6
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: healthz
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 5
|
||||
name: kube-agent-updater
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
- containerPort: 8081
|
||||
name: healthz
|
||||
protocol: TCP
|
||||
readinessProbe:
|
||||
failureThreshold: 6
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: healthz
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 5
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- all
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
runAsUser: 9807
|
||||
serviceAccountName: RELEASE-NAME-updater
|
||||
tolerations:
|
||||
- effect: NoExecute
|
||||
key: dedicated
|
||||
operator: Equal
|
||||
value: teleport
|
||||
- effect: NoSchedule
|
||||
key: dedicated
|
||||
operator: Equal
|
||||
value: teleport
|
||||
@ -0,0 +1,76 @@
|
||||
sets the correct role rules:
|
||||
1: |
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods/status
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resourceNames:
|
||||
- RELEASE-NAME-shared-state
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
- statefulsets
|
||||
- deployments/status
|
||||
- statefulsets/status
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- apps
|
||||
resourceNames:
|
||||
- RELEASE-NAME
|
||||
resources:
|
||||
- deployments
|
||||
- statefulsets
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- coordination.k8s.io
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- coordination.k8s.io
|
||||
resourceNames:
|
||||
- RELEASE-NAME
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
35
teleport-kube-agent/tests/admin_clusterrolebinding_test.yaml
Normal file
@ -0,0 +1,35 @@
|
||||
suite: AdminClusterRoleBinding
|
||||
templates:
|
||||
- admin_clusterrolebinding.yaml
|
||||
tests:
|
||||
- it: don't generate an admin cluster role binding when adminClusterRoleBinding.create is false
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 0
|
||||
- it: generate an admin cluster role binding when adminClusterRoleBinding.create is true
|
||||
set:
|
||||
adminClusterRoleBinding:
|
||||
create: true
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ClusterRoleBinding
|
||||
- equal:
|
||||
path: subjects[0].name
|
||||
value: cluster-admin
|
||||
- matchSnapshot: {}
|
||||
- it: generate an admin cluster role binding when adminClusterRoleBinding.create is true and adminClusterRoleBinding.name is set
|
||||
set:
|
||||
adminClusterRoleBinding:
|
||||
create: true
|
||||
name: my-cluster-admin
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ClusterRoleBinding
|
||||
- equal:
|
||||
path: subjects[0].name
|
||||
value: my-cluster-admin
|
||||
- matchSnapshot: {}
|
||||
23
teleport-kube-agent/tests/clusterrole_test.yaml
Normal file
@ -0,0 +1,23 @@
|
||||
suite: ClusterRole
|
||||
templates:
|
||||
- clusterrole.yaml
|
||||
tests:
|
||||
- it: creates a ClusterRole
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ClusterRole
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: sets ClusterRole labels when specified
|
||||
values:
|
||||
- ../.lint/extra-labels.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.labels.app\.kubernetes\.io/name
|
||||
value: teleport-kube-agent
|
||||
- equal:
|
||||
path: metadata.labels.resource
|
||||
value: clusterrole
|
||||
- matchSnapshot: {}
|
||||
23
teleport-kube-agent/tests/clusterrolebinding_test.yaml
Normal file
@ -0,0 +1,23 @@
|
||||
suite: ClusterRoleBinding
|
||||
templates:
|
||||
- clusterrolebinding.yaml
|
||||
tests:
|
||||
- it: creates a ClusterRoleBinding
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ClusterRoleBinding
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: sets ClusterRoleBinding labels when specified
|
||||
values:
|
||||
- ../.lint/extra-labels.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.labels.app\.kubernetes\.io/name
|
||||
value: teleport-kube-agent
|
||||
- equal:
|
||||
path: metadata.labels.resource
|
||||
value: clusterrolebinding
|
||||
- matchSnapshot: {}
|
||||
291
teleport-kube-agent/tests/config_test.yaml
Normal file
@ -0,0 +1,291 @@
|
||||
suite: ConfigMap
|
||||
templates:
|
||||
- config.yaml
|
||||
tests:
|
||||
- it: matches snapshot for affinity.yaml
|
||||
values:
|
||||
- ../.lint/affinity.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for all-v6.yaml
|
||||
values:
|
||||
- ../.lint/all-v6.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot and tests for extra-labels.yaml
|
||||
values:
|
||||
- ../.lint/extra-labels.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.labels.app\.kubernetes\.io/name
|
||||
value: teleport-kube-agent
|
||||
- equal:
|
||||
path: metadata.labels.resource
|
||||
value: config
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot and tests for annotations.yaml
|
||||
values:
|
||||
- ../.lint/annotations.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- equal:
|
||||
path: metadata.annotations.kubernetes\.io/config
|
||||
value: test-annotation
|
||||
- equal:
|
||||
path: metadata.annotations.kubernetes\.io/config-different
|
||||
value: 2
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for aws-databases.yaml
|
||||
values:
|
||||
- ../.lint/aws-databases.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for azure-databases.yaml
|
||||
values:
|
||||
- ../.lint/azure-databases.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for backwards-compatibility.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for ca-pin.yaml
|
||||
values:
|
||||
- ../.lint/ca-pin.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: does not generate a config for clusterrole.yaml
|
||||
values:
|
||||
- ../.lint/clusterrole.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for db.yaml
|
||||
values:
|
||||
- ../.lint/db.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for dynamic-app.yaml
|
||||
values:
|
||||
- ../.lint/dynamic-app.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for dynamic-db.yaml
|
||||
values:
|
||||
- ../.lint/dynamic-db.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for imagepullsecrets.yaml
|
||||
values:
|
||||
- ../.lint/imagepullsecrets.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for initcontainers.yaml
|
||||
values:
|
||||
- ../.lint/initcontainers.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for join-params-iam.yaml
|
||||
values:
|
||||
- ../.lint/join-params-iam.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for join-params-token.yaml
|
||||
values:
|
||||
- ../.lint/join-params-token.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for log-basic.yaml
|
||||
values:
|
||||
- ../.lint/log-basic.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for log-extra.yaml
|
||||
values:
|
||||
- ../.lint/log-extra.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for log-legacy.yaml
|
||||
values:
|
||||
- ../.lint/log-legacy.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for node-selector.yaml
|
||||
values:
|
||||
- ../.lint/node-selector.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for pdb.yaml
|
||||
values:
|
||||
- ../.lint/log-extra.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: does not generate a config for pdb.yaml
|
||||
values:
|
||||
- ../.lint/pdb.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for resources.yaml
|
||||
values:
|
||||
- ../.lint/resources.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for stateful.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for tolerations.yaml
|
||||
values:
|
||||
- ../.lint/tolerations.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for volumes.yaml
|
||||
values:
|
||||
- ../.lint/volumes.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for v10.yaml
|
||||
values:
|
||||
- ../.lint/v10.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: matches snapshot for v11.yaml
|
||||
values:
|
||||
- ../.lint/v11.yaml
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
- matchSnapshot: {}
|
||||
687
teleport-kube-agent/tests/deployment_test.yaml
Normal file
@ -0,0 +1,687 @@
|
||||
suite: Deployment
|
||||
templates:
|
||||
- deployment.yaml
|
||||
- config.yaml
|
||||
release:
|
||||
upgrade: true
|
||||
tests:
|
||||
- it: creates a Deployment if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/all-v6.yaml
|
||||
asserts:
|
||||
- isKind:
|
||||
of: Deployment
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
|
||||
- it: sets Deployment labels when specified if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/extra-labels.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.labels.app\.kubernetes\.io/name
|
||||
value: teleport-kube-agent
|
||||
- equal:
|
||||
path: metadata.labels.resource
|
||||
value: deployment
|
||||
- matchSnapshot:
|
||||
path: spec
|
||||
|
||||
- it: sets Pod labels when specified if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/extra-labels.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.metadata.labels.app\.kubernetes\.io/name
|
||||
value: teleport-kube-agent
|
||||
- equal:
|
||||
path: spec.template.metadata.labels.resource
|
||||
value: pod
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: sets Deployment annotations when specified if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/annotations.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.annotations.kubernetes\.io/deployment
|
||||
value: test-annotation
|
||||
- equal:
|
||||
path: metadata.annotations.kubernetes\.io/deployment-different
|
||||
value: 3
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: sets Pod annotations when specified if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/annotations.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.metadata.annotations.kubernetes\.io/pod
|
||||
value: test-annotation
|
||||
- equal:
|
||||
path: spec.template.metadata.annotations.kubernetes\.io/pod-different
|
||||
value: 4
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should have one replica when replicaCount is not set if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.replicas
|
||||
value: 1
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should have multiple replicas when replicaCount is set (using .replicaCount, deprecated) if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
replicaCount: 3
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.replicas
|
||||
value: 3
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should have multiple replicas when replicaCount is set (using highAvailability.replicaCount) if action is Upgrade
|
||||
template: deployment.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
highAvailability:
|
||||
replicaCount: 3
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.replicas
|
||||
value: 3
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set affinity when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/affinity.yaml
|
||||
asserts:
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set required affinity when highAvailability.requireAntiAffinity is set if action is Upgrade
|
||||
template: deployment.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
|
||||
highAvailability:
|
||||
replicaCount: 2
|
||||
requireAntiAffinity: true
|
||||
asserts:
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity.podAntiAffinity
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
|
||||
- isNull:
|
||||
path: spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set preferred affinity when more than one replica is used if action is Upgrade
|
||||
template: deployment.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
|
||||
highAvailability:
|
||||
replicaCount: 3
|
||||
asserts:
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity.podAntiAffinity
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution
|
||||
- isNull:
|
||||
path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set tolerations when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/tolerations.yaml
|
||||
asserts:
|
||||
- isNotNull:
|
||||
path: spec.template.spec.tolerations
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set resources when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/resources.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].resources.limits.cpu
|
||||
value: 2
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].resources.limits.memory
|
||||
value: 4Gi
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].resources.requests.cpu
|
||||
value: 1
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].resources.requests.memory
|
||||
value: 2Gi
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set SecurityContext if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation
|
||||
value: false
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.capabilities
|
||||
value:
|
||||
drop:
|
||||
- all
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
|
||||
value: true
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.runAsNonRoot
|
||||
value: true
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.runAsUser
|
||||
value: 9807
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set image and tag correctly if action is Upgrade
|
||||
template: deployment.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
|
||||
teleportVersionOverride: 12.2.1
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].image
|
||||
value: public.ecr.aws/gravitational/teleport-distroless:12.2.1
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should mount extraVolumes and extraVolumeMounts if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/volumes.yaml
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].volumeMounts
|
||||
content:
|
||||
mountPath: /path/to/mount
|
||||
name: my-mount
|
||||
- contains:
|
||||
path: spec.template.spec.volumes
|
||||
content:
|
||||
name: my-mount
|
||||
secret:
|
||||
secretName: mySecret
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set imagePullPolicy when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
|
||||
imagePullPolicy: Always
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].imagePullPolicy
|
||||
value: Always
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set environment when extraEnv set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
|
||||
proxyAddr: helm-lint.example.com
|
||||
authToken: sample-auth-token-dont-use-this
|
||||
kubeClusterName: helm-lint.example.com
|
||||
extraEnv:
|
||||
- name: HTTPS_PROXY
|
||||
value: "http://username:password@my.proxy.host:3128"
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].env
|
||||
content:
|
||||
name: HTTPS_PROXY
|
||||
value: "http://username:password@my.proxy.host:3128"
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should provision initContainer correctly when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/initcontainers.yaml
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.initContainers[0].args
|
||||
content: "echo test"
|
||||
- equal:
|
||||
path: spec.template.spec.initContainers[0].name
|
||||
value: "teleport-init"
|
||||
- equal:
|
||||
path: spec.template.spec.initContainers[0].image
|
||||
value: "alpine"
|
||||
- equal:
|
||||
path: spec.template.spec.initContainers[0].resources.limits.cpu
|
||||
value: 2
|
||||
- equal:
|
||||
path: spec.template.spec.initContainers[0].resources.limits.memory
|
||||
value: 4Gi
|
||||
- equal:
|
||||
path: spec.template.spec.initContainers[0].resources.requests.cpu
|
||||
value: 1
|
||||
- equal:
|
||||
path: spec.template.spec.initContainers[0].resources.requests.memory
|
||||
value: 2Gi
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should add insecureSkipProxyTLSVerify to args when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
|
||||
insecureSkipProxyTLSVerify: true
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].args
|
||||
content: "--insecure"
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should expose diag port if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].ports
|
||||
content:
|
||||
name: diag
|
||||
containerPort: 3000
|
||||
protocol: TCP
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set nodeSelector if set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/node-selector.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.nodeSelector
|
||||
value:
|
||||
gravitational.io/k8s-role: node
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should add emptyDir for data when existingDataVolume is not set if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.volumes
|
||||
content:
|
||||
name: data
|
||||
emptyDir: {}
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].volumeMounts
|
||||
content:
|
||||
mountPath: /var/lib/teleport
|
||||
name: data
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should correctly configure existingDataVolume when set if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/existing-data-volume.yaml
|
||||
asserts:
|
||||
- notContains:
|
||||
path: spec.template.spec.volumes
|
||||
content:
|
||||
name: data
|
||||
emptyDir: {}
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].volumeMounts
|
||||
content:
|
||||
mountPath: /var/lib/teleport
|
||||
name: teleport-kube-agent-data
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should mount tls.existingCASecretName and set environment when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/existing-tls-secret-with-ca.yaml
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.volumes
|
||||
content:
|
||||
name: teleport-tls-ca
|
||||
secret:
|
||||
secretName: helm-lint-existing-tls-secret-ca
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].volumeMounts
|
||||
content:
|
||||
mountPath: /etc/teleport-tls-ca
|
||||
name: teleport-tls-ca
|
||||
readOnly: true
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].env
|
||||
content:
|
||||
name: SSL_CERT_FILE
|
||||
value: /etc/teleport-tls-ca/ca.pem
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should mount tls.existingCASecretName and set extra environment when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/existing-tls-secret-with-ca.yaml
|
||||
- ../.lint/extra-env.yaml
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.volumes
|
||||
content:
|
||||
name: teleport-tls-ca
|
||||
secret:
|
||||
secretName: helm-lint-existing-tls-secret-ca
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].volumeMounts
|
||||
content:
|
||||
mountPath: /etc/teleport-tls-ca
|
||||
name: teleport-tls-ca
|
||||
readOnly: true
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].env
|
||||
content:
|
||||
name: SSL_CERT_FILE
|
||||
value: /etc/teleport-tls-ca/ca.pem
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].env
|
||||
content:
|
||||
name: HTTPS_PROXY
|
||||
value: http://username:password@my.proxy.host:3128
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set priorityClassName when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/priority-class-name.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.priorityClassName
|
||||
value: teleport-kube-agent
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should not set priorityClassName when not set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- isNull:
|
||||
path: spec.template.spec.priorityClassName
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set serviceAccountName when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/service-account-name.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.serviceAccountName
|
||||
value: teleport-kube-agent-sa
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set default serviceAccountName when not set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.serviceAccountName
|
||||
value: RELEASE-NAME
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set probeTimeoutSeconds when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/probe-timeout-seconds.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].livenessProbe.timeoutSeconds
|
||||
value: 5
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].readinessProbe.timeoutSeconds
|
||||
value: 5
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set dnsConfig when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/dnsconfig.yaml
|
||||
asserts:
|
||||
- notEqual:
|
||||
path: spec.template.spec.dnsConfig
|
||||
value: null
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec.dnsConfig
|
||||
|
||||
- it: should set dnsPolicy when set in values if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/dnsconfig.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.dnsPolicy
|
||||
value: ClusterFirstWithHostNet
|
||||
|
||||
- it: should not render Deployment if action is fresh install without storage
|
||||
template: deployment.yaml
|
||||
release:
|
||||
upgrade: false
|
||||
values:
|
||||
- ../.lint/all-v6.yaml
|
||||
set:
|
||||
storage:
|
||||
enabled: false
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 0
|
||||
|
||||
- it: sets by default a container security context if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/initcontainers.yaml
|
||||
asserts:
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec.initContainers[0].securityContext
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec.containers[0].securityContext
|
||||
|
||||
- it: sets no container security context when manually unset if action is Upgrade
|
||||
template: deployment.yaml
|
||||
set:
|
||||
# unit test does not support lookup functions, so to test the behavior we use this undoc value
|
||||
# https://github.com/helm/helm/issues/8137
|
||||
unitTestUpgrade: true
|
||||
values:
|
||||
- ../.lint/initcontainers.yaml
|
||||
- ../.lint/security-context-empty.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.initContainers[0].securityContext
|
||||
value: null
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext
|
||||
value: null
|
||||
208
teleport-kube-agent/tests/job_test.yaml
Normal file
@ -0,0 +1,208 @@
|
||||
suite: Job
|
||||
templates:
|
||||
- delete_hook.yaml
|
||||
|
||||
release:
|
||||
upgrade: true
|
||||
tests:
|
||||
- it: should create ServiceAccount, Role, RoleBinding and Job for post-delete hook by default
|
||||
template: delete_hook.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- containsDocument:
|
||||
kind: ServiceAccount
|
||||
apiVersion: v1
|
||||
- containsDocument:
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
- containsDocument:
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
- containsDocument:
|
||||
kind: Job
|
||||
apiVersion: batch/v1
|
||||
|
||||
- it: should set securityContext in post-delete hook
|
||||
template: delete_hook.yaml
|
||||
# documentIndex: 0=ServiceAccount 1=Role 2=RoleBinding 3=Job
|
||||
documentIndex: 3
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation
|
||||
value: false
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.capabilities
|
||||
value:
|
||||
drop:
|
||||
- all
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
|
||||
value: true
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.runAsNonRoot
|
||||
value: true
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.runAsUser
|
||||
value: 9807
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set nodeSelector in post-delete hook
|
||||
template: delete_hook.yaml
|
||||
# documentIndex: 0=ServiceAccount 1=Role 2=RoleBinding 3=Job
|
||||
documentIndex: 3
|
||||
values:
|
||||
- ../.lint/node-selector.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.nodeSelector
|
||||
value:
|
||||
gravitational.io/k8s-role: node
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should create ServiceAccount for post-delete hook by default
|
||||
template: delete_hook.yaml
|
||||
# documentIndex: 0=ServiceAccount 1=Role 2=RoleBinding 3=Job
|
||||
documentIndex: 0
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- containsDocument:
|
||||
kind: ServiceAccount
|
||||
apiVersion: v1
|
||||
- equal:
|
||||
path: metadata.name
|
||||
value: RELEASE-NAME-delete-hook
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should create ServiceAccount for post-delete hook with a custom name if serviceAccount.name is set and serviceAccount.create is true
|
||||
template: delete_hook.yaml
|
||||
# documentIndex: 0=ServiceAccount 1=Role 2=RoleBinding 3=Job
|
||||
documentIndex: 0
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
set:
|
||||
serviceAccount:
|
||||
create: true
|
||||
name: lint-serviceaccount
|
||||
asserts:
|
||||
- containsDocument:
|
||||
kind: ServiceAccount
|
||||
apiVersion: v1
|
||||
- equal:
|
||||
path: metadata.name
|
||||
value: lint-serviceaccount
|
||||
|
||||
- it: should create Role for post-delete hook by default
|
||||
template: delete_hook.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- containsDocument:
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
|
||||
- it: should create RoleBinding for post-delete hook by default
|
||||
template: delete_hook.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
asserts:
|
||||
- containsDocument:
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
|
||||
- it: should not create ServiceAccount for post-delete hook if serviceAccount.create is false
|
||||
template: delete_hook.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
set:
|
||||
serviceAccount:
|
||||
create: false
|
||||
name: lint-serviceaccount
|
||||
asserts:
|
||||
- not: true
|
||||
containsDocument:
|
||||
kind: ServiceAccount
|
||||
apiVersion: v1
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should inherit ServiceAccount name from values and not create serviceAccount if serviceAccount.create is false and serviceAccount.name is set
|
||||
template: delete_hook.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
set:
|
||||
serviceAccount:
|
||||
create: false
|
||||
name: lint-serviceaccount
|
||||
asserts:
|
||||
- not: true
|
||||
containsDocument:
|
||||
kind: ServiceAccount
|
||||
apiVersion: v1
|
||||
# ServiceAccount is not created in this mode
|
||||
# documentIndex: 0=Role 1=RoleBinding 2=Job
|
||||
- documentIndex: 2
|
||||
equal:
|
||||
path: spec.template.spec.serviceAccountName
|
||||
value: lint-serviceaccount
|
||||
- documentIndex: 2
|
||||
matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should not create Role for post-delete hook if rbac.create is false
|
||||
template: delete_hook.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
set:
|
||||
rbac:
|
||||
create: false
|
||||
asserts:
|
||||
- not: true
|
||||
containsDocument:
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
|
||||
- it: should not create RoleBinding for post-delete hook if rbac.create is false
|
||||
template: delete_hook.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
set:
|
||||
rbac:
|
||||
create: false
|
||||
asserts:
|
||||
- not: true
|
||||
containsDocument:
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
|
||||
- it: should not create ServiceAccount, Role or RoleBinding for post-delete hook if serviceAccount.create and rbac.create are false
|
||||
template: delete_hook.yaml
|
||||
values:
|
||||
- ../.lint/backwards-compatibility.yaml
|
||||
set:
|
||||
rbac:
|
||||
create: false
|
||||
serviceAccount:
|
||||
create: false
|
||||
name: lint-serviceaccount
|
||||
asserts:
|
||||
- not: true
|
||||
containsDocument:
|
||||
kind: ServiceAccount
|
||||
apiVersion: v1
|
||||
- not: true
|
||||
containsDocument:
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
- not: true
|
||||
containsDocument:
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
26
teleport-kube-agent/tests/pdb_test.yaml
Normal file
@ -0,0 +1,26 @@
suite: PodDisruptionBudget
templates:
  - pdb.yaml
tests:
  - it: should create a PDB when enabled in values (pdb.yaml)
    values:
      - ../.lint/pdb.yaml
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: PodDisruptionBudget
      - matchSnapshot: {}

  - it: sets PodDisruptionBudget labels when specified
    values:
      - ../.lint/pdb.yaml
      - ../.lint/extra-labels.yaml
    asserts:
      - equal:
          path: metadata.labels.app\.kubernetes\.io/name
          value: teleport-kube-agent
      - equal:
          path: metadata.labels.resource
          value: poddisruptionbudget
      - matchSnapshot: {}
43
teleport-kube-agent/tests/podmonitor_test.yaml
Normal file
@ -0,0 +1,43 @@
suite: PodMonitor
templates:
  - podmonitor.yaml
tests:
  - it: does not create a PodMonitor by default
    set:
      proxyAddr: proxy.example.com:3080
      kubeClusterName: test-kube-cluster-name
    asserts:
      - hasDocuments:
          count: 0

  - it: creates a PodMonitor when enabled
    set:
      proxyAddr: proxy.example.com:3080
      kubeClusterName: test-kube-cluster-name
      podMonitor:
        enabled: true
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: PodMonitor

  - it: configures scrape interval if provided
    set:
      proxyAddr: proxy.example.com:3080
      kubeClusterName: test-kube-cluster-name
      podMonitor:
        enabled: true
        interval: 2m
    asserts:
      - equal:
          path: spec.podMetricsEndpoints[0].interval
          value: 2m

  - it: wears additional labels if provided
    asserts:
      - equal:
          path: metadata.labels.prometheus
          value: default
    values:
      - ../.lint/podmonitor.yaml
55
teleport-kube-agent/tests/psp_test.yaml
Normal file
@ -0,0 +1,55 @@
suite: PodSecurityPolicy
templates:
  - psp.yaml
tests:
  - it: creates a PodSecurityPolicy when enabled in values and supported
    capabilities:
      majorVersion: 1
      minorVersion: 22
    set:
      podSecurityPolicy:
        enabled: true
    asserts:
      - hasDocuments:
          count: 3
      - documentIndex: 0
        isKind:
          of: PodSecurityPolicy
      - documentIndex: 1
        isKind:
          of: Role
      - documentIndex: 2
        isKind:
          of: RoleBinding
      - matchSnapshot: {}

  - it: sets PodSecurityPolicy labels when specified
    capabilities:
      majorVersion: 1
      minorVersion: 22
    values:
      - ../.lint/extra-labels.yaml
    set:
      podSecurityPolicy:
        enabled: true
    asserts:
      - documentIndex: 0
        equal:
          path: metadata.labels.app\.kubernetes\.io/name
          value: teleport-kube-agent
      - documentIndex: 0
        equal:
          path: metadata.labels.resource
          value: podsecuritypolicy
      - matchSnapshot: {}

  - it: does not create a PodSecurityPolicy when enabled in values but not supported
    capabilities:
      majorVersion: 1
      minorVersion: 25
    set:
      podSecurityPolicy:
        enabled: true
    asserts:
      - hasDocuments:
          count: 0
34
teleport-kube-agent/tests/role_test.yaml
Normal file
@ -0,0 +1,34 @@
suite: Role
templates:
  - role.yaml
tests:
  - it: Create a Role when upgrading
    release:
      isupgrade: true
    set:
      unitTestUpgrade: true
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Role

  - it: creates a Role
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Role
      - matchSnapshot: {}

  - it: sets Role labels when specified
    values:
      - ../.lint/extra-labels.yaml
    asserts:
      - equal:
          path: metadata.labels.app\.kubernetes\.io/name
          value: teleport-kube-agent
      - equal:
          path: metadata.labels.resource
          value: role
      - matchSnapshot: {}
34
teleport-kube-agent/tests/rolebinding_test.yaml
Normal file
@ -0,0 +1,34 @@
suite: RoleBinding
templates:
  - rolebinding.yaml
tests:
  - it: Create a RoleBinding when upgrading
    release:
      isupgrade: true
    set:
      unitTestUpgrade: true
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: RoleBinding

  - it: creates a RoleBinding
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: RoleBinding
      - matchSnapshot: {}

  - it: sets RoleBinding labels when specified
    values:
      - ../.lint/extra-labels.yaml
    asserts:
      - equal:
          path: metadata.labels.app\.kubernetes\.io/name
          value: teleport-kube-agent
      - equal:
          path: metadata.labels.resource
          value: rolebinding
      - matchSnapshot: {}
101
teleport-kube-agent/tests/secret_test.yaml
Normal file
@ -0,0 +1,101 @@
suite: Secret
templates:
  - secret.yaml
tests:
  - it: generates a secret when neither authToken nor joinParams.tokenName is provided
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Secret
      - matchSnapshot: {}
  - it: generates a secret when authToken is provided
    set:
      authToken: sample-auth-token-dont-use-this
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Secret
      - equal:
          path: metadata.name
          value: teleport-kube-agent-join-token
      - matchSnapshot: {}

  - it: generates a secret when joinParams.tokenName is provided
    set:
      joinParams:
        tokenName: sample-auth-token-dont-use-this
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Secret
      - equal:
          path: metadata.name
          value: teleport-kube-agent-join-token
      - matchSnapshot: {}

  - it: generates a secret with a custom name when authToken and secretName are provided
    set:
      authToken: sample-auth-token-dont-use-this
      secretName: some-other-secret-name
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Secret
      - equal:
          path: metadata.name
          value: some-other-secret-name
      - matchSnapshot: {}

  - it: generates a secret with a custom name when authToken and joinTokenSecret.name are provided
    set:
      authToken: sample-auth-token-dont-use-this
      joinTokenSecret:
        name: some-other-secret-name
        create: true
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Secret
      - equal:
          path: metadata.name
          value: some-other-secret-name
      - matchSnapshot: {}

  - it: does not create a secret when joinTokenSecret.create is false
    set:
      authToken: sample-auth-token-dont-use-this
      joinTokenSecret:
        name: some-other-secret-name
        create: false
    asserts:
      - hasDocuments:
          count: 0

  - it: sets Secret labels when specified
    values:
      - ../.lint/extra-labels.yaml
    asserts:
      - equal:
          path: metadata.labels.app\.kubernetes\.io/name
          value: teleport-kube-agent
      - equal:
          path: metadata.labels.resource
          value: secret
      - matchSnapshot: {}

  - it: sets Secret annotations when specified
    values:
      - ../.lint/annotations.yaml
    asserts:
      - equal:
          path: metadata.annotations.kubernetes\.io/secret
          value: test-annotation
      - equal:
          path: metadata.annotations.kubernetes\.io/secret-different
          value: 6
      - matchSnapshot: {}
33
teleport-kube-agent/tests/serviceaccount_test.yaml
Normal file
@ -0,0 +1,33 @@
suite: ServiceAccount
templates:
  - serviceaccount.yaml
tests:
  - it: sets ServiceAccount labels when specified
    values:
      - ../.lint/extra-labels.yaml
    asserts:
      - equal:
          path: metadata.labels.app\.kubernetes\.io/name
          value: teleport-kube-agent
      - equal:
          path: metadata.labels.resource
          value: serviceaccount
      - matchSnapshot: {}

  - it: sets ServiceAccount annotations when specified
    values:
      - ../.lint/annotations.yaml
    asserts:
      - equal:
          path: metadata.annotations.kubernetes\.io/serviceaccount
          value: test-annotation
      - equal:
          path: metadata.annotations.kubernetes\.io/serviceaccount-different
          value: 5
      - matchSnapshot: {}
  - it: skips ServiceAccount creation
    set:
      serviceAccount.create: false
    asserts:
      - hasDocuments:
          count: 0
721
teleport-kube-agent/tests/statefulset_test.yaml
Normal file
@ -0,0 +1,721 @@
|
||||
suite: StatefulSet
|
||||
templates:
|
||||
- statefulset.yaml
|
||||
- config.yaml
|
||||
tests:
|
||||
- it: creates a StatefulSet
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
release:
|
||||
isupgrade: true
|
||||
asserts:
|
||||
- isKind:
|
||||
of: StatefulSet
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
|
||||
- it: sets StatefulSet labels when specified
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
- ../.lint/extra-labels.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.labels.app\.kubernetes\.io/name
|
||||
value: teleport-kube-agent
|
||||
- equal:
|
||||
path: metadata.labels.resource
|
||||
value: deployment
|
||||
- matchSnapshot: {}
|
||||
|
||||
- it: sets Pod labels when specified
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
- ../.lint/extra-labels.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.metadata.labels.app\.kubernetes\.io/name
|
||||
value: teleport-kube-agent
|
||||
- equal:
|
||||
path: spec.template.metadata.labels.resource
|
||||
value: pod
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: sets Pod annotations when specified
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/annotations.yaml
|
||||
- ../.lint/stateful.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.metadata.annotations.kubernetes\.io/pod
|
||||
value: test-annotation
|
||||
- equal:
|
||||
path: spec.template.metadata.annotations.kubernetes\.io/pod-different
|
||||
value: 4
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should have one replica when replicaCount is not set
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.replicas
|
||||
value: 1
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should have multiple replicas when replicaCount is set (using .replicaCount, deprecated)
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
set:
|
||||
replicaCount: 3
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.replicas
|
||||
value: 3
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should have multiple replicas when replicaCount is set (using highAvailability.replicaCount)
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
set:
|
||||
highAvailability:
|
||||
replicaCount: 3
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.replicas
|
||||
value: 3
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set affinity when set in values
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
- ../.lint/affinity.yaml
|
||||
asserts:
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set required affinity when highAvailability.requireAntiAffinity is set
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
set:
|
||||
highAvailability:
|
||||
replicaCount: 2
|
||||
requireAntiAffinity: true
|
||||
asserts:
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity.podAntiAffinity
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
|
||||
- isNull:
|
||||
path: spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set preferred affinity when more than one replica is used
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
set:
|
||||
highAvailability:
|
||||
replicaCount: 3
|
||||
asserts:
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity.podAntiAffinity
|
||||
- isNotNull:
|
||||
path: spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution
|
||||
- isNull:
|
||||
path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set tolerations when set in values
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
- ../.lint/tolerations.yaml
|
||||
asserts:
|
||||
- isNotNull:
|
||||
path: spec.template.spec.tolerations
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set resources when set in values
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
- ../.lint/resources.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].resources.limits.cpu
|
||||
value: 2
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].resources.limits.memory
|
||||
value: 4Gi
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].resources.requests.cpu
|
||||
value: 1
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].resources.requests.memory
|
||||
value: 2Gi
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set SecurityContext
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation
|
||||
value: false
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.capabilities
|
||||
value:
|
||||
drop:
|
||||
- all
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
|
||||
value: true
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.runAsNonRoot
|
||||
value: true
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].securityContext.runAsUser
|
||||
value: 9807
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set image and tag correctly
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
set:
|
||||
teleportVersionOverride: 12.2.1
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].image
|
||||
value: public.ecr.aws/gravitational/teleport-distroless:12.2.1
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should mount extraVolumes and extraVolumeMounts
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
- ../.lint/volumes.yaml
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].volumeMounts
|
||||
content:
|
||||
mountPath: /path/to/mount
|
||||
name: my-mount
|
||||
- contains:
|
||||
path: spec.template.spec.volumes
|
||||
content:
|
||||
name: my-mount
|
||||
secret:
|
||||
secretName: mySecret
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should mount auth token if token is provided
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/join-params-token.yaml
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].volumeMounts
|
||||
content:
|
||||
mountPath: /etc/teleport-secrets
|
||||
name: auth-token
|
||||
readOnly: true
|
||||
- contains:
|
||||
path: spec.template.spec.volumes
|
||||
content:
|
||||
name: auth-token
|
||||
secret:
|
||||
secretName: teleport-kube-agent-join-token
|
||||
|
||||
- it: should set imagePullPolicy when set in values
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
set:
|
||||
imagePullPolicy: Always
|
||||
asserts:
|
||||
- equal:
|
||||
path: spec.template.spec.containers[0].imagePullPolicy
|
||||
value: Always
|
||||
- matchSnapshot:
|
||||
path: spec.template.spec
|
||||
|
||||
- it: should set environment when extraEnv set in values
|
||||
template: statefulset.yaml
|
||||
values:
|
||||
- ../.lint/stateful.yaml
|
||||
set:
|
||||
extraEnv:
|
||||
- name: HTTPS_PROXY
|
||||
value: "http://username:password@my.proxy.host:3128"
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].env
|
||||
content:
|
||||
name: TELEPORT_REPLICA_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].env
|
||||
content:
            name: KUBE_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: HTTPS_PROXY
            value: "http://username:password@my.proxy.host:3128"
      - matchSnapshot:
          path: spec.template.spec

  - it: should provision initContainer correctly when set in values
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
      - ../.lint/initcontainers.yaml
    asserts:
      - contains:
          path: spec.template.spec.initContainers[0].args
          content: "echo test"
      - equal:
          path: spec.template.spec.initContainers[0].name
          value: "teleport-init"
      - equal:
          path: spec.template.spec.initContainers[0].image
          value: "alpine"
      - equal:
          path: spec.template.spec.initContainers[0].resources.limits.cpu
          value: 2
      - equal:
          path: spec.template.spec.initContainers[0].resources.limits.memory
          value: 4Gi
      - equal:
          path: spec.template.spec.initContainers[0].resources.requests.cpu
          value: 1
      - equal:
          path: spec.template.spec.initContainers[0].resources.requests.memory
          value: 2Gi
      - matchSnapshot:
          path: spec.template.spec

  - it: should add insecureSkipProxyTLSVerify to args when set in values
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
    set:
      insecureSkipProxyTLSVerify: true
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: "--insecure"
      - matchSnapshot:
          path: spec.template.spec

  - it: should expose diag port
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].ports
          content:
            name: diag
            containerPort: 3000
            protocol: TCP
      - matchSnapshot:
          path: spec.template.spec

  - it: should set nodeSelector if set in values
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
      - ../.lint/node-selector.yaml
    asserts:
      - equal:
          path: spec.template.spec.nodeSelector
          value:
            gravitational.io/k8s-role: node
      - matchSnapshot:
          path: spec.template.spec

  - it: should not add emptyDir for data when using StatefulSet
    template: statefulset.yaml
    release:
      isupgrade: true
    set:
      unitTestUpgrade: true
    values:
      - ../.lint/stateful.yaml
    asserts:
      - notContains:
          path: spec.template.spec.volumes
          content:
            name: data
            emptyDir: {}
      - matchSnapshot:
          path: spec.template.spec

  - it: should add volumeMount for data volume when using StatefulSet
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
    asserts:
      - notContains:
          path: spec.template.spec.containers[0].volumeMounts
          content:
            name: data
            mountPath: RELEASE-NAME-teleport-data
      - matchSnapshot:
          path: spec.template.spec

  - it: should add volumeClaimTemplate for data volume when using StatefulSet and action is an Upgrade
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
    set:
      # unit test does not support lookup functions, so to test the behavior we use this undoc value
      # https://github.com/helm/helm/issues/8137
      unitTestUpgrade: true
    release:
      isupgrade: true
    asserts:
      - isNotNull:
          path: spec.volumeClaimTemplates[0].spec
      - matchSnapshot:
          path: spec.template.spec

  - it: should add volumeClaimTemplate for data volume when using StatefulSet and is Fresh Install
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
    set:
      # unit test does not support lookup functions, so to test the behavior we use this undoc value
      # https://github.com/helm/helm/issues/8137
      unitTestUpgrade: true
    release:
      isupgrade: false
    asserts:
      - isNotNull:
          path: spec.volumeClaimTemplates
      - matchSnapshot: {}

  - it: should set storage.storageClassName when set in values and action is an Upgrade
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
    release:
      isupgrade: true
    set:
      # unit test does not support lookup functions, so to test the behavior we use this undoc value
      # https://github.com/helm/helm/issues/8137
      unitTestUpgrade: true

      storage:
        storageClassName: helm-lint-storage-class
    asserts:
      - equal:
          path: spec.volumeClaimTemplates[0].spec.storageClassName
          value: helm-lint-storage-class
      - matchSnapshot:
          path: spec.template.spec

  - it: should set storage.requests when set in values and action is an Upgrade
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
    release:
      isupgrade: true
    set:
      storage:
        requests: 256Mi
      # unit test does not support lookup functions, so to test the behavior we use this undoc value
      # https://github.com/helm/helm/issues/8137
      unitTestUpgrade: true
    asserts:
      - equal:
          path: spec.volumeClaimTemplates[0].spec.resources.requests.storage
          value: 256Mi
      - matchSnapshot:
          path: spec.template.spec

  - it: should mount tls.existingCASecretName and set environment when set in values
    template: statefulset.yaml
    values:
      - ../.lint/existing-tls-secret-with-ca.yaml
    asserts:
      - contains:
          path: spec.template.spec.volumes
          content:
            name: teleport-tls-ca
            secret:
              secretName: helm-lint-existing-tls-secret-ca
      - contains:
          path: spec.template.spec.containers[0].volumeMounts
          content:
            mountPath: /etc/teleport-tls-ca
            name: teleport-tls-ca
            readOnly: true
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: SSL_CERT_FILE
            value: /etc/teleport-tls-ca/ca.pem
      - matchSnapshot:
          path: spec.template.spec

  - it: should mount tls.existingCASecretName and set extra environment when set in values
    template: statefulset.yaml
    values:
      - ../.lint/existing-tls-secret-with-ca.yaml
      - ../.lint/extra-env.yaml
    asserts:
      - contains:
          path: spec.template.spec.volumes
          content:
            name: teleport-tls-ca
            secret:
              secretName: helm-lint-existing-tls-secret-ca
      - contains:
          path: spec.template.spec.containers[0].volumeMounts
          content:
            mountPath: /etc/teleport-tls-ca
            name: teleport-tls-ca
            readOnly: true
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: SSL_CERT_FILE
            value: /etc/teleport-tls-ca/ca.pem
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: HTTPS_PROXY
            value: http://username:password@my.proxy.host:3128
      - matchSnapshot:
          path: spec.template.spec

  - it: should set serviceAccountName when set in values
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
      - ../.lint/service-account-name.yaml
    asserts:
      - equal:
          path: spec.template.spec.serviceAccountName
          value: teleport-kube-agent-sa
      - matchSnapshot:
          path: spec.template.spec

  - it: should set default serviceAccountName when not set in values
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
      - ../.lint/backwards-compatibility.yaml
    asserts:
      - equal:
          path: spec.template.spec.serviceAccountName
          value: RELEASE-NAME
      - matchSnapshot:
          path: spec.template.spec

  - it: should set probeTimeoutSeconds when set in values
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
      - ../.lint/probe-timeout-seconds.yaml
    asserts:
      - equal:
          path: spec.template.spec.containers[0].livenessProbe.timeoutSeconds
          value: 5
      - equal:
          path: spec.template.spec.containers[0].readinessProbe.timeoutSeconds
          value: 5
      - matchSnapshot:
          path: spec.template.spec

  - it: should set dnsConfig when set in values
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
      - ../.lint/dnsconfig.yaml
    asserts:
      - notEqual:
          path: spec.template.spec.dnsConfig
          value: null
      - matchSnapshot:
          path: spec.template.spec.dnsConfig

  - it: should set dnsPolicy when set in values
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
      - ../.lint/dnsconfig.yaml
    asserts:
      - equal:
          path: spec.template.spec.dnsPolicy
          value: ClusterFirstWithHostNet

  - it: should install Statefulset when storage is disabled and mode is a Fresh Install
    template: statefulset.yaml
    release:
      isupgrade: false
    values:
      - ../.lint/stateful.yaml
    set:
      storage:
        enabled: false
    asserts:
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: TELEPORT_REPLICA_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: KUBE_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
      - notContains:
          path: spec.template.spec.containers[0].volumeMounts
          content:
            name: data
            mountPath: RELEASE-NAME-teleport-data
      - isNull:
          path: spec.volumeClaimTemplates
      - matchSnapshot:
          path: spec.template.spec

  - it: should generate Statefulset when storage is disabled and mode is a Upgrade
    template: statefulset.yaml
    release:
      isupgrade: true
    values:
      - ../.lint/stateful.yaml
    set:
      unitTestUpgrade: false
      storage:
        enabled: false
    asserts:
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: TELEPORT_REPLICA_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: KUBE_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
      - notContains:
          path: spec.template.spec.containers[0].volumeMounts
          content:
            name: data
            mountPath: RELEASE-NAME-teleport-data
      - isNull:
          path: spec.volumeClaimTemplates
      - matchSnapshot:
          path: spec.template.spec

  - it: sets by default a container security context
    template: statefulset.yaml
    values:
      - ../.lint/initcontainers.yaml
    asserts:
      - matchSnapshot:
          path: spec.template.spec.initContainers[0].securityContext
      - matchSnapshot:
          path: spec.template.spec.containers[0].securityContext

  - it: sets no container security context when manually unset
    template: statefulset.yaml
    values:
      - ../.lint/initcontainers.yaml
      - ../.lint/security-context-empty.yaml
    asserts:
      - equal:
          path: spec.template.spec.initContainers[0].securityContext
          value: null
      - equal:
          path: spec.template.spec.containers[0].securityContext
          value: null

  - it: should enable maintenance schedule export when updater is enabled
    template: statefulset.yaml
    values:
      - ../.lint/existing-tls-secret-with-ca.yaml
      - ../.lint/updater.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: TELEPORT_EXT_UPGRADER
            value: kube

  - it: should set the installation method environment variable
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT
            value: "true"

  - it: should set the hostAliases when specified
    template: statefulset.yaml
    values:
      - ../.lint/stateful.yaml
      - ../.lint/host-aliases.yaml
    asserts:
      - equal:
          path: spec.template.spec.hostAliases
          value:
            - ip: "127.0.0.1"
              hostnames:
                - "foo.local"
                - "bar.local"
            - ip: "10.1.2.3"
              hostnames:
                - "foo.remote"
                - "bar.remote"
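# The hostAliases assertion above implies that the chart copies `hostAliases`
# from the values straight into the pod spec. A values fixture producing that
# output would plausibly look like the sketch below; ../.lint/host-aliases.yaml
# itself is not shown in this diff, so its exact contents are an assumption.
hostAliases:
  - ip: "127.0.0.1"
    hostnames:
      - "foo.local"
      - "bar.local"
  - ip: "10.1.2.3"
    hostnames:
      - "foo.remote"
      - "bar.remote"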
227
teleport-kube-agent/tests/updater_deployment_test.yaml
Normal file
@ -0,0 +1,227 @@
suite: Updater Deployment
templates:
  - updater/deployment.yaml
tests:
  #
  # Basic tests
  #
  - it: does not create a Deployment when updater.enabled is false (default)
    asserts:
      - hasDocuments:
          count: 0
  - it: creates a Deployment when updater.enabled is true
    values:
      - ../.lint/updater.yaml
    asserts:
      - containsDocument:
          kind: Deployment
          apiVersion: apps/v1
          name: RELEASE-NAME-updater
          namespace: NAMESPACE
  #
  # Testing the agent configuration
  #
  - it: sets the updater base image
    values:
      - ../.lint/updater.yaml
    set:
      image: repo.example.com/gravitational/teleport-distroless
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: "--base-image=repo.example.com/gravitational/teleport-distroless"
  - it: sets the updater base enterprise image
    values:
      - ../.lint/updater.yaml
    set:
      enterprise: true
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: "--base-image=public.ecr.aws/gravitational/teleport-ent-distroless"
  - it: sets the updater agent name
    values:
      - ../.lint/updater.yaml
    release:
      name: my-release
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: "--agent-name=my-release"
  - it: sets the updater agent namespace
    values:
      - ../.lint/updater.yaml
    release:
      namespace: my-namespace
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: "--agent-namespace=my-namespace"
  - it: sets the updater version server
    values:
      - ../.lint/updater.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: "--version-server=https://my-custom-version-server/v1"
  - it: sets the updater release channel
    values:
      - ../.lint/updater.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: "--version-channel=custom/preview"
  #
  # Kubernetes-related tests
  #
  - it: sets the deployment annotations
    values:
      - ../.lint/updater.yaml
      - ../.lint/annotations.yaml
    asserts:
      - equal:
          path: metadata.annotations.kubernetes\.io/deployment
          value: test-annotation
      - equal:
          path: metadata.annotations.kubernetes\.io/deployment-different
          value: 3
  - it: sets the pod annotations
    values:
      - ../.lint/updater.yaml
      - ../.lint/annotations.yaml
    asserts:
      - equal:
          path: spec.template.metadata.annotations.kubernetes\.io/pod
          value: test-annotation
      - equal:
          path: spec.template.metadata.annotations.kubernetes\.io/pod-different
          value: 4
  - it: sets the affinity
    values:
      - ../.lint/updater.yaml
      - ../.lint/affinity.yaml
    asserts:
      - isNotNull:
          path: spec.template.spec.affinity
      - matchSnapshot:
          path: spec.template.spec
  - it: sets the tolerations
    values:
      - ../.lint/updater.yaml
      - ../.lint/tolerations.yaml
    asserts:
      - isNotNull:
          path: spec.template.spec.tolerations
      - matchSnapshot:
          path: spec.template.spec
  - it: sets the imagePullSecrets
    values:
      - ../.lint/updater.yaml
      - ../.lint/imagepullsecrets.yaml
    asserts:
      - equal:
          path: spec.template.spec.imagePullSecrets[0].name
          value: myRegistryKeySecretName
  - it: sets the nodeSelector
    values:
      - ../.lint/updater.yaml
      - ../.lint/node-selector.yaml
    asserts:
      - equal:
          path: spec.template.spec.nodeSelector
          value:
            gravitational.io/k8s-role: node
  - it: sets the updater container image and version
    values:
      - ../.lint/updater.yaml
    set:
      teleportVersionOverride: 12.2.1
    asserts:
      - equal:
          path: spec.template.spec.containers[0].image
          value: public.ecr.aws/gravitational/teleport-kube-agent-updater:12.2.1
  - it: sets the updater container imagePullPolicy
    values:
      - ../.lint/updater.yaml
      - ../.lint/image-pull-policy.yaml
    asserts:
      - equal:
          path: spec.template.spec.containers[0].imagePullPolicy
          value: Always
  - it: mounts the tls CA if provided and sets the env var
    values:
      - ../.lint/updater.yaml
      - ../.lint/existing-tls-secret-with-ca.yaml
    asserts:
      - contains:
          path: spec.template.spec.volumes
          content:
            name: teleport-tls-ca
            secret:
              secretName: helm-lint-existing-tls-secret-ca
      - contains:
          path: spec.template.spec.containers[0].volumeMounts
          content:
            mountPath: /etc/teleport-tls-ca
            name: teleport-tls-ca
            readOnly: true
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: SSL_CERT_FILE
            value: /etc/teleport-tls-ca/ca.pem
  - it: sets the updater container extraEnv
    values:
      - ../.lint/updater.yaml
      - ../.lint/extra-env.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: HTTPS_PROXY
            value: http://username:password@my.proxy.host:3128
  - it: sets the pod resources
    values:
      - ../.lint/updater.yaml
      - ../.lint/resources.yaml
    asserts:
      - equal:
          path: spec.template.spec.containers[0].resources.limits.cpu
          value: 2
      - equal:
          path: spec.template.spec.containers[0].resources.limits.memory
          value: 4Gi
      - equal:
          path: spec.template.spec.containers[0].resources.requests.cpu
          value: 1
      - equal:
          path: spec.template.spec.containers[0].resources.requests.memory
          value: 2Gi
  - it: sets the pod priorityClass
    values:
      - ../.lint/updater.yaml
      - ../.lint/priority-class-name.yaml
    asserts:
      - equal:
          path: spec.template.spec.priorityClassName
          value: teleport-kube-agent
  - it: sets the pod service-account
    values:
      - ../.lint/updater.yaml
      - ../.lint/service-account-name.yaml
    asserts:
      - equal:
          path: spec.template.spec.serviceAccountName
          value: teleport-kube-agent-sa-updater
  - it: sets the pod service-account (override)
    values:
      - ../.lint/updater.yaml
      - ../.lint/service-account-name.yaml
    set:
      updater:
        serviceAccount:
          name: distinct-updater-sa
    asserts:
      - equal:
          path: spec.template.spec.serviceAccountName
          value: distinct-updater-sa
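# Every updater test above loads ../.lint/updater.yaml, which is not shown in
# this diff. Judging from the assertions (the Deployment only renders when the
# updater is enabled, and the container gets explicit version-server and
# version-channel flags), a minimal fixture would plausibly contain something
# like the sketch below, alongside the usual agent connection settings; the
# exact key names are an assumption, not taken from the chart.
updater:
  enabled: true
  versionServer: https://my-custom-version-server/v1
  releaseChannel: custom/preview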
39
teleport-kube-agent/tests/updater_role_test.yaml
Normal file
@ -0,0 +1,39 @@
suite: Updater Role
templates:
  - updater/role.yaml
tests:
  #
  # Basic tests
  #
  - it: does not create a Role when updater.enabled is false (default)
    asserts:
      - hasDocuments:
          count: 0
  - it: creates a Role when updater.enabled is true
    values:
      - ../.lint/updater.yaml
    asserts:
      - containsDocument:
          kind: Role
          apiVersion: rbac.authorization.k8s.io/v1
          name: RELEASE-NAME-updater
          namespace: NAMESPACE
  - it: does not create a Role when updater.enabled is true but rbac creation is disabled
    values:
      - ../.lint/updater.yaml
    set:
      rbac:
        create: false
    asserts:
      - hasDocuments:
          count: 0

  #
  # Catch-all content test
  #
  - it: sets the correct role rules
    values:
      - ../.lint/updater.yaml
    asserts:
      - matchSnapshot:
          path: rules
49
teleport-kube-agent/tests/updater_rolebinding_test.yaml
Normal file
@ -0,0 +1,49 @@
suite: Updater RoleBinding
templates:
  - updater/rolebinding.yaml
tests:
  #
  # Basic tests
  #
  - it: does not create a RoleBinding when updater.enabled is false (default)
    asserts:
      - hasDocuments:
          count: 0
  - it: creates a RoleBinding when updater.enabled is true
    values:
      - ../.lint/updater.yaml
    asserts:
      - containsDocument:
          kind: RoleBinding
          apiVersion: rbac.authorization.k8s.io/v1
          name: RELEASE-NAME-updater
          namespace: NAMESPACE
  - it: does not create a RoleBinding when updater.enabled is true but rbac creation is disabled
    values:
      - ../.lint/updater.yaml
    set:
      rbac:
        create: false
    asserts:
      - hasDocuments:
          count: 0

  #
  # Catch-all content test
  #
  - it: sets the correct rolebinding content
    values:
      - ../.lint/updater.yaml
    asserts:
      - equal:
          path: roleRef
          value:
            apiGroup: rbac.authorization.k8s.io
            kind: Role
            name: RELEASE-NAME-updater
      - equal:
          path: subjects
          value:
            - kind: ServiceAccount
              name: RELEASE-NAME-updater
              namespace: NAMESPACE
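# Taken together, the containsDocument, roleRef and subjects assertions above
# pin down the rendered RoleBinding. Reconstructed purely from those assertions
# (not copied from the chart templates), the expected document is roughly:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: RELEASE-NAME-updater
  namespace: NAMESPACE
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: RELEASE-NAME-updater
subjects:
  - kind: ServiceAccount
    name: RELEASE-NAME-updater
    namespace: NAMESPACE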
Some files were not shown because too many files have changed in this diff