diff --git a/10_delete_kube-dns.sh b/10_delete_kube-dns.sh new file mode 100755 index 0000000..681fdfe --- /dev/null +++ b/10_delete_kube-dns.sh @@ -0,0 +1,10 @@ +#!/bin/bash +## +## Script to delete the kube-dns components (in case it goes wrong) +## +POD=`kubectl get pods -n kube-system | grep kube | awk '{ print $1 }'` +kubectl delete pods $POD -n kube-system +kubectl delete configmaps kube-dns -n kube-system +kubectl delete deployments kube-dns -n kube-system +kubectl delete serviceaccounts kube-dns -n kube-system +kubectl delete services kube-dns -n kube-system diff --git a/11_delete_all.sh b/11_delete_all.sh new file mode 100755 index 0000000..998eadc --- /dev/null +++ b/11_delete_all.sh @@ -0,0 +1,53 @@ +#!/bin/bash +## +## Script to completely delete the GCE infrastructure +## +echo "This script will completely remove the Kubernetes items that have been created" + +. .gce_kubernetes.config + +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT + +gcloud compute forwarding-rules delete kubernetes-forwarding-rule --region $(gcloud config get-value compute/region) --quiet +echo "Forwarding rule deleted" + +gcloud compute target-pools delete kubernetes-target-pool --quiet +echo "Deleted the target pool" + +gcloud compute firewall-rules delete $KUBE_NETWORK-allow-health-check --quiet +echo "Deleted the firewall rule to check health" + +gcloud compute http-health-checks delete kubernetes --quiet +echo "Deleted the load balancer health check" + +KUBE_CONTROLLERS=$((KUBE_CONTROLLERS-1)) +KUBE_WORKERS=$((KUBE_WORKERS-1)) +for ((i=0; i<=$KUBE_CONTROLLERS; i++)); do + gcloud compute instances delete controller-${i} --quiet +done +for ((i=0; i<=$KUBE_WORKERS; i++)); do + gcloud compute instances delete worker-${i} --quiet +done +echo "Compute instances deleted" + +gcloud compute addresses delete $KUBE_NETWORK --region $(gcloud config get-value compute/region) --quiet +echo "Deleted the reserved static address" + +gcloud compute firewall-rules delete $KUBE_EXT_FW_NAME --quiet +gcloud compute firewall-rules delete $KUBE_INT_FW_NAME --quiet +gcloud compute firewall-rules delete $KUBE_NETWORK-allow-nginx-service --quiet +echo "Internal and external firewall rules deleted" + +gcloud compute routes delete kubernetes-route-10-200-0-0-24 --quiet +gcloud compute routes delete kubernetes-route-10-200-1-0-24 --quiet +gcloud compute networks subnets delete $KUBE_SUBNET --quiet +gcloud compute networks delete $KUBE_NETWORK --quiet +echo "Network, subnet, and routes deleted - all items created should now be deleted" + +rm -rf certs-dir +echo "SSL keys, certificates, and kubeconfigs deleted" + +rm -f ~/bin/cfssl* +echo "CloudFlare SSL tools deleted" diff --git a/12_uninstall_etcd.sh b/12_uninstall_etcd.sh new file mode 100755 index 0000000..25898c1 --- /dev/null +++ b/12_uninstall_etcd.sh @@ -0,0 +1,19 @@ +#!/bin/bash +## +## Script to uninstall etcd from controllers +## +. 
.gce_kubernetes.config + +echo "######################################" +echo "Set the gcloud compute region and zone" +echo "######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" +echo "" + +KUBE_CONTROLLERS=$((KUBE_CONTROLLERS-1)) +for (( i=0; i<=$KUBE_CONTROLLERS; i++ )); do + gcloud compute ssh controller-${i} -- 'sudo yum remove -y etcd; sudo rm -rf /var/lib/etcd /etc/etcd' +done diff --git a/3_install_configure_kubernetes.sh b/3_install_configure_kubernetes.sh new file mode 100755 index 0000000..11addcd --- /dev/null +++ b/3_install_configure_kubernetes.sh @@ -0,0 +1,24 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region us-west1 +gcloud config set compute/zone us-west1-c +gcloud config set project kubernetescentos-205702 +echo "Compute region and zone set" + +for i in 0 1; do + gcloud compute scp controller_setup.sh controller-${i}: +done + +gcloud compute scp controller-0/* controller-0: +gcloud compute scp controller-1/* controller-1: + +echo "Controller setup scripts copied across" +for i in 0 1; do + gcloud compute ssh controller-${i} -- ~/controller_setup.sh +done + diff --git a/5.1_worker_kubelet_setup.sh b/5.1_worker_kubelet_setup.sh new file mode 100755 index 0000000..532df66 --- /dev/null +++ b/5.1_worker_kubelet_setup.sh @@ -0,0 +1,19 @@ +#!/bin/bash +. .gce_kubernetes.config +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" + +KUBE_WORKERS=$((KUBE_WORKERS-1)) + +for ((i=0; i<=$KUBE_WORKERS; i++)); do + echo "KUBE_POD_ADDR=$KUBE_POD_ADDR.$i.0" > .worker_variables_$i + echo "KUBE_NODE_POD_PREFIX=$KUBE_NODE_POD_PREFIX" >> .worker_variables_$i + gcloud compute scp .worker_variables_$i worker-${i}:.worker_variables + gcloud compute scp worker_kubelet_setup.sh worker-${i}: + gcloud compute ssh worker-${i} -- sudo ~/worker_kubelet_setup.sh +done diff --git a/5.2_worker_kube-proxy_setup.sh b/5.2_worker_kube-proxy_setup.sh new file mode 100755 index 0000000..7488df4 --- /dev/null +++ b/5.2_worker_kube-proxy_setup.sh @@ -0,0 +1,21 @@ +. 
.gce_kubernetes.config +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" + +KUBE_WORKERS=$((KUBE_WORKERS-1)) + +sed "s|KUBE_POD_CIDR|$KUBE_POD_CIDR|g" worker_kube-proxy_setup.sh.template > worker_kube-proxy_setup.sh +chmod 755 worker_kube-proxy_setup.sh +for ((i=0; i<=$KUBE_WORKERS; i++)); do + gcloud compute scp worker_kube-proxy_setup.sh worker-${i}: +done + +echo "Worker kube-proxy setup scripts copied across" +for ((i=0; i<=$KUBE_WORKERS; i++)); do + gcloud compute ssh worker-${i} -- sudo ~/worker_kube-proxy_setup.sh +done diff --git a/5_worker_node_setup.sh b/5_worker_node_setup.sh new file mode 100755 index 0000000..c2579dd --- /dev/null +++ b/5_worker_node_setup.sh @@ -0,0 +1,20 @@ +#!/bin/bash +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region us-west1 +gcloud config set compute/zone us-west1-c +gcloud config set project kubernetescentos-205702 +echo "Compute region and zone set" + +for i in 0 1; do + gcloud compute scp worker_setup.sh worker-${i}: +done + +gcloud compute scp worker-0/* worker-0: +gcloud compute scp worker-1/* worker-1: + +echo "Worker setup scripts copied across" +for i in 0 1; do + gcloud compute ssh worker-${i} -- ~/worker_setup.sh +done diff --git a/6_client_admin_config.sh b/6_client_admin_config.sh new file mode 100755 index 0000000..58bc617 --- /dev/null +++ b/6_client_admin_config.sh @@ -0,0 +1,24 @@ +#!/bin/bash +## +## Script to set up the client (this system) for Kubernetes access +## +. .gce_kubernetes.config +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" + +cd certs-dir +rm -f admin.kubeconfig +KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe $KUBE_NETWORK --region $GCE_REGION --format 'value(address)') +echo $KUBERNETES_PUBLIC_ADDRESS +kubectl config set-cluster $KUBE_CLUSTER --certificate-authority=ca.pem --embed-certs=true --server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 --kubeconfig=admin.kubeconfig +kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --kubeconfig=admin.kubeconfig +kubectl config set-context $KUBE_NETWORK --cluster=$KUBE_CLUSTER --user=admin --kubeconfig=admin.kubeconfig +kubectl config use-context $KUBE_NETWORK --kubeconfig=admin.kubeconfig + +kubectl get componentstatuses --kubeconfig=admin.kubeconfig +kubectl get nodes --kubeconfig=admin.kubeconfig diff --git a/7_pod_routing.sh b/7_pod_routing.sh new file mode 100755 index 0000000..b0e02f6 --- /dev/null +++ b/7_pod_routing.sh @@ -0,0 +1,22 @@ +#!/bin/bash +## +## Script to set up the pod routing between nodes +## +. 
.gce_kubernetes.config +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" + +KUBE_WORKERS=$((KUBE_WORKERS-1)) + +for ((i=0; i<=$KUBE_WORKERS; i++)); do + gcloud compute routes create kubernetes-pod-route-worker-${i} --network $KUBE_NETWORK --next-hop-address $KUBE_SUBNET_ADDR.2${i} --destination-range $KUBE_POD_ADDR.${i}.0/24 +done +sleep 10 +gcloud compute routes list --filter "network: $KUBE_NETWORK" + +echo "Routes should be listed above as created" diff --git a/8_deploying_kube-dns.sh b/8_deploying_kube-dns.sh new file mode 100755 index 0000000..52fbf07 --- /dev/null +++ b/8_deploying_kube-dns.sh @@ -0,0 +1,22 @@ +#!/bin/bash +## +## Script to deploy the kube-dns pod to the cluster +## +echo "Taking the kube-dns.yaml file from the Kubernetes the Hard Way author" +wget https://storage.googleapis.com/kubernetes-the-hard-way/kube-dns.yaml +kubectl create -f kube-dns.yaml --kubeconfig=certs-dir/admin.kubeconfig + +sleep 2 +kubectl get pods -l k8s-app=kube-dns -n kube-system -o wide --kubeconfig=certs-dir/admin.kubeconfig + +echo "Verifying this has worked" +kubectl run busybox --image=busybox --kubeconfig=certs-dir/admin.kubeconfig --command -- sleep 3600 + +echo "Is the busybox pod running?" +kubectl get pods -l run=busybox --kubeconfig=certs-dir/admin.kubeconfig + +echo "Get the pod name" +POD_NAME=$(kubectl get pods -l run=busybox --kubeconfig=certs-dir/admin.kubeconfig -o jsonpath="{.items[0].metadata.name}") + +echo "Is the DNS pod functional?" +kubectl exec -ti --kubeconfig=certs-dir/admin.kubeconfig $POD_NAME -- nslookup kubernetes diff --git a/9_further_tests.sh b/9_further_tests.sh new file mode 100755 index 0000000..3cad8f1 --- /dev/null +++ b/9_further_tests.sh @@ -0,0 +1,55 @@ +#!/bin/bash +## +## Further set of tests to ensure that Kubernetes is working as expected +## +. .gce_kubernetes.config + +echo "######################################" +echo "Set the gcloud compute region and zone" +echo "######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" +echo "" + +echo "Testing the encryption of data at rest via the key created earlier" +kubectl create secret generic super-secret --from-literal="mykey=mydata" --kubeconfig=certs-dir/admin.kubeconfig + +gcloud compute ssh controller-0 \ +--command "sudo ETCDCTL_API=3 etcdctl get \ +--endpoints=https://127.0.0.1:2379 \ +--cacert=/etc/etcd/ca.pem \ +--cert=/etc/etcd/kubernetes.pem \ +--key=/etc/etcd/kubernetes-key.pem \ +/registry/secrets/default/super-secret | hexdump -C" + +echo "Output should be prefixed with k8s:enc:aescbc:v1:key1 + +Testing application (nginx) deployments" +kubectl run nginx --image=nginx --kubeconfig=certs-dir/admin.kubeconfig +echo "Waiting 10 seconds for the pod to start ..." 
+sleep 10 +kubectl get pods -l run=nginx -o wide --kubeconfig=certs-dir/admin.kubeconfig + +echo "nginx should be listed as running" +POD_NAME=$(kubectl get pods -l run=nginx --kubeconfig=certs-dir/admin.kubeconfig -o jsonpath="{.items[0].metadata.name}") +echo "You'll need to switch to another terminal and test with 'curl --head http://127.0.0.1:8080' Press Ctrl+C once completed" +kubectl port-forward $POD_NAME 8080:80 --kubeconfig=certs-dir/admin.kubeconfig + +echo "Displaying the logs from the nginx container" +kubectl logs $POD_NAME --kubeconfig=certs-dir/admin.kubeconfig + +echo "Executing a command inside a container" +kubectl exec -it $POD_NAME --kubeconfig=certs-dir/admin.kubeconfig -- nginx -v + +echo "Exposing a container as a service (in this example NodePort)" +kubectl expose deployment nginx --port 80 --type NodePort --kubeconfig=certs-dir/admin.kubeconfig +NODE_PORT=$(kubectl get svc nginx --kubeconfig=certs-dir/admin.kubeconfig --output=jsonpath='{range .spec.ports[0]}{.nodePort}') +echo "Creating a firewall rule to allow access to the exposed node" +gcloud compute firewall-rules create kubernetes-centos-allow-nginx-service --allow=tcp:${NODE_PORT} --network $KUBE_NETWORK +echo "Retrieving the external IP" +EXTERNAL_IP=$(gcloud compute instances describe worker-0 --format 'value(networkInterfaces[0].accessConfigs[0].natIP)') +curl -I http://${EXTERNAL_IP}:${NODE_PORT} + +echo "Functional tests should now be completed successfully" diff --git a/certs-dir/admin-csr.json b/certs-dir/admin-csr.json new file mode 100644 index 0000000..5e8d550 --- /dev/null +++ b/certs-dir/admin-csr.json @@ -0,0 +1,16 @@ +{ + "CN": "admin", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "US", + "L": "Portland", + "O": "system:masters", + "OU": "Kubernetes - CentOS", + "ST": "Oregon" + } + ] +} diff --git a/certs-dir/admin-key.pem b/certs-dir/admin-key.pem new file mode 100644 index 0000000..9eeb6de --- /dev/null +++ b/certs-dir/admin-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAvyyv+YOpqaf5hx2sS7NuTui76SedgM4lIwaapTmbeaeEFeKY +seWVxwIsCmB6WniUmCaW2U/bOtWHcoO+hu/vEMF1PLXH40KJBZZAey6Kvi5DtFFA +x7GCbyMSF3Ho7IlVxDWjROp6JF2kUrnPtmSGIL7Zuar2KUPUSOWPJgkFGvBlQbYY +ws6cjDDKqznCrnv6bN4tkOilORSlR4Jy6Qy1ZbPC+Xv1aOpXpPig73R7TSdyyp6h +qkT47VrIJKIPt5TJY2k1EvMCzwKoCbeZBLm8Q9S70f8JMjyecZ7A9hyhlzfQJs6G +DxROy8ZxM8/3euUPi6WI2xjXa63JkjkFv1vTmQIDAQABAoIBAEXXxqaqwmZYFuKV +Jag2cntjrZsdvP4c0hZ8yCzK4hFM8QWp1k1P6itdkMdtveWlnewEVSIpeZeCAU0F +8qqLp9SASRuX8eIGIdcnsIEQXOHkNxn+gWMzZ8rAjlCAbi9UcUAQtIht/ZehmnIa +rlcdXJ9D+nlJi0tCG6FSNL0ZmILhZQT8zx83ltinvDlHYOZWiwKLLXY+mlsCzwQO +XGwEurnDfhkk6mPMdsb2a3WxheetwyZwTo6KR06lX+BEIPXW6YMN1ly5Avivf6Jo +qhz7DGudntDV6Z/FFVOU05+/yhLNtSPzoGO7FYlgJNTAPKSF6h/9rd5zmBENnkOv +mIYWSm0CgYEA37xeG3d4/QMGXQHu6fusaY3gH54MxPr3S6MnQPyCMhblPM7h353y +x/+J1h1ZojFfsQCrci7mIQt9AMClNKAIkGZH77MDvC75W0SXaJQBFgablBclGUtv +9dG2GU9ZmvrB7bBQtlnl13NwzCyT5242m6jRC2tqlNW/Cg1PkffSdicCgYEA2r5C +ZqdPhy06q7/8NFfZRUQ+hFDiWVLbAXHVDBzg1Sk2C8y2+uUJLOv/julyIbSYQ8Jo +O0Ql0vxzP4//Bh3/FWqqmGn7MKq5Lkxzswa37cBRdCFm2oKYufBM/2fbI2jER4ch +e1U3F4HYNAOpqaz1zfe5c4X3/KyPdQN/KQYhQD8CgYAG/Bx8AwjV7Ug/kxXgXCkP +yowasJZ5d9RMmjVZhAiUg754XYUTtBymPcYR1yy7MYtpPTodAHXSxkcKxPjjYbOi +vm60fwQ41caXINcRPr2h8GbfArH3zjEanYdv1thXTEjNoNmEN95DEamN1j5v1TMD +PQv5rUrQhrPGHpBDYFdDtQKBgQCj26QYC7sKUrpBkCZX+3q5ZX7DVWfJa8S81021 +rihJ5X4j5q0nUszFxt+AaaViVEs/bvDvfIQtAYk/kxmBtpSvsWCrnbBZ63mekgyV +jjHXupoJuT+37d6EYtr69Ax3APMPdc2B+vqy9ERF6nx8qyDSsoBMZo22zHkGOaFW 
+1M2xtwKBgQC3PV+SD0t2fqjNeh06Xn6D5Zqjjq6zpmAIUH1h1fLTApBsyZ9RdLVg +gVtpbBF0lCgZCUb6qXkLFDdIaZHSHR+r4UNhsNYxuPQvj/cVIWGJeljU03FWzRXO +hWHOBeac5co0Gx/vp1CMoADaOTf4h49HkjACTxstfHF2YNLaN3ie3g== +-----END RSA PRIVATE KEY----- diff --git a/certs-dir/admin.csr b/certs-dir/admin.csr new file mode 100644 index 0000000..9995678 --- /dev/null +++ b/certs-dir/admin.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICvTCCAaUCAQAweDELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8G +A1UEBxMIUG9ydGxhbmQxFzAVBgNVBAoTDnN5c3RlbTptYXN0ZXJzMRwwGgYDVQQL +ExNLdWJlcm5ldGVzIC0gQ2VudE9TMQ4wDAYDVQQDEwVhZG1pbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL8sr/mDqamn+YcdrEuzbk7ou+knnYDOJSMG +mqU5m3mnhBXimLHllccCLApgelp4lJgmltlP2zrVh3KDvobv7xDBdTy1x+NCiQWW +QHsuir4uQ7RRQMexgm8jEhdx6OyJVcQ1o0TqeiRdpFK5z7ZkhiC+2bmq9ilD1Ejl +jyYJBRrwZUG2GMLOnIwwyqs5wq57+mzeLZDopTkUpUeCcukMtWWzwvl79WjqV6T4 +oO90e00ncsqeoapE+O1ayCSiD7eUyWNpNRLzAs8CqAm3mQS5vEPUu9H/CTI8nnGe +wPYcoZc30CbOhg8UTsvGcTPP93rlD4uliNsY12utyZI5Bb9b05kCAwEAAaAAMA0G +CSqGSIb3DQEBCwUAA4IBAQCoq9kBIb72a6dNolRP8j/e/iEUcZO+j4M2mpAJUm0s +A4C16vlvRo59htdSAomCDk1DbZPfOrNrt2MYJRBlm/836F541um6RudRUD9fEWZo +jbtKfKyv4rCaUu+glYznUgVGgEpquVaTa1kK2afBQQtxfpX+OPDctFQSiPaA+nWZ +x/5ykTfy9V/i3PNL8PK/oakvrCG4Dl0z3C1C41YjuKgpIRpuB9ntf6u0d/8LX55L +To5+0nCv5xdvflvRIzyUc2KPDylp70/BdfPeDNMthQIlDVZhMwio3tx5O3tmLrJZ +s2NECAchfxxWH2cjvIqxOy6FhizmfIwA96okN08HxUdm +-----END CERTIFICATE REQUEST----- diff --git a/certs-dir/admin.kubeconfig b/certs-dir/admin.kubeconfig new file mode 100644 index 0000000..fa69be4 --- /dev/null +++ b/certs-dir/admin.kubeconfig @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR4RENDQXF5Z0F3SUJBZ0lVS2k0Rm5vaytkOG1OLytUdHR0a1VMd2QwbW53d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2FERUxNQWtHQTFVRUJoTUNWVk14RHpBTkJnTlZCQWdUQms5eVpXZHZiakVSTUE4R0ExVUVCeE1JVUc5eQpkR3hoYm1ReEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhDekFKQmdOVkJBc1RBa05CTVJNd0VRWURWUVFECkV3cExkV0psY201bGRHVnpNQjRYRFRFNE1EY3dOREF4TlRFd01Gb1hEVEl6TURjd016QXhOVEV3TUZvd2FERUwKTUFrR0ExVUVCaE1DVlZNeER6QU5CZ05WQkFnVEJrOXlaV2R2YmpFUk1BOEdBMVVFQnhNSVVHOXlkR3hoYm1ReApFekFSQmdOVkJBb1RDa3QxWW1WeWJtVjBaWE14Q3pBSkJnTlZCQXNUQWtOQk1STXdFUVlEVlFRREV3cExkV0psCmNtNWxkR1Z6TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUE3WDlQR1R4MnQ0NWgKb2NIME9tT0hHS2plN1ZkUXBCL012d0RkYkFadnplbGlqK3RXZjROUlIyNmYvaCt2aUN6bllFWGJPN251bEgvYwovL1FLd3NZak9kbnMzR3JNREsxbUNzanFTS3VvQmZxRWJZVVBHeDc4a2s5UHUzZGV0VFVRclN3djBtd1BnaDdjCmkxMU11QkVVS3UzRVR0bnpOU1AvRTlkZ0w0SHBrelNJMFNQZFpRWVd0S091RFNEYzljQ01jRGRwYVVXU2pKNVkKZ0FwdlhIMmxnTHVpYldZK0VpWEl6WFI0cm1OSklZMHRKNnlnMU9wTkhZbU82SUpHM3FBRytOYXBOb21RdElHbQpnWGJtQy9CQzkva0VjdzdFVDZPQjl0T3ZTRjFRME5LZm9TS0ExUTZjanh3RUVEblh3bHh6ZUk4eUVMOWZWVjJVCjFaUTJnRzQwZFFJREFRQUJvMll3WkRBT0JnTlZIUThCQWY4RUJBTUNBUVl3RWdZRFZSMFRBUUgvQkFnd0JnRUIKL3dJQkFqQWRCZ05WSFE0RUZnUVUzNnJOOGkrQ2hxRlI3Szdpck1kRXFCMnYrZnd3SHdZRFZSMGpCQmd3Rm9BVQozNnJOOGkrQ2hxRlI3Szdpck1kRXFCMnYrZnd3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUNhVVhxeFRXUktjCkxIakNQYXVuMFR3cFhJTk8wMC94L3o5RnBqNUlocndzVktsbytxdW9iamlWeTlWTWY3UVJsYnpWaTBIRlpJU3MKcUJNQWNITnZCeE9tdUNBZTJna0hEV0V2WTNHL1R3aksvL1IraitkOUVOMTBXME5KZEZUQlRyL056T1FVZzVZZQpQbG9zelB4MEpwaTJvTXBUQ2sxQWFoV1A5eEJnQkZWQitWYW8zeTdtb0JHb05pWjM0OHR3NDhPWHA3cEc1U2FnCkE2L0E1WXpHSjRmWU1telU2NXF4ZGROYTMrYTJYbGVZeXk4ZzRvbStmRHVLVDU2SGlaaVBRS0s4T1FXNXNwNTEKOTh4a1pQUVdQbHlmVVI1aURVN3psNHF6VDlQSzU4bUZWRFRibmYzZys5aUtUeUF2NTB2eWZsa0c1MjJzeGRpNQpURkJQUEluWjFqST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server: https://127.0.0.1:6443 + name: kube-cluster +contexts: 
+- context: + cluster: kube-cluster + user: admin + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: admin + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ3VENDQXRXZ0F3SUJBZ0lVZUdRR2ZwRW8zY2JxbXZIT0NFb05UaC8ybjY4d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2FERUxNQWtHQTFVRUJoTUNWVk14RHpBTkJnTlZCQWdUQms5eVpXZHZiakVSTUE4R0ExVUVCeE1JVUc5eQpkR3hoYm1ReEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhDekFKQmdOVkJBc1RBa05CTVJNd0VRWURWUVFECkV3cExkV0psY201bGRHVnpNQjRYRFRFNE1EY3dOREF4TlRFd01Gb1hEVEU1TURjd05EQXhOVEV3TUZvd2VERUwKTUFrR0ExVUVCaE1DVlZNeER6QU5CZ05WQkFnVEJrOXlaV2R2YmpFUk1BOEdBMVVFQnhNSVVHOXlkR3hoYm1ReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sd3dHZ1lEVlFRTEV4TkxkV0psY201bGRHVnpJQzBnClEyVnVkRTlUTVE0d0RBWURWUVFERXdWaFpHMXBiakNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBTDhzci9tRHFhbW4rWWNkckV1emJrN291K2tubllET0pTTUdtcVU1bTNtbmhCWGltTEhsbGNjQwpMQXBnZWxwNGxKZ21sdGxQMnpyVmgzS0R2b2J2N3hEQmRUeTF4K05DaVFXV1FIc3VpcjR1UTdSUlFNZXhnbThqCkVoZHg2T3lKVmNRMW8wVHFlaVJkcEZLNXo3WmtoaUMrMmJtcTlpbEQxRWpsanlZSkJScndaVUcyR01MT25Jd3cKeXFzNXdxNTcrbXplTFpEb3BUa1VwVWVDY3VrTXRXV3p3dmw3OVdqcVY2VDRvTzkwZTAwbmNzcWVvYXBFK08xYQp5Q1NpRDdlVXlXTnBOUkx6QXM4Q3FBbTNtUVM1dkVQVXU5SC9DVEk4bm5HZXdQWWNvWmMzMENiT2hnOFVUc3ZHCmNUUFA5M3JsRDR1bGlOc1kxMnV0eVpJNUJiOWIwNWtDQXdFQUFhTi9NSDB3RGdZRFZSMFBBUUgvQkFRREFnV2cKTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwRwpBMVVkRGdRV0JCVElVOXY3bS9lajdxd3BHZHFIbEdwZ3VTV3RoekFmQmdOVkhTTUVHREFXZ0JUZnFzM3lMNEtHCm9WSHNydUtzeDBTb0hhLzUvREFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBeC9uOE9xVkZjc0pkYXNLaTJpTVEKS1FDeGpoeldlL3dSZHVMUTErTmtoZWJmbzhHT2pUWTRsWGt5ekhpK1MyeE5pWWwwdGRCLzh1OTJYSjExUjRpTQo0K09vcE1PVWt5dDVuK2tXMzhyMUQzSnVlNUFjamxBSHNmK2lNakdEQTRrL3FRa2FBQStxMU54YzRtUEZDZ1p2CjBhOFhwcExhWEJIeG1pZHU2dXU3Sithb2lOSlIvQmhSTDZRTzdFdTBMNExpSXFyaFhVTXZYR2pmSURlTXZlUEoKa0JkMERaN2k3RkpiZXhNUEE0S0FxR1lYMXlYRUFKSEY2SGRrWUN2M05GcnRBYVFyS3hpSTliL2NDL1Z1dHR1WApsSldmUS9NTzFtMi9LU2NxbGYxRmgzRXNSeFFuc215VHN6SXZlUlZWek1QTmNBQUYydUhHWC9seUFFL3cvRUd2CkZRPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdnl5ditZT3BxYWY1aHgyc1M3TnVUdWk3NlNlZGdNNGxJd2FhcFRtYmVhZUVGZUtZCnNlV1Z4d0lzQ21CNlduaVVtQ2FXMlUvYk90V0hjb08raHUvdkVNRjFQTFhINDBLSkJaWkFleTZLdmk1RHRGRkEKeDdHQ2J5TVNGM0hvN0lsVnhEV2pST3A2SkYya1VyblB0bVNHSUw3WnVhcjJLVVBVU09XUEpna0ZHdkJsUWJZWQp3czZjakRES3F6bkNybnY2Yk40dGtPaWxPUlNsUjRKeTZReTFaYlBDK1h2MWFPcFhwUGlnNzNSN1RTZHl5cDZoCnFrVDQ3VnJJSktJUHQ1VEpZMmsxRXZNQ3p3S29DYmVaQkxtOFE5UzcwZjhKTWp5ZWNaN0E5aHlobHpmUUpzNkcKRHhST3k4WnhNOC8zZXVVUGk2V0kyeGpYYTYzSmtqa0Z2MXZUbVFJREFRQUJBb0lCQUVYWHhxYXF3bVpZRnVLVgpKYWcyY250anJac2R2UDRjMGhaOHlDeks0aEZNOFFXcDFrMVA2aXRka01kdHZlV2xuZXdFVlNJcGVaZUNBVTBGCjhxcUxwOVNBU1J1WDhlSUdJZGNuc0lFUVhPSGtOeG4rZ1dNelo4ckFqbENBYmk5VWNVQVF0SWh0L1plaG1uSWEKcmxjZFhKOUQrbmxKaTB0Q0c2RlNOTDBabUlMaFpRVDh6eDgzbHRpbnZEbEhZT1pXaXdLTExYWSttbHNDendRTwpYR3dFdXJuRGZoa2s2bVBNZHNiMmEzV3hoZWV0d3lad1RvNktSMDZsWCtCRUlQWFc2WU1OMWx5NUF2aXZmNkpvCnFoejdER3VkbnREVjZaL0ZGVk9VMDUrL3loTE50U1B6b0dPN0ZZbGdKTlRBUEtTRjZoLzlyZDV6bUJFTm5rT3YKbUlZV1NtMENnWUVBMzd4ZUczZDQvUU1HWFFIdTZmdXNhWTNnSDU0TXhQcjNTNk1uUVB5Q01oYmxQTTdoMzUzeQp4LytKMWgxWm9qRmZzUUNyY2k3bUlRdDlBTUNsTktBSWtHWkg3N01EdkM3NVcwU1hhSlFCRmdhYmxCY2xHVXR2CjlkRzJHVTlabXZyQjdiQlF0bG5sMTNOd3pDeVQ1MjQybTZqUkMydHFsTlcvQ2cxUGtmZlNkaWNDZ1lFQTJyNUMKWnFkUGh5MDZxNy84TkZmWlJVUStoRkRpV1ZMYkFYSFZEQnpnMVNrMkM4eTIrdVVKTE92L2p1bHlJYlNZUThKbwpPMFFsMHZ4elA0Ly9CaDMvRldxcW1HbjdNS3E1TGt4enN3YTM3Y0JSZENGbTJvS1l1ZkJNLzJmYkkyakVSNGNoCmUxVTNGNEhZTkFPcHFhejF6ZmU1YzRYMy9LeVBkUU4vS1FZaFFEOENnWUFHL0J4OEF3alY3VWcva3hYZ1hDa1AKeW93YXNKWjVkOVJNbWpWWmhBaVVnNzU0WFlVVHRCeW1QY1lSMXl5N01ZdHBQVG9kQUhYU3hrY0t4UGpqWWJPaQp2bTYwZndRNDFjYVhJTmNSUHIyaDhHYmZBckgzempFYW5ZZHYxdGhYVEVqTm9ObUVOOTVERWFtTjFqNXYxVE1EClBRdjVyVXJRaHJQR0hwQkRZRmREdFFLQmdRQ2oyNlFZQzdzS1VycEJrQ1pYKzNxNVpYN0RWV2ZKYThTODEwMjEKcmloSjVYNGo1cTBuVXN6Rnh0K0FhYVZpVkVzL2J2RHZmSVF0QVlrL2t4bUJ0cFN2c1dDcm5iQlo2M21la2d5VgpqakhYdXBvSnVUKzM3ZDZFWXRyNjlBeDNBUE1QZGMyQit2cXk5RVJGNm54OHF5RFNzb0JNWm8yMnpIa0dPYUZXCjFNMnh0d0tCZ1FDM1BWK1NEMHQyZnFqTmVoMDZYbjZENVpxampxNnpwbUFJVUgxaDFmTFRBcEJzeVo5UmRMVmcKZ1Z0cGJCRjBsQ2daQ1ViNnFYa0xGRGRJYVpIU0hSK3I0VU5oc05ZeHVQUXZqL2NWSVdHSmVsalUwM0ZXelJYTwpoV0hPQmVhYzVjbzBHeC92cDFDTW9BRGFPVGY0aDQ5SGtqQUNUeHN0ZkhGMllOTGFOM2llM2c9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= diff --git a/certs-dir/admin.pem b/certs-dir/admin.pem new file mode 100644 index 0000000..91e268f --- /dev/null +++ b/certs-dir/admin.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID7TCCAtWgAwIBAgIUeGQGfpEo3cbqmvHOCEoNTh/2n68wDQYJKoZIhvcNAQEL +BQAwaDELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9y +dGxhbmQxEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAkNBMRMwEQYDVQQD +EwpLdWJlcm5ldGVzMB4XDTE4MDcwNDAxNTEwMFoXDTE5MDcwNDAxNTEwMFoweDEL +MAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9ydGxhbmQx +FzAVBgNVBAoTDnN5c3RlbTptYXN0ZXJzMRwwGgYDVQQLExNLdWJlcm5ldGVzIC0g +Q2VudE9TMQ4wDAYDVQQDEwVhZG1pbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAL8sr/mDqamn+YcdrEuzbk7ou+knnYDOJSMGmqU5m3mnhBXimLHllccC +LApgelp4lJgmltlP2zrVh3KDvobv7xDBdTy1x+NCiQWWQHsuir4uQ7RRQMexgm8j +Ehdx6OyJVcQ1o0TqeiRdpFK5z7ZkhiC+2bmq9ilD1EjljyYJBRrwZUG2GMLOnIww +yqs5wq57+mzeLZDopTkUpUeCcukMtWWzwvl79WjqV6T4oO90e00ncsqeoapE+O1a +yCSiD7eUyWNpNRLzAs8CqAm3mQS5vEPUu9H/CTI8nnGewPYcoZc30CbOhg8UTsvG +cTPP93rlD4uliNsY12utyZI5Bb9b05kCAwEAAaN/MH0wDgYDVR0PAQH/BAQDAgWg +MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G +A1UdDgQWBBTIU9v7m/ej7qwpGdqHlGpguSWthzAfBgNVHSMEGDAWgBTfqs3yL4KG +oVHsruKsx0SoHa/5/DANBgkqhkiG9w0BAQsFAAOCAQEAx/n8OqVFcsJdasKi2iMQ +KQCxjhzWe/wRduLQ1+Nkhebfo8GOjTY4lXkyzHi+S2xNiYl0tdB/8u92XJ11R4iM 
+4+OopMOUkyt5n+kW38r1D3Jue5AcjlAHsf+iMjGDA4k/qQkaAA+q1Nxc4mPFCgZv +0a8XppLaXBHxmidu6uu7J+aoiNJR/BhRL6QO7Eu0L4LiIqrhXUMvXGjfIDeMvePJ +kBd0DZ7i7FJbexMPA4KAqGYX1yXEAJHF6HdkYCv3NFrtAaQrKxiI9b/cC/VuttuX +lJWfQ/MO1m2/KScqlf1Fh3EsRxQnsmyTszIveRVVzMPNcAAF2uHGX/lyAE/w/EGv +FQ== +-----END CERTIFICATE----- diff --git a/certs-dir/ca-config.json b/certs-dir/ca-config.json new file mode 100644 index 0000000..a63e0dd --- /dev/null +++ b/certs-dir/ca-config.json @@ -0,0 +1,13 @@ +{ + "signing": { + "default": { + "expiry": "8760h" + }, + "profiles": { + "kubernetes": { + "usages": ["signing", "key encipherment", "server auth", "client auth"], + "expiry": "8760h" + } + } + } +} diff --git a/certs-dir/ca-csr.json b/certs-dir/ca-csr.json new file mode 100644 index 0000000..8145e50 --- /dev/null +++ b/certs-dir/ca-csr.json @@ -0,0 +1,16 @@ +{ + "CN": "Kubernetes", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "US", + "L": "Portland", + "O": "Kubernetes", + "OU": "CA", + "ST": "Oregon" + } + ] +} diff --git a/certs-dir/ca-key.pem b/certs-dir/ca-key.pem new file mode 100644 index 0000000..7b6c34c --- /dev/null +++ b/certs-dir/ca-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA7X9PGTx2t45hocH0OmOHGKje7VdQpB/MvwDdbAZvzelij+tW +f4NRR26f/h+viCznYEXbO7nulH/c//QKwsYjOdns3GrMDK1mCsjqSKuoBfqEbYUP +Gx78kk9Pu3detTUQrSwv0mwPgh7ci11MuBEUKu3ETtnzNSP/E9dgL4HpkzSI0SPd +ZQYWtKOuDSDc9cCMcDdpaUWSjJ5YgApvXH2lgLuibWY+EiXIzXR4rmNJIY0tJ6yg +1OpNHYmO6IJG3qAG+NapNomQtIGmgXbmC/BC9/kEcw7ET6OB9tOvSF1Q0NKfoSKA +1Q6cjxwEEDnXwlxzeI8yEL9fVV2U1ZQ2gG40dQIDAQABAoIBAEJQKMrYGiHYjKQ4 +3rOq9J7LECvfFHIvOuEeIbU2+uLYxp9+y5EhHoG+fsfbGwz/1L8J4lQD73zhUtjY +0GRgJJfTpJJK85yakgJy5Q/Tdj/tGirmWzhsZ4OGx7GFfBlGrthbnSK473+alGQJ +chF3G1V9PDH0Izd0TJXCrmW1dX2XRN0uHqSxEl0fgU4jdnFWyhvksgt+RIOXFxZM +9HrldcWdgX8/NwEolUGOuvWurYL5xBxiAWoVxI42Le7kRSX072en5vy6NpKS8E5A +aCTw6SHyCIJO+ASKEYVOMHMOHMSOas2QvrewreDT3P1pvo/7smlYZfKyjz1OU4WO +4AFtUQECgYEA/BtEcuyPOj+s6tUxcRfqauzKmZpvEWSBt7Jxu+z+ddAEiRTNnEEC +MNtcnk3s0oyQci3oWNeFQ4IuhqQeGFlO++sk99M1H5tNiLsVbFcbpSKAYt7+GPY3 +q6fzojqb6kQYzU+3VLP85aNspN8Tkv1xzg0vkNIjG/ioBNEZ1vAS08ECgYEA8SpI +SULmtYjRPGAeBwCq+uC9b6ANMNCnJWEazzive90oNIu4F3rwfzIYzA83AKqgKI5S +uoJkvBp8JOrW7QDRUy1DzsksTP5gvt1++XTpAz+KMi/AVctabtWEdoUJ9+UOGvQZ +STGn1N2kkwGoX9t6gxoNpTPNmy2gIe7fRisJvbUCgYA9Ek3umSfJMIuvB55Gi0ia +Rmp9809co0BZQ70RjUPHPnEa1BNK+uSQ+VLidnYcOzjyr/RNMUhzJiInM7Kmq1va +4we9kUiZM4IA0XFLIX7s7gKz8NkVKvApgTIf5prGkWCAvALpXgCxkHlNJ00Smprr +E36mUoF20THCmPTtgJLPAQKBgFrhfTYmgPyeH4NqEUbASK6cLKc5IF9p8eMTMeAv +U2oREIqksG4gxSqZvWCAx1B3/DzsTV69rqp4LJpY6T+NFFUM9MWjaA9nuRuUKrpl +BT5VHoYFGqz6Ig0sZ2umfPgvGF6t6C1+Ceoc0F0ZvdLJyeRH2CaOXAgDbJn9A6TU +qSoVAoGBALmlxYN2axBDLNo4h9rDQloIQdn+H4JH0p4KyjMfiwPzXyIcmRdiefQI +W7NICeHhUFg9cTeIesnQMXzPvgvjuWV+PcTPmoKOYgGU9JYmLPDJ2bsYSCZraVQb +rh9eNikuQPA61sigZ+c1pdJMxzvqxyfCJ7MQZDRRH7GAhxn1Hf6P +-----END RSA PRIVATE KEY----- diff --git a/certs-dir/ca.csr b/certs-dir/ca.csr new file mode 100644 index 0000000..ba2fc52 --- /dev/null +++ b/certs-dir/ca.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICrTCCAZUCAQAwaDELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8G +A1UEBxMIUG9ydGxhbmQxEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAkNB +MRMwEQYDVQQDEwpLdWJlcm5ldGVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA7X9PGTx2t45hocH0OmOHGKje7VdQpB/MvwDdbAZvzelij+tWf4NRR26f +/h+viCznYEXbO7nulH/c//QKwsYjOdns3GrMDK1mCsjqSKuoBfqEbYUPGx78kk9P +u3detTUQrSwv0mwPgh7ci11MuBEUKu3ETtnzNSP/E9dgL4HpkzSI0SPdZQYWtKOu +DSDc9cCMcDdpaUWSjJ5YgApvXH2lgLuibWY+EiXIzXR4rmNJIY0tJ6yg1OpNHYmO 
+6IJG3qAG+NapNomQtIGmgXbmC/BC9/kEcw7ET6OB9tOvSF1Q0NKfoSKA1Q6cjxwE +EDnXwlxzeI8yEL9fVV2U1ZQ2gG40dQIDAQABoAAwDQYJKoZIhvcNAQELBQADggEB +AA24J8zL73fiAnmpUMAvmyQQ5ZgYp342OIbsuLi7fHToF3J6fSpLY4CPyvN3UF00 +2fmt3CewA+8ytmjQKIGVxghLe6VRRdcYqPOIbfSb2xeNzJj0W6OUXkINwhqIU8Rq +1sOh1oBdd0N5pLZcK1DAEJ04Bvc+53NzntSkwXpoAQga/R0hns4dPRqif13vTCPD +97Bk9FEOVFF7ypkedR/wd/htO9OKga6VchHdtbLGES94yn/PJNPwx6NgPMLJ2kMZ +w2c7P48nvGfOQexPc37XIILJnOVSa8HOqkIlH8venAS5DvcZpqdTkyM9gNv9+/dy +EiqMfTrqWZuArv+fK/0wnxA= +-----END CERTIFICATE REQUEST----- diff --git a/certs-dir/ca.pem b/certs-dir/ca.pem new file mode 100644 index 0000000..243f24a --- /dev/null +++ b/certs-dir/ca.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDxDCCAqygAwIBAgIUKi4Fnok+d8mN/+TtttkULwd0mnwwDQYJKoZIhvcNAQEL +BQAwaDELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9y +dGxhbmQxEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAkNBMRMwEQYDVQQD +EwpLdWJlcm5ldGVzMB4XDTE4MDcwNDAxNTEwMFoXDTIzMDcwMzAxNTEwMFowaDEL +MAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9ydGxhbmQx +EzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAkNBMRMwEQYDVQQDEwpLdWJl +cm5ldGVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA7X9PGTx2t45h +ocH0OmOHGKje7VdQpB/MvwDdbAZvzelij+tWf4NRR26f/h+viCznYEXbO7nulH/c +//QKwsYjOdns3GrMDK1mCsjqSKuoBfqEbYUPGx78kk9Pu3detTUQrSwv0mwPgh7c +i11MuBEUKu3ETtnzNSP/E9dgL4HpkzSI0SPdZQYWtKOuDSDc9cCMcDdpaUWSjJ5Y +gApvXH2lgLuibWY+EiXIzXR4rmNJIY0tJ6yg1OpNHYmO6IJG3qAG+NapNomQtIGm +gXbmC/BC9/kEcw7ET6OB9tOvSF1Q0NKfoSKA1Q6cjxwEEDnXwlxzeI8yEL9fVV2U +1ZQ2gG40dQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB +/wIBAjAdBgNVHQ4EFgQU36rN8i+ChqFR7K7irMdEqB2v+fwwHwYDVR0jBBgwFoAU +36rN8i+ChqFR7K7irMdEqB2v+fwwDQYJKoZIhvcNAQELBQADggEBACaUXqxTWRKc +LHjCPaun0TwpXINO00/x/z9Fpj5IhrwsVKlo+quobjiVy9VMf7QRlbzVi0HFZISs +qBMAcHNvBxOmuCAe2gkHDWEvY3G/TwjK//R+j+d9EN10W0NJdFTBTr/NzOQUg5Ye +PloszPx0Jpi2oMpTCk1AahWP9xBgBFVB+Vao3y7moBGoNiZ348tw48OXp7pG5Sag +A6/A5YzGJ4fYMmzU65qxddNa3+a2XleYyy8g4om+fDuKT56HiZiPQKK8OQW5sp51 +98xkZPQWPlyfUR5iDU7zl4qzT9PK58mFVDTbnf3g+9iKTyAv50vyflkG522sxdi5 +TFBPPInZ1jI= +-----END CERTIFICATE----- diff --git a/certs-dir/encryption-config.yaml b/certs-dir/encryption-config.yaml new file mode 100644 index 0000000..79a056c --- /dev/null +++ b/certs-dir/encryption-config.yaml @@ -0,0 +1,11 @@ +kind: EncryptionConfig +apiVersion: v1 +resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: P0bSRM4eag88PjJpXOEKEE1DheDfTJpeMNxL2BqCKvw= + - identity: {} diff --git a/certs-dir/kube-controller-manager-csr.json b/certs-dir/kube-controller-manager-csr.json new file mode 100644 index 0000000..67c9957 --- /dev/null +++ b/certs-dir/kube-controller-manager-csr.json @@ -0,0 +1,16 @@ +{ + "CN": "system:kube-controller-manager", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "US", + "L": "Portland", + "O": "system:kube-controller-manager", + "OU": "Kubernetes - CentOS", + "ST": "Oregon" + } + ] +} diff --git a/certs-dir/kube-controller-manager-key.pem b/certs-dir/kube-controller-manager-key.pem new file mode 100644 index 0000000..9670456 --- /dev/null +++ b/certs-dir/kube-controller-manager-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA4L73jeHHKN0Jb0tk+0Hbqf7IgvJsFOBe0do0zN1uQ0+vFk7z +eAzHJeyLo5I3soTp7KotC7OjyS57t0r4WDaaT/pdthYRCt/qeDY96fAq7fGCqvik +ji1BDvigaQ/ioqAfGPpA1ZEK+/DGDu71/odI3MDsnj9w2oMpsSfct1HW3DKCvW+C +f+s8DpaAG5tw9LDK2bdfxm9PHaqVek77tfMyiojj+8NF/13+Ggqd7Qv2d00Z1nby +k1lHhXSrWfAiJbsOvkwOufPVaoW/FwqAgImSzDDjBfVRuvqlOIQdw1vzl5EXcENY 
+2xejv9vWGmnw+QjXN6u5iBNpvxmmFxF/BXCNVQIDAQABAoIBABPaKonY4LvGUpgH +I+Nh3OJ2RAX1HgD2FMoaqUFTafL+LewdizoWftzluSv4tXzKatNh8Nqwwo350inN +sIQyRo08A10bM+QvBUobGHr6oQAlrjImBnscmjuVUK4XHiDZXgXZzTJQPpUknooh +J4+VKaV+UShlWDWlEmC4sbFrxDvs++QY568e0f5RueMgNppqDDKQm+mUYF7ufYsi +zHzR5r5ZtpL4hM2wAp/0A5nvendST8Bg4QcPeIpL8wF/6ee0FcO3ZVdwvB9HOWC7 +MfLuhhR0N2Hc04QQg2lRV6YnaifYptZcG3Q6iE9dXomFm+o/olxAPVLUDUIz7gir +53bTEeECgYEA6JknS73vyM+ZSWu1122Gd4T9P/4lhks0nc+TK36KWnxX8lgRCUnn +zodDxR/AERU468RFUr7hkmLJH8SMrxv763T6UnUr09h0nOP7cNwsRG6q4rNA+4Jg +8HGtNN2z24rGxqsQheOHiMoZK5iv0VhZGI/pl2+SIRk2bY/JUg7oxO0CgYEA91uR +f8RNve41Umuz0WWHKguBGeR9LCFyc/JbmnWoGbazRNWpqwh5ZnRHmKNqNuSLAC67 +tggvYNvkwT+8AfrYJYztZMLPXcW/NK+miO/Uhw8EQwGMV/NZCGhZMEsZPmxGD+V7 +nxR8DIxZjh8EfDwDbf/0TK2mBzuR7kDiMbTtBQkCgYAv2kpuV748NxGXPFSl5H+U +mUSs5LF9pn1Ftk3V1evK3+gMELg4i/7ny4ZSyMxv90jFoJSOiHzJ7A7qvTogUIaM +k31od3iz1cVv/ateNb3Zulbn2u8Uj2c4MHrs9GurUCrC9rCoiv34U3PIN1p8nEuA +NSNep3KN4q0C72EAw9Z5pQKBgQCSzV6B1JBLC4fL3a2/taDNXDyHkGqVeVhC5fBl +sDRO5nZogOIMZqAeJR6kwT91SpgQSKwQ8bSkqHyQEHJVPnlvd78gUNnfDlBXRXPx +y1x3t3rRBac0P3F0ovFjzv4xZPMD1i9TD0Y78EPqLBBuZvOmIgTsSRbDLiCCpTFZ +TMZqeQKBgQCFQttgSb2/LpOUKA2bQHkJJgMEBd3EvKDDI+GQ8WWeFnkcUccWBOI5 +6qtp9hnlP8DodYS331aP5GMMUmOo0wMgz1UbPUImHxAoV7sNO0oCm14pAzejhu/9 +tqZscDd+QMlJzJtd0lYJ2BQgufifaDDg2H82Kujo2WO11+iLwhLdnQ== +-----END RSA PRIVATE KEY----- diff --git a/certs-dir/kube-controller-manager.csr b/certs-dir/kube-controller-manager.csr new file mode 100644 index 0000000..4bab81d --- /dev/null +++ b/certs-dir/kube-controller-manager.csr @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIC5zCCAc8CAQAwgaExCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZPcmVnb24xETAP +BgNVBAcTCFBvcnRsYW5kMScwJQYDVQQKEx5zeXN0ZW06a3ViZS1jb250cm9sbGVy +LW1hbmFnZXIxHDAaBgNVBAsTE0t1YmVybmV0ZXMgLSBDZW50T1MxJzAlBgNVBAMT +HnN5c3RlbTprdWJlLWNvbnRyb2xsZXItbWFuYWdlcjCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOC+943hxyjdCW9LZPtB26n+yILybBTgXtHaNMzdbkNP +rxZO83gMxyXsi6OSN7KE6eyqLQuzo8kue7dK+Fg2mk/6XbYWEQrf6ng2PenwKu3x +gqr4pI4tQQ74oGkP4qKgHxj6QNWRCvvwxg7u9f6HSNzA7J4/cNqDKbEn3LdR1twy +gr1vgn/rPA6WgBubcPSwytm3X8ZvTx2qlXpO+7XzMoqI4/vDRf9d/hoKne0L9ndN +GdZ28pNZR4V0q1nwIiW7Dr5MDrnz1WqFvxcKgICJksww4wX1Ubr6pTiEHcNb85eR +F3BDWNsXo7/b1hpp8PkI1zeruYgTab8ZphcRfwVwjVUCAwEAAaAAMA0GCSqGSIb3 +DQEBCwUAA4IBAQCbIwk2f9xHPikvszZad32XLSRfU7BuGjQ2klc0eeZzVI2IAfSX +DaCmzVYpAmqBTesCm6L3wS1cSG81siXyuHE/XM2RHnSkoJ9IuDHHDzyRCOHC0zFQ +MQr2Feulbz+Z4AqrM5s4BrgwcbUbpBkjgd+AnEBZ/MxHJ1gBKN7R0A30mr874YPw +8WFSjgtrWsthyqtAsRSWJxldFc3kZMlWlxQjdOFqY69q1zZksZwGfOQ76Br1Fntd +70fPFV3lwjx2A7SWgk01F2K6YlqEakz6uF8+nHnKmHZi+so/Gx46ZO1f6EWBO6ap +7jjmVsvKS3+FkB/2paZp77QTdOceWgc1U5o1 +-----END CERTIFICATE REQUEST----- diff --git a/certs-dir/kube-controller-manager.kubeconfig b/certs-dir/kube-controller-manager.kubeconfig new file mode 100644 index 0000000..3278624 --- /dev/null +++ b/certs-dir/kube-controller-manager.kubeconfig @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR4RENDQXF5Z0F3SUJBZ0lVS2k0Rm5vaytkOG1OLytUdHR0a1VMd2QwbW53d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2FERUxNQWtHQTFVRUJoTUNWVk14RHpBTkJnTlZCQWdUQms5eVpXZHZiakVSTUE4R0ExVUVCeE1JVUc5eQpkR3hoYm1ReEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhDekFKQmdOVkJBc1RBa05CTVJNd0VRWURWUVFECkV3cExkV0psY201bGRHVnpNQjRYRFRFNE1EY3dOREF4TlRFd01Gb1hEVEl6TURjd016QXhOVEV3TUZvd2FERUwKTUFrR0ExVUVCaE1DVlZNeER6QU5CZ05WQkFnVEJrOXlaV2R2YmpFUk1BOEdBMVVFQnhNSVVHOXlkR3hoYm1ReApFekFSQmdOVkJBb1RDa3QxWW1WeWJtVjBaWE14Q3pBSkJnTlZCQXNUQWtOQk1STXdFUVlEVlFRREV3cExkV0psCmNtNWxkR1Z6TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUE3WDlQR1R4MnQ0NWgKb2NIME9tT0hHS2plN1ZkUXBCL012d0RkYkFadnplbGlqK3RXZjROUlIyNmYvaCt2aUN6bllFWGJPN251bEgvYwovL1FLd3NZak9kbnMzR3JNREsxbUNzanFTS3VvQmZxRWJZVVBHeDc4a2s5UHUzZGV0VFVRclN3djBtd1BnaDdjCmkxMU11QkVVS3UzRVR0bnpOU1AvRTlkZ0w0SHBrelNJMFNQZFpRWVd0S091RFNEYzljQ01jRGRwYVVXU2pKNVkKZ0FwdlhIMmxnTHVpYldZK0VpWEl6WFI0cm1OSklZMHRKNnlnMU9wTkhZbU82SUpHM3FBRytOYXBOb21RdElHbQpnWGJtQy9CQzkva0VjdzdFVDZPQjl0T3ZTRjFRME5LZm9TS0ExUTZjanh3RUVEblh3bHh6ZUk4eUVMOWZWVjJVCjFaUTJnRzQwZFFJREFRQUJvMll3WkRBT0JnTlZIUThCQWY4RUJBTUNBUVl3RWdZRFZSMFRBUUgvQkFnd0JnRUIKL3dJQkFqQWRCZ05WSFE0RUZnUVUzNnJOOGkrQ2hxRlI3Szdpck1kRXFCMnYrZnd3SHdZRFZSMGpCQmd3Rm9BVQozNnJOOGkrQ2hxRlI3Szdpck1kRXFCMnYrZnd3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUNhVVhxeFRXUktjCkxIakNQYXVuMFR3cFhJTk8wMC94L3o5RnBqNUlocndzVktsbytxdW9iamlWeTlWTWY3UVJsYnpWaTBIRlpJU3MKcUJNQWNITnZCeE9tdUNBZTJna0hEV0V2WTNHL1R3aksvL1IraitkOUVOMTBXME5KZEZUQlRyL056T1FVZzVZZQpQbG9zelB4MEpwaTJvTXBUQ2sxQWFoV1A5eEJnQkZWQitWYW8zeTdtb0JHb05pWjM0OHR3NDhPWHA3cEc1U2FnCkE2L0E1WXpHSjRmWU1telU2NXF4ZGROYTMrYTJYbGVZeXk4ZzRvbStmRHVLVDU2SGlaaVBRS0s4T1FXNXNwNTEKOTh4a1pQUVdQbHlmVVI1aURVN3psNHF6VDlQSzU4bUZWRFRibmYzZys5aUtUeUF2NTB2eWZsa0c1MjJzeGRpNQpURkJQUEluWjFqST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server: https://127.0.0.1:6443 + name: kube-cluster +contexts: +- context: + cluster: kube-cluster + user: system:kube-controller-manager + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: system:kube-controller-manager + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVGekNDQXYrZ0F3SUJBZ0lVSVBFYzFNYWtaeGFrQmYyZkdwVDN1SDZhZjdZd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2FERUxNQWtHQTFVRUJoTUNWVk14RHpBTkJnTlZCQWdUQms5eVpXZHZiakVSTUE4R0ExVUVCeE1JVUc5eQpkR3hoYm1ReEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhDekFKQmdOVkJBc1RBa05CTVJNd0VRWURWUVFECkV3cExkV0psY201bGRHVnpNQjRYRFRFNE1EY3dOREF4TlRFd01Gb1hEVEU1TURjd05EQXhOVEV3TUZvd2dhRXgKQ3pBSkJnTlZCQVlUQWxWVE1ROHdEUVlEVlFRSUV3WlBjbVZuYjI0eEVUQVBCZ05WQkFjVENGQnZjblJzWVc1awpNU2N3SlFZRFZRUUtFeDV6ZVhOMFpXMDZhM1ZpWlMxamIyNTBjbTlzYkdWeUxXMWhibUZuWlhJeEhEQWFCZ05WCkJBc1RFMHQxWW1WeWJtVjBaWE1nTFNCRFpXNTBUMU14SnpBbEJnTlZCQU1USG5ONWMzUmxiVHByZFdKbExXTnYKYm5SeWIyeHNaWEl0YldGdVlXZGxjakNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQgpBT0MrOTQzaHh5amRDVzlMWlB0QjI2bit5SUx5YkJUZ1h0SGFOTXpkYmtOUHJ4Wk84M2dNeHlYc2k2T1NON0tFCjZleXFMUXV6bzhrdWU3ZEsrRmcybWsvNlhiWVdFUXJmNm5nMlBlbndLdTN4Z3FyNHBJNHRRUTc0b0drUDRxS2cKSHhqNlFOV1JDdnZ3eGc3dTlmNkhTTnpBN0o0L2NOcURLYkVuM0xkUjF0d3lncjF2Z24vclBBNldnQnViY1BTdwp5dG0zWDhadlR4MnFsWHBPKzdYek1vcUk0L3ZEUmY5ZC9ob0tuZTBMOW5kTkdkWjI4cE5aUjRWMHExbndJaVc3CkRyNU1Ecm56MVdxRnZ4Y0tnSUNKa3N3dzR3WDFVYnI2cFRpRUhjTmI4NWVSRjNCRFdOc1hvNy9iMWhwcDhQa0kKMXplcnVZZ1RhYjhacGhjUmZ3VndqVlVDQXdFQUFhTi9NSDB3RGdZRFZSMFBBUUgvQkFRREFnV2dNQjBHQTFVZApKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXCkJCUWZ6UnJveGFOSG1Mc2x3NUdEYlNtUWtzcnNOREFmQmdOVkhTTUVHREFXZ0JUZnFzM3lMNEtHb1ZIc3J1S3MKeDBTb0hhLzUvREFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBc0dGSVgvNVlDcHZ5b090NEpoWVhMalVwbnFjLwo4MW5CcU0yQXJqQXNPeXIwaEtmanIzNk0vVmtwSE1JS3g3WGQxcWR4R0FXazYvWTg2Y3BZdjRWbWwrbFZsNGRwCnBnQStZamprVjVWRHFpNWlzTWxNckQvcWQrYTltR0dMM0Yrazh4VWhrNFZ3VWx5S0NUOUFvVTJWOExqdjRYME4KQVY1eklHR2dPTFFiL1NiV1c3L2FVTUYwODU2Y1ZvTVlHQlp6V1BLb21vOHdtRmxBK2RWZ1VPdXpZZitPcFpqcQpkaUFmcjdTR1NXSlVTc09vMjVNb3NFbUF2T3VGRG9ua0l5eTRQTUpFMFZlb2ZrZ2RkWnlhUno0SGh1Z1ZZRDFFCldlVGFpdWJ0d1Z6RHgyRkg2Zk12RjhJWXpaWXdsRzRpdzRMb01NcEJDYU1kSXN5RkFXY2hlTDBuWWc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBNEw3M2plSEhLTjBKYjB0ayswSGJxZjdJZ3ZKc0ZPQmUwZG8wek4xdVEwK3ZGazd6CmVBekhKZXlMbzVJM3NvVHA3S290QzdPanlTNTd0MHI0V0RhYVQvcGR0aFlSQ3QvcWVEWTk2ZkFxN2ZHQ3F2aWsKamkxQkR2aWdhUS9pb3FBZkdQcEExWkVLKy9ER0R1NzEvb2RJM01Ec25qOXcyb01wc1NmY3QxSFczREtDdlcrQwpmK3M4RHBhQUc1dHc5TERLMmJkZnhtOVBIYXFWZWs3N3RmTXlpb2pqKzhORi8xMytHZ3FkN1F2MmQwMFoxbmJ5CmsxbEhoWFNyV2ZBaUpic092a3dPdWZQVmFvVy9Gd3FBZ0ltU3pERGpCZlZSdXZxbE9JUWR3MXZ6bDVFWGNFTlkKMnhlanY5dldHbW53K1FqWE42dTVpQk5wdnhtbUZ4Ri9CWENOVlFJREFRQUJBb0lCQUJQYUtvblk0THZHVXBnSApJK05oM09KMlJBWDFIZ0QyRk1vYXFVRlRhZkwrTGV3ZGl6b1dmdHpsdVN2NHRYekthdE5oOE5xd3dvMzUwaW5OCnNJUXlSbzA4QTEwYk0rUXZCVW9iR0hyNm9RQWxyakltQm5zY21qdVZVSzRYSGlEWlhnWFp6VEpRUHBVa25vb2gKSjQrVkthVitVU2hsV0RXbEVtQzRzYkZyeER2cysrUVk1NjhlMGY1UnVlTWdOcHBxRERLUW0rbVVZRjd1ZllzaQp6SHpSNXI1WnRwTDRoTTJ3QXAvMEE1bnZlbmRTVDhCZzRRY1BlSXBMOHdGLzZlZTBGY08zWlZkd3ZCOUhPV0M3Ck1mTHVoaFIwTjJIYzA0UVFnMmxSVjZZbmFpZllwdFpjRzNRNmlFOWRYb21GbStvL29seEFQVkxVRFVJejdnaXIKNTNiVEVlRUNnWUVBNkprblM3M3Z5TStaU1d1MTEyMkdkNFQ5UC80bGhrczBuYytUSzM2S1dueFg4bGdSQ1Vubgp6b2REeFIvQUVSVTQ2OFJGVXI3aGttTEpIOFNNcnh2NzYzVDZVblVyMDloMG5PUDdjTndzUkc2cTRyTkErNEpnCjhIR3ROTjJ6MjRyR3hxc1FoZU9IaU1vWks1aXYwVmhaR0kvcGwyK1NJUmsyYlkvSlVnN294TzBDZ1lFQTkxdVIKZjhSTnZlNDFVbXV6MFdXSEtndUJHZVI5TENGeWMvSmJtbldvR2JhelJOV3Bxd2g1Wm5SSG1LTnFOdVNMQUM2Nwp0Z2d2WU52a3dUKzhBZnJZSll6dFpNTFBYY1cvTksrbWlPL1VodzhFUXdHTVYvTlpDR2haTUVzWlBteEdEK1Y3Cm54UjhESXhaamg4RWZEd0RiZi8wVEsybUJ6dVI3a0RpTWJUdEJRa0NnWUF2MmtwdVY3NDhOeEdYUEZTbDVIK1UKbVVTczVMRjlwbjFGdGszVjFldkszK2dNRUxnNGkvN255NFpTeU14djkwakZvSlNPaUh6SjdBN3F2VG9nVUlhTQprMzFvZDNpejFjVnYvYXRlTmIzWnVsYm4ydThVajJjNE1IcnM5R3VyVUNyQzlyQ29pdjM0VTNQSU4xcDhuRXVBCk5TTmVwM0tONHEwQzcyRUF3OVo1cFFLQmdRQ1N6VjZCMUpCTEM0ZkwzYTIvdGFETlhEeUhrR3FWZVZoQzVmQmwKc0RSTzVuWm9nT0lNWnFBZUpSNmt3VDkxU3BnUVNLd1E4YlNrcUh5UUVISlZQbmx2ZDc4Z1VObmZEbEJYUlhQeAp5MXgzdDNyUkJhYzBQM0Ywb3ZGanp2NHhaUE1EMWk5VEQwWTc4RVBxTEJCdVp2T21JZ1RzU1JiRExpQ0NwVEZaClRNWnFlUUtCZ1FDRlF0dGdTYjIvTHBPVUtBMmJRSGtKSmdNRUJkM0V2S0RESStHUThXV2VGbmtjVWNjV0JPSTUKNnF0cDlobmxQOERvZFlTMzMxYVA1R01NVW1PbzB3TWd6MVViUFVJbUh4QW9WN3NOTzBvQ20xNHBBemVqaHUvOQp0cVpzY0RkK1FNbEp6SnRkMGxZSjJCUWd1ZmlmYUREZzJIODJLdWpvMldPMTEraUx3aExkblE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= diff --git a/certs-dir/kube-controller-manager.pem b/certs-dir/kube-controller-manager.pem new file mode 100644 index 0000000..081fbcb --- /dev/null +++ b/certs-dir/kube-controller-manager.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEFzCCAv+gAwIBAgIUIPEc1MakZxakBf2fGpT3uH6af7YwDQYJKoZIhvcNAQEL +BQAwaDELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9y +dGxhbmQxEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAkNBMRMwEQYDVQQD +EwpLdWJlcm5ldGVzMB4XDTE4MDcwNDAxNTEwMFoXDTE5MDcwNDAxNTEwMFowgaEx +CzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZPcmVnb24xETAPBgNVBAcTCFBvcnRsYW5k +MScwJQYDVQQKEx5zeXN0ZW06a3ViZS1jb250cm9sbGVyLW1hbmFnZXIxHDAaBgNV +BAsTE0t1YmVybmV0ZXMgLSBDZW50T1MxJzAlBgNVBAMTHnN5c3RlbTprdWJlLWNv +bnRyb2xsZXItbWFuYWdlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AOC+943hxyjdCW9LZPtB26n+yILybBTgXtHaNMzdbkNPrxZO83gMxyXsi6OSN7KE +6eyqLQuzo8kue7dK+Fg2mk/6XbYWEQrf6ng2PenwKu3xgqr4pI4tQQ74oGkP4qKg +Hxj6QNWRCvvwxg7u9f6HSNzA7J4/cNqDKbEn3LdR1twygr1vgn/rPA6WgBubcPSw +ytm3X8ZvTx2qlXpO+7XzMoqI4/vDRf9d/hoKne0L9ndNGdZ28pNZR4V0q1nwIiW7 +Dr5MDrnz1WqFvxcKgICJksww4wX1Ubr6pTiEHcNb85eRF3BDWNsXo7/b1hpp8PkI +1zeruYgTab8ZphcRfwVwjVUCAwEAAaN/MH0wDgYDVR0PAQH/BAQDAgWgMB0GA1Ud +JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQW +BBQfzRroxaNHmLslw5GDbSmQksrsNDAfBgNVHSMEGDAWgBTfqs3yL4KGoVHsruKs 
+x0SoHa/5/DANBgkqhkiG9w0BAQsFAAOCAQEAsGFIX/5YCpvyoOt4JhYXLjUpnqc/ +81nBqM2ArjAsOyr0hKfjr36M/VkpHMIKx7Xd1qdxGAWk6/Y86cpYv4Vml+lVl4dp +pgA+YjjkV5VDqi5isMlMrD/qd+a9mGGL3F+k8xUhk4VwUlyKCT9AoU2V8Ljv4X0N +AV5zIGGgOLQb/SbWW7/aUMF0856cVoMYGBZzWPKomo8wmFlA+dVgUOuzYf+OpZjq +diAfr7SGSWJUSsOo25MosEmAvOuFDonkIyy4PMJE0VeofkgddZyaRz4HhugVYD1E +WeTaiubtwVzDx2FH6fMvF8IYzZYwlG4iw4LoMMpBCaMdIsyFAWcheL0nYg== +-----END CERTIFICATE----- diff --git a/certs-dir/kube-proxy-csr.json b/certs-dir/kube-proxy-csr.json new file mode 100644 index 0000000..74fe3ef --- /dev/null +++ b/certs-dir/kube-proxy-csr.json @@ -0,0 +1,16 @@ +{ + "CN": "system:kube-proxy", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "US", + "L": "Portland", + "O": "system:node-proxier", + "OU": "Kubernetes - CentOS", + "ST": "Oregon" + } + ] +} diff --git a/certs-dir/kube-proxy-key.pem b/certs-dir/kube-proxy-key.pem new file mode 100644 index 0000000..23b3be9 --- /dev/null +++ b/certs-dir/kube-proxy-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA1/X4U8Rf+oDwjqmM3sxckI/YM7hg0RJX2YlKk+6N+b0xoacO +T2TztB5Y5oLvQ+iCj/7y1NGJIRSztCieqhhf0Vbp+IUXr0ej1zSGAPKkPIs2G+zI +GgceNr22TyKHgq0W0OphQfalROh09o7xm0Ns0QDSsuNCYcidi5PZlL1MAGet9lbo +03zjYs/fYVeD+Lz66pSH3Iub5kBmbbmTK+q+MiIMYKqaXgFXvkroeJo8U3PpFkHJ +3543ev3vGJ/et+mFMDFvgIrQewrjFqldD97Jeu6REqsFU4q2s8lVoT/qNJuV9UlS +8YSoXAgCvPxkRFjYADMucNNZh/T5Bj2g5ph73wIDAQABAoIBAQDMSSbQxpAr/NCR +gx14nERrk0ZnFmCvJPTpGI/tg9nVhfxu+AH3ZH2LwrFBzFXdHRJ0eCzo2BUpr9WV +9F6ydEAfXGIS7oNLES+RIWQpfV2Sz/UtUTywMk+IZMkR7o3VNTrCARRjO5d2qZYK +rYkq4AsNYgPtqEBhE07Ee+V0Z+R+89Yno2YwZZYf3uQagj9zCH9szpe/rdXNAXlV +Ivdu4ZaB7DPe2/kiTwB2c1z+5r1GAsyn6xmoxnLEEMK9MNOeRNjw9d3RdP4cRSZu +aKYB9ORlVVJnQ/6vN+c2CEdaZbFBGtt0jOugbs572uicVq2MrBKaLpNgRsjhz/ub +kmBYbcTBAoGBANiM8G7BHn1fnYLvDRUlVPtaK9atg0AhjsTzRVhRAWGSbp40kfkO +LYM1o460OdRTqs/kFOPCNPknWClLOL46gix6ygcwtJ1XlrG3Q0Ljw1N3VgOCLezj +/EUaOWWRfkZSM97en1DaiV/6IAI1eAr6DHXQxLB03wvMMbz8kzhslY8RAoGBAP9N +h0o+2hbeusoV2QwRvHF7Qgez1CGuJnFX2SBHWsASIf4ckfgpLwV6ZDEdqgzAjBAK +zm+NGGQd//sdD7Y11JkYcvuxrzQEWIXUYZU6E9pmTHtPVTSRlrnB//BQlYwrfuEF +bz7gr56YD+gvcojuFvfcD9B4X5kX1k5/hyjdDzvvAoGAMfXSQrsrXaGBpYOhcQ97 +28Gh+5Vsgo87cRTvGsEVoH04KCfYxa0PswXdBj9uWB41emEUsLdNoW4CWQSRwtGc +1sY9dgs2oFZk3ft5+SSzG76ciZKvJcgzm33BYD2A9Jhifzi9aNoCd7ItUFUlrOKj +nUrdFhjgnMgvcXqUPKBQoQECgYALf+qAJaSdMxsmzyUfrgzW775hqm/7tGR25oYZ +Nh2YWTi+wXqxuukrTgAFVli6ilHExzfVPn3CxLcrEHwMsRAKdVd4juBdb3StUouy +tRL/22vGgiRMYeJwK5Tjtj2GOs9UQ3JnWqeOdTsCMmDuWgsezwLHWSS1OKWmk4f7 +BzoQcQKBgQC+QTjBS8LwNAGSLpJI1wcsL2IGVv5pMFHQlsh+kEOqUMVsQIyS9GL9 +gcVQ5UnxJjN5NlPOdU0V8pz+v6UA19NAO6EGN+ssL+g8YH/2Q0lLJDYYWsM6hEEx +1bB1wWS19gnHgxjBdvmHVuVZQwnIJW9Rhmd0ipF7Y7kWKriGTLQb+A== +-----END RSA PRIVATE KEY----- diff --git a/certs-dir/kube-proxy.csr b/certs-dir/kube-proxy.csr new file mode 100644 index 0000000..596eb05 --- /dev/null +++ b/certs-dir/kube-proxy.csr @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICzzCCAbcCAQAwgYkxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZPcmVnb24xETAP +BgNVBAcTCFBvcnRsYW5kMRwwGgYDVQQKExNzeXN0ZW06bm9kZS1wcm94aWVyMRww +GgYDVQQLExNLdWJlcm5ldGVzIC0gQ2VudE9TMRowGAYDVQQDExFzeXN0ZW06a3Vi +ZS1wcm94eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANf1+FPEX/qA +8I6pjN7MXJCP2DO4YNESV9mJSpPujfm9MaGnDk9k87QeWOaC70Pogo/+8tTRiSEU +s7QonqoYX9FW6fiFF69Ho9c0hgDypDyLNhvsyBoHHja9tk8ih4KtFtDqYUH2pUTo +dPaO8ZtDbNEA0rLjQmHInYuT2ZS9TABnrfZW6NN842LP32FXg/i8+uqUh9yLm+ZA +Zm25kyvqvjIiDGCqml4BV75K6HiaPFNz6RZByd+eN3r97xif3rfphTAxb4CK0HsK +4xapXQ/eyXrukRKrBVOKtrPJVaE/6jSblfVJUvGEqFwIArz8ZERY2AAzLnDTWYf0 
++QY9oOaYe98CAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQAVXZ1J6b/v4UnhkWOq +kk4qsqeJ2gt15TZUok2K0Rc8CnYLuJxoruaPsA+k3kBc2B2jSMovwR91jgnobdJh +LVF0u4BGnq6d3BGqFntqXgQMDa5FwrFIEUFUWKbvx1S+XajisdTxyPPjuGvcN4fJ +Q+b1vkG0Esld33BspD3uhcpM3AYasOf6n6D/NR1AF9mjRjHFFBu3Pi9LpheLGiLh +rDxmfCDvMf1rykiLbjaTFA+Qw5cW14zXrajJl3BfWMgrR4WP3RnoUheov9FiOJbN +NU/KUGa8DTzdk5SybkLcYjhzzkPdpJaZ+9Xy0PBlVdOkRhpld3xG6imrLB9l06Gj +Opo6 +-----END CERTIFICATE REQUEST----- diff --git a/certs-dir/kube-proxy.kubeconfig b/certs-dir/kube-proxy.kubeconfig new file mode 100644 index 0000000..cf85be7 --- /dev/null +++ b/certs-dir/kube-proxy.kubeconfig @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR4RENDQXF5Z0F3SUJBZ0lVS2k0Rm5vaytkOG1OLytUdHR0a1VMd2QwbW53d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2FERUxNQWtHQTFVRUJoTUNWVk14RHpBTkJnTlZCQWdUQms5eVpXZHZiakVSTUE4R0ExVUVCeE1JVUc5eQpkR3hoYm1ReEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhDekFKQmdOVkJBc1RBa05CTVJNd0VRWURWUVFECkV3cExkV0psY201bGRHVnpNQjRYRFRFNE1EY3dOREF4TlRFd01Gb1hEVEl6TURjd016QXhOVEV3TUZvd2FERUwKTUFrR0ExVUVCaE1DVlZNeER6QU5CZ05WQkFnVEJrOXlaV2R2YmpFUk1BOEdBMVVFQnhNSVVHOXlkR3hoYm1ReApFekFSQmdOVkJBb1RDa3QxWW1WeWJtVjBaWE14Q3pBSkJnTlZCQXNUQWtOQk1STXdFUVlEVlFRREV3cExkV0psCmNtNWxkR1Z6TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUE3WDlQR1R4MnQ0NWgKb2NIME9tT0hHS2plN1ZkUXBCL012d0RkYkFadnplbGlqK3RXZjROUlIyNmYvaCt2aUN6bllFWGJPN251bEgvYwovL1FLd3NZak9kbnMzR3JNREsxbUNzanFTS3VvQmZxRWJZVVBHeDc4a2s5UHUzZGV0VFVRclN3djBtd1BnaDdjCmkxMU11QkVVS3UzRVR0bnpOU1AvRTlkZ0w0SHBrelNJMFNQZFpRWVd0S091RFNEYzljQ01jRGRwYVVXU2pKNVkKZ0FwdlhIMmxnTHVpYldZK0VpWEl6WFI0cm1OSklZMHRKNnlnMU9wTkhZbU82SUpHM3FBRytOYXBOb21RdElHbQpnWGJtQy9CQzkva0VjdzdFVDZPQjl0T3ZTRjFRME5LZm9TS0ExUTZjanh3RUVEblh3bHh6ZUk4eUVMOWZWVjJVCjFaUTJnRzQwZFFJREFRQUJvMll3WkRBT0JnTlZIUThCQWY4RUJBTUNBUVl3RWdZRFZSMFRBUUgvQkFnd0JnRUIKL3dJQkFqQWRCZ05WSFE0RUZnUVUzNnJOOGkrQ2hxRlI3Szdpck1kRXFCMnYrZnd3SHdZRFZSMGpCQmd3Rm9BVQozNnJOOGkrQ2hxRlI3Szdpck1kRXFCMnYrZnd3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUNhVVhxeFRXUktjCkxIakNQYXVuMFR3cFhJTk8wMC94L3o5RnBqNUlocndzVktsbytxdW9iamlWeTlWTWY3UVJsYnpWaTBIRlpJU3MKcUJNQWNITnZCeE9tdUNBZTJna0hEV0V2WTNHL1R3aksvL1IraitkOUVOMTBXME5KZEZUQlRyL056T1FVZzVZZQpQbG9zelB4MEpwaTJvTXBUQ2sxQWFoV1A5eEJnQkZWQitWYW8zeTdtb0JHb05pWjM0OHR3NDhPWHA3cEc1U2FnCkE2L0E1WXpHSjRmWU1telU2NXF4ZGROYTMrYTJYbGVZeXk4ZzRvbStmRHVLVDU2SGlaaVBRS0s4T1FXNXNwNTEKOTh4a1pQUVdQbHlmVVI1aURVN3psNHF6VDlQSzU4bUZWRFRibmYzZys5aUtUeUF2NTB2eWZsa0c1MjJzeGRpNQpURkJQUEluWjFqST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server: https://:6443 + name: kube-cluster +contexts: +- context: + cluster: kube-cluster + user: system:kube-proxy + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: system:kube-proxy + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQvekNDQXVlZ0F3SUJBZ0lVWHFuTmY4UzY0Q2lsNEtMOEpIQXkwclVzYktRd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2FERUxNQWtHQTFVRUJoTUNWVk14RHpBTkJnTlZCQWdUQms5eVpXZHZiakVSTUE4R0ExVUVCeE1JVUc5eQpkR3hoYm1ReEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhDekFKQmdOVkJBc1RBa05CTVJNd0VRWURWUVFECkV3cExkV0psY201bGRHVnpNQjRYRFRFNE1EY3dOREF4TlRFd01Gb1hEVEU1TURjd05EQXhOVEV3TUZvd2dZa3gKQ3pBSkJnTlZCQVlUQWxWVE1ROHdEUVlEVlFRSUV3WlBjbVZuYjI0eEVUQVBCZ05WQkFjVENGQnZjblJzWVc1awpNUnd3R2dZRFZRUUtFeE56ZVhOMFpXMDZibTlrWlMxd2NtOTRhV1Z5TVJ3d0dnWURWUVFMRXhOTGRXSmxjbTVsCmRHVnpJQzBnUTJWdWRFOVRNUm93R0FZRFZRUURFeEZ6ZVhOMFpXMDZhM1ZpWlMxd2NtOTRlVENDQVNJd0RRWUoKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTmYxK0ZQRVgvcUE4STZwak43TVhKQ1AyRE80WU5FUwpWOW1KU3BQdWpmbTlNYUduRGs5azg3UWVXT2FDNzBQb2dvLys4dFRSaVNFVXM3UW9ucW9ZWDlGVzZmaUZGNjlICm85YzBoZ0R5cER5TE5odnN5Qm9ISGphOXRrOGloNEt0RnREcVlVSDJwVVRvZFBhTzhadERiTkVBMHJMalFtSEkKbll1VDJaUzlUQUJucmZaVzZOTjg0MkxQMzJGWGcvaTgrdXFVaDl5TG0rWkFabTI1a3l2cXZqSWlER0NxbWw0QgpWNzVLNkhpYVBGTno2UlpCeWQrZU4zcjk3eGlmM3JmcGhUQXhiNENLMEhzSzR4YXBYUS9leVhydWtSS3JCVk9LCnRyUEpWYUUvNmpTYmxmVkpVdkdFcUZ3SUFyejhaRVJZMkFBekxuRFRXWWYwK1FZOW9PYVllOThDQXdFQUFhTi8KTUgwd0RnWURWUjBQQVFIL0JBUURBZ1dnTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3JCZ0VGQlFjRApBakFNQmdOVkhSTUJBZjhFQWpBQU1CMEdBMVVkRGdRV0JCVGFKcEZzUW4wSTFOVjAzVklsaGMxMGlJUHB3ekFmCkJnTlZIU01FR0RBV2dCVGZxczN5TDRLR29WSHNydUtzeDBTb0hhLzUvREFOQmdrcWhraUc5dzBCQVFzRkFBT0MKQVFFQXhkNm9ZbnpDV2dFWG1RZkllQWh6T3BYNnVFbXFqajU2amZPQ1RGd3dnYlByQmtEM2NkSm1Galg4WlVpUwp2TDFHYUxjaDVvYmlDV0FiTGlSVGsxUTBUTFFhTjJCSUZQcDc5ajRYV3JlaFYycmJkY1Q1TjR6MkZzRkhmcGgrCmE0Q3V4c2g3ZHNoOXRHV1JxeVorYUhDdWNNQjl1NnBkV3pId1pYdmY2aVF6OEhJOGhzd21JWUhwSEhYbjZwK0oKa3Y4SVJPWVl0Wk9uaThtY0Z1RnJ5VEtjU20veEVuMDFMd3F3UktwUVE0cFJhTHcrUCs3bTZWVUMyeVNVckl5YwpKczBiS0dHcFluUjRmL1k4UnVRSjZzaHIzV1VKa2x6Tm41d3dLSVV4S3JpNGpjRWpXcXQrQXpseWpCNHhHb1FQCkZNdU0xZ3E3SndLbUVUZGZmb04rdm5WZDdBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBMS9YNFU4UmYrb0R3anFtTTNzeGNrSS9ZTTdoZzBSSlgyWWxLays2TitiMHhvYWNPClQyVHp0QjVZNW9MdlEraUNqLzd5MU5HSklSU3p0Q2llcWhoZjBWYnArSVVYcjBlajF6U0dBUEtrUElzMkcrekkKR2djZU5yMjJUeUtIZ3EwVzBPcGhRZmFsUk9oMDlvN3htME5zMFFEU3N1TkNZY2lkaTVQWmxMMU1BR2V0OWxibwowM3pqWXMvZllWZUQrTHo2NnBTSDNJdWI1a0JtYmJtVEsrcStNaUlNWUtxYVhnRlh2a3JvZUpvOFUzUHBGa0hKCjM1NDNldjN2R0ovZXQrbUZNREZ2Z0lyUWV3cmpGcWxkRDk3SmV1NlJFcXNGVTRxMnM4bFZvVC9xTkp1VjlVbFMKOFlTb1hBZ0N2UHhrUkZqWUFETXVjTk5aaC9UNUJqMmc1cGg3M3dJREFRQUJBb0lCQVFETVNTYlF4cEFyL05DUgpneDE0bkVScmswWm5GbUN2SlBUcEdJL3RnOW5WaGZ4dStBSDNaSDJMd3JGQnpGWGRIUkowZUN6bzJCVXByOVdWCjlGNnlkRUFmWEdJUzdvTkxFUytSSVdRcGZWMlN6L1V0VVR5d01rK0laTWtSN28zVk5UckNBUlJqTzVkMnFaWUsKcllrcTRBc05ZZ1B0cUVCaEUwN0VlK1YwWitSKzg5WW5vMll3WlpZZjN1UWFnajl6Q0g5c3pwZS9yZFhOQVhsVgpJdmR1NFphQjdEUGUyL2tpVHdCMmMxeis1cjFHQXN5bjZ4bW94bkxFRU1LOU1OT2VSTmp3OWQzUmRQNGNSU1p1CmFLWUI5T1JsVlZKblEvNnZOK2MyQ0VkYVpiRkJHdHQwak91Z2JzNTcydWljVnEyTXJCS2FMcE5nUnNqaHovdWIKa21CWWJjVEJBb0dCQU5pTThHN0JIbjFmbllMdkRSVWxWUHRhSzlhdGcwQWhqc1R6UlZoUkFXR1NicDQwa2ZrTwpMWU0xbzQ2ME9kUlRxcy9rRk9QQ05Qa25XQ2xMT0w0NmdpeDZ5Z2N3dEoxWGxyRzNRMExqdzFOM1ZnT0NMZXpqCi9FVWFPV1dSZmtaU005N2VuMURhaVYvNklBSTFlQXI2REhYUXhMQjAzd3ZNTWJ6OGt6aHNsWThSQW9HQkFQOU4KaDBvKzJoYmV1c29WMlF3UnZIRjdRZ2V6MUNHdUpuRlgyU0JIV3NBU0lmNGNrZmdwTHdWNlpERWRxZ3pBakJBSwp6bStOR0dRZC8vc2REN1kxMUprWWN2dXhyelFFV0lYVVlaVTZFOXBtVEh0UFZUU1Jscm5CLy9CUWxZd3JmdUVGCmJ6N2dyNTZZRCtndmNvanVGdmZjRDlCNFg1a1gxazUvaHlqZER6dnZBb0dBTWZYU1Fyc3JYYUdCcFlPaGNROTcKMjhHaCs1VnNnbzg3Y1JUdkdzRVZvSDA0S0NmWXhhMFBzd1hkQmo5dVdCNDFlbUVVc0xkTm9XNENXUVNSd3RHYwoxc1k5ZGdzMm9GWmszZnQ1K1NTekc3NmNpWkt2SmNnem0zM0JZRDJBOUpoaWZ6aTlhTm9DZDdJdFVGVWxyT0tqCm5VcmRGaGpnbk1ndmNYcVVQS0JRb1FFQ2dZQUxmK3FBSmFTZE14c216eVVmcmd6Vzc3NWhxbS83dEdSMjVvWVoKTmgyWVdUaSt3WHF4dXVrclRnQUZWbGk2aWxIRXh6ZlZQbjNDeExjckVId01zUkFLZFZkNGp1QmRiM1N0VW91eQp0UkwvMjJ2R2dpUk1ZZUp3SzVUanRqMkdPczlVUTNKbldxZU9kVHNDTW1EdVdnc2V6d0xIV1NTMU9LV21rNGY3CkJ6b1FjUUtCZ1FDK1FUakJTOEx3TkFHU0xwSkkxd2NzTDJJR1Z2NXBNRkhRbHNoK2tFT3FVTVZzUUl5UzlHTDkKZ2NWUTVVbnhKak41TmxQT2RVMFY4cHordjZVQTE5TkFPNkVHTitzc0wrZzhZSC8yUTBsTEpEWVlXc002aEVFeAoxYkIxd1dTMTlnbkhneGpCZHZtSFZ1VlpRd25JSlc5UmhtZDBpcEY3WTdrV0tyaUdUTFFiK0E9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= diff --git a/certs-dir/kube-proxy.pem b/certs-dir/kube-proxy.pem new file mode 100644 index 0000000..b612560 --- /dev/null +++ b/certs-dir/kube-proxy.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID/zCCAuegAwIBAgIUXqnNf8S64Cil4KL8JHAy0rUsbKQwDQYJKoZIhvcNAQEL +BQAwaDELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9y +dGxhbmQxEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAkNBMRMwEQYDVQQD +EwpLdWJlcm5ldGVzMB4XDTE4MDcwNDAxNTEwMFoXDTE5MDcwNDAxNTEwMFowgYkx +CzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZPcmVnb24xETAPBgNVBAcTCFBvcnRsYW5k +MRwwGgYDVQQKExNzeXN0ZW06bm9kZS1wcm94aWVyMRwwGgYDVQQLExNLdWJlcm5l +dGVzIC0gQ2VudE9TMRowGAYDVQQDExFzeXN0ZW06a3ViZS1wcm94eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBANf1+FPEX/qA8I6pjN7MXJCP2DO4YNES +V9mJSpPujfm9MaGnDk9k87QeWOaC70Pogo/+8tTRiSEUs7QonqoYX9FW6fiFF69H +o9c0hgDypDyLNhvsyBoHHja9tk8ih4KtFtDqYUH2pUTodPaO8ZtDbNEA0rLjQmHI +nYuT2ZS9TABnrfZW6NN842LP32FXg/i8+uqUh9yLm+ZAZm25kyvqvjIiDGCqml4B +V75K6HiaPFNz6RZByd+eN3r97xif3rfphTAxb4CK0HsK4xapXQ/eyXrukRKrBVOK +trPJVaE/6jSblfVJUvGEqFwIArz8ZERY2AAzLnDTWYf0+QY9oOaYe98CAwEAAaN/ +MH0wDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD +AjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBTaJpFsQn0I1NV03VIlhc10iIPpwzAf +BgNVHSMEGDAWgBTfqs3yL4KGoVHsruKsx0SoHa/5/DANBgkqhkiG9w0BAQsFAAOC 
+AQEAxd6oYnzCWgEXmQfIeAhzOpX6uEmqjj56jfOCTFwwgbPrBkD3cdJmFjX8ZUiS +vL1GaLch5obiCWAbLiRTk1Q0TLQaN2BIFPp79j4XWrehV2rbdcT5N4z2FsFHfph+ +a4Cuxsh7dsh9tGWRqyZ+aHCucMB9u6pdWzHwZXvf6iQz8HI8hswmIYHpHHXn6p+J +kv8IROYYtZOni8mcFuFryTKcSm/xEn01LwqwRKpQQ4pRaLw+P+7m6VUC2ySUrIyc +Js0bKGGpYnR4f/Y8RuQJ6shr3WUJklzNn5wwKIUxKri4jcEjWqt+AzlyjB4xGoQP +FMuM1gq7JwKmETdffoN+vnVd7A== +-----END CERTIFICATE----- diff --git a/certs-dir/kube-scheduler-csr.json b/certs-dir/kube-scheduler-csr.json new file mode 100644 index 0000000..025c533 --- /dev/null +++ b/certs-dir/kube-scheduler-csr.json @@ -0,0 +1,16 @@ +{ + "CN": "system:kube-scheduler", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "US", + "L": "Portland", + "O": "system:kube-scheduler", + "OU": "Kubernetes - CentOS", + "ST": "Oregon" + } + ] +} diff --git a/certs-dir/kube-scheduler-key.pem b/certs-dir/kube-scheduler-key.pem new file mode 100644 index 0000000..db093b0 --- /dev/null +++ b/certs-dir/kube-scheduler-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAr7wRBULEE61hUDCaaAvpFYiFT+2YFbR/mtAnqikGzeaa2XoZ +rGdIUKbZOOkYlfW1cGMSuGR8td0GehDphouL7la+mYzasIjBIGBi3i/YWb1CAKwC +m2uEuhotqjPbP6pHK2fbNV4Fgsq2/x/l3WV3TTf20cNf67eRrKN/5evdBPRkNrmN +2if4O1WZMkOuUdP30bKCdWEM1cv7VFFnJuPADdluiotaPTES9G/iXKNJ6E0SCRTo +EoFmq+H1/WBM18g7L5qWxAkM1ZGu1ZbHZnRaT6XMW+4tE2vTxGvu3ID5McbR1zU4 +wi2pLqf/iUEbkT8U0E+mIVdk0vtuiU9zyDNj+wIDAQABAoIBAB31DWG8M/i2wbQN +1QSPozhMgY1Sj26Dyoz1lw1m2QYYnaUdL2s4YBd/meAdvFcz/64c9YABgVDvUL9W +Srjo6P3EQ9GhNVaKuYFvbOs7dMI3hRzgNcGQ3ptrAwXUuTdtK5y2o6PBV2cob3Gm +WmUXevLq62Ruzdc8/9RSOudTAeZqCp1oeW7v+EzZMRARqo4DS+KJM/kVSw9n9YdV +0+yxDQURm4QYKAQ9UthcBeY64GqeqMmh5X1+RTOiHMPWRNhrkAtbxFk3MgQ5nlFJ +1kOscbdh4pGc5Nz5WEzFDB6g/SsCz/clt4f+GPeLRzrfoy2adh6Fx7p8iQe1Pc6s +kleKdwECgYEA3EgHZ9ThmW7cLqgC9mjcoWBtF6VNWgNc4qSvtJ34IQEE16b1bxYT +ntIgw2mZJmAgcmeBzv2kpv4K5XWclhzHADTied6M2wChJNfQNuEb+jKz/G8QVQKT +2AJg2azlkQm57B7XfY6rST235G2WebLQMKFCjsIM6IPoiqfxFpdrr2ECgYEAzDri +YxP/5yqqCumDqPonx/biGC1DimKK2OiQ2W6AKMNaNoSLSKJ0pB/EwfTyVrv7arzr +M34uuHxzykaeNcxRmoLtR/P6PdosJmJpis2xV94rDbbNA492zAwp2/+8f/zJ081R +NFVio6N7RTioBNSuDMP/SoLZHbZpYVyX/lJ93NsCgYEAyFbqBVDjbPwaXISJTTnJ +jyt3jE3Wr8rnK2nEM2yIQtv+X3OFPGbDqWpKSe88Tl6Wo/XnZS3iFc5ucooow42g +n1t5roTtbjCAXlW5FHBfVYnPkAIixG33sDlZhB9vGh3SbiEOsy6SMaZkHOheNoie +N9wyAEwUGVP+pjxfU8CbO4ECgYEAxN6Scp3J1kXbSs+VOiOCi5Eim9muPM0HOVbe +oqqRYjoGUX1tYqQvujJhkfKUVSW6k0bvl2CwmuhlGfz+7684jrBdGTpH5K3WV14Q +jVwA80nXyM2JTUBTbRjglREt3VaKG+DUwZT5k/K4lp6p4JY//VBGXMZLkwfPB6qh +XeiZKXcCgYAX/CVgcRJ6UZUbPc8DOgkMKIJvGogKxtR4cZFTt1G6fERdwE3pBSbq +4qvzd4n1XlqLzatJ1NzIkIQN15009/YGrv5JrDYbRCgLnMcKHe1/lvv/TaMay8QO +ulvkXaGtxlZyZSPhEe5H2EiEoveG1klDlx+uW/uGTJWtkBy/r2sTRA== +-----END RSA PRIVATE KEY----- diff --git a/certs-dir/kube-scheduler.csr b/certs-dir/kube-scheduler.csr new file mode 100644 index 0000000..14a6862 --- /dev/null +++ b/certs-dir/kube-scheduler.csr @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIC1TCCAb0CAQAwgY8xCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZPcmVnb24xETAP +BgNVBAcTCFBvcnRsYW5kMR4wHAYDVQQKExVzeXN0ZW06a3ViZS1zY2hlZHVsZXIx +HDAaBgNVBAsTE0t1YmVybmV0ZXMgLSBDZW50T1MxHjAcBgNVBAMTFXN5c3RlbTpr +dWJlLXNjaGVkdWxlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK+8 +EQVCxBOtYVAwmmgL6RWIhU/tmBW0f5rQJ6opBs3mmtl6GaxnSFCm2TjpGJX1tXBj +ErhkfLXdBnoQ6YaLi+5WvpmM2rCIwSBgYt4v2Fm9QgCsAptrhLoaLaoz2z+qRytn +2zVeBYLKtv8f5d1ld0039tHDX+u3kayjf+Xr3QT0ZDa5jdon+DtVmTJDrlHT99Gy +gnVhDNXL+1RRZybjwA3ZboqLWj0xEvRv4lyjSehNEgkU6BKBZqvh9f1gTNfIOy+a +lsQJDNWRrtWWx2Z0Wk+lzFvuLRNr08Rr7tyA+THG0dc1OMItqS6n/4lBG5E/FNBP 
+piFXZNL7bolPc8gzY/sCAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQCZu7jNPSkf +q2TBwTZKDyUpXa09Cr39QV1SCBUh4x41byri14gZSLkvCtVW/vBrff4K4VApdX1C +RdqYZu8vmn4mDGaFCgN76+q9lRM56yK3Xx8Y/iJhPwVHFLkttDMtVyJuttf3Mzea +Iv2jN8gVaHet+XdZPRKRTLzGm9azUA4fWA3MnFAcCxuEwbAf9g9EwppFYrhKpEvc +MbsOR9zzqHc0JqS9Ss6pccW4sWR4K/m2xz8WZDvjFyyE5qDarTorbxzleJdiABbj +w6QD1JJTjhNexh3bb04zFgjWaBSzxp5y5GtF9E2Rtg36j1j7uCtO5+i2Zt6grXDK +zJS0P1DWCUmm +-----END CERTIFICATE REQUEST----- diff --git a/certs-dir/kube-scheduler.kubeconfig b/certs-dir/kube-scheduler.kubeconfig new file mode 100644 index 0000000..da955d2 --- /dev/null +++ b/certs-dir/kube-scheduler.kubeconfig @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR4RENDQXF5Z0F3SUJBZ0lVS2k0Rm5vaytkOG1OLytUdHR0a1VMd2QwbW53d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2FERUxNQWtHQTFVRUJoTUNWVk14RHpBTkJnTlZCQWdUQms5eVpXZHZiakVSTUE4R0ExVUVCeE1JVUc5eQpkR3hoYm1ReEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhDekFKQmdOVkJBc1RBa05CTVJNd0VRWURWUVFECkV3cExkV0psY201bGRHVnpNQjRYRFRFNE1EY3dOREF4TlRFd01Gb1hEVEl6TURjd016QXhOVEV3TUZvd2FERUwKTUFrR0ExVUVCaE1DVlZNeER6QU5CZ05WQkFnVEJrOXlaV2R2YmpFUk1BOEdBMVVFQnhNSVVHOXlkR3hoYm1ReApFekFSQmdOVkJBb1RDa3QxWW1WeWJtVjBaWE14Q3pBSkJnTlZCQXNUQWtOQk1STXdFUVlEVlFRREV3cExkV0psCmNtNWxkR1Z6TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUE3WDlQR1R4MnQ0NWgKb2NIME9tT0hHS2plN1ZkUXBCL012d0RkYkFadnplbGlqK3RXZjROUlIyNmYvaCt2aUN6bllFWGJPN251bEgvYwovL1FLd3NZak9kbnMzR3JNREsxbUNzanFTS3VvQmZxRWJZVVBHeDc4a2s5UHUzZGV0VFVRclN3djBtd1BnaDdjCmkxMU11QkVVS3UzRVR0bnpOU1AvRTlkZ0w0SHBrelNJMFNQZFpRWVd0S091RFNEYzljQ01jRGRwYVVXU2pKNVkKZ0FwdlhIMmxnTHVpYldZK0VpWEl6WFI0cm1OSklZMHRKNnlnMU9wTkhZbU82SUpHM3FBRytOYXBOb21RdElHbQpnWGJtQy9CQzkva0VjdzdFVDZPQjl0T3ZTRjFRME5LZm9TS0ExUTZjanh3RUVEblh3bHh6ZUk4eUVMOWZWVjJVCjFaUTJnRzQwZFFJREFRQUJvMll3WkRBT0JnTlZIUThCQWY4RUJBTUNBUVl3RWdZRFZSMFRBUUgvQkFnd0JnRUIKL3dJQkFqQWRCZ05WSFE0RUZnUVUzNnJOOGkrQ2hxRlI3Szdpck1kRXFCMnYrZnd3SHdZRFZSMGpCQmd3Rm9BVQozNnJOOGkrQ2hxRlI3Szdpck1kRXFCMnYrZnd3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUNhVVhxeFRXUktjCkxIakNQYXVuMFR3cFhJTk8wMC94L3o5RnBqNUlocndzVktsbytxdW9iamlWeTlWTWY3UVJsYnpWaTBIRlpJU3MKcUJNQWNITnZCeE9tdUNBZTJna0hEV0V2WTNHL1R3aksvL1IraitkOUVOMTBXME5KZEZUQlRyL056T1FVZzVZZQpQbG9zelB4MEpwaTJvTXBUQ2sxQWFoV1A5eEJnQkZWQitWYW8zeTdtb0JHb05pWjM0OHR3NDhPWHA3cEc1U2FnCkE2L0E1WXpHSjRmWU1telU2NXF4ZGROYTMrYTJYbGVZeXk4ZzRvbStmRHVLVDU2SGlaaVBRS0s4T1FXNXNwNTEKOTh4a1pQUVdQbHlmVVI1aURVN3psNHF6VDlQSzU4bUZWRFRibmYzZys5aUtUeUF2NTB2eWZsa0c1MjJzeGRpNQpURkJQUEluWjFqST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server: https://127.0.0.1:6443 + name: kube-cluster +contexts: +- context: + cluster: kube-cluster + user: system:kube-scheduler + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: system:kube-scheduler + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVCVENDQXUyZ0F3SUJBZ0lVYkVkU2RFUFAwajBqWURveXJmV2hsdU9sOWo0d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2FERUxNQWtHQTFVRUJoTUNWVk14RHpBTkJnTlZCQWdUQms5eVpXZHZiakVSTUE4R0ExVUVCeE1JVUc5eQpkR3hoYm1ReEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhDekFKQmdOVkJBc1RBa05CTVJNd0VRWURWUVFECkV3cExkV0psY201bGRHVnpNQjRYRFRFNE1EY3dOREF4TlRFd01Gb1hEVEU1TURjd05EQXhOVEV3TUZvd2dZOHgKQ3pBSkJnTlZCQVlUQWxWVE1ROHdEUVlEVlFRSUV3WlBjbVZuYjI0eEVUQVBCZ05WQkFjVENGQnZjblJzWVc1awpNUjR3SEFZRFZRUUtFeFZ6ZVhOMFpXMDZhM1ZpWlMxelkyaGxaSFZzWlhJeEhEQWFCZ05WQkFzVEUwdDFZbVZ5CmJtVjBaWE1nTFNCRFpXNTBUMU14SGpBY0JnTlZCQU1URlhONWMzUmxiVHByZFdKbExYTmphR1ZrZFd4bGNqQ0MKQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFLKzhFUVZDeEJPdFlWQXdtbWdMNlJXSQpoVS90bUJXMGY1clFKNm9wQnMzbW10bDZHYXhuU0ZDbTJUanBHSlgxdFhCakVyaGtmTFhkQm5vUTZZYUxpKzVXCnZwbU0yckNJd1NCZ1l0NHYyRm05UWdDc0FwdHJoTG9hTGFvejJ6K3FSeXRuMnpWZUJZTEt0djhmNWQxbGQwMDMKOXRIRFgrdTNrYXlqZitYcjNRVDBaRGE1amRvbitEdFZtVEpEcmxIVDk5R3lnblZoRE5YTCsxUlJaeWJqd0EzWgpib3FMV2oweEV2UnY0bHlqU2VoTkVna1U2QktCWnF2aDlmMWdUTmZJT3krYWxzUUpETldScnRXV3gyWjBXaytsCnpGdnVMUk5yMDhScjd0eUErVEhHMGRjMU9NSXRxUzZuLzRsQkc1RS9GTkJQcGlGWFpOTDdib2xQYzhnelkvc0MKQXdFQUFhTi9NSDB3RGdZRFZSMFBBUUgvQkFRREFnV2dNQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdncgpCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJTcDMrT3hzUTR6ZWdzNEZBYWc3THVSCmVLVkhQekFmQmdOVkhTTUVHREFXZ0JUZnFzM3lMNEtHb1ZIc3J1S3N4MFNvSGEvNS9EQU5CZ2txaGtpRzl3MEIKQVFzRkFBT0NBUUVBT1VKWElkYmxkTmVwMDI1WjNXQ1dNdGZ3YTltcytodzR4dkJNZDBtOC9VQzFZRkNBalgyZQpGOHF3aEZjd20rNFM1bGVvaFVoMUozdUY2L3N0czFkVFFwWVdPNzRKTTJ5d3VmY2gvaWh0THlCUlVMM2Uvb1NaCmVKeXhKdkR2Z0VIWHUxL1h1UzRvVVpmMmtVTFZweXdCaUxzVmR3WVd4WnlIMHJScFV3b0FkbDNRNHd2MmxKN2YKRVJ1NFAxcnNUQWtFMlNEb3ROczR3d0FraGJINVM0enlOT0MrUE8waE9pSUhUZ1FPVXFkZUZKZ2tDVTdIUFhhTAplb3VPbzlhc3JRbUJZMXNkSVkvUFJCTkQ2WWRVYURZWlp1QnNoQllGV3ZpZWY5N2lhUFp3VWR0Si9xenNSRFRTCjNYRWV1WDBya0tnN05OOVEwWjJ2RzF0MXUxTkU4V3Q0YkE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBcjd3UkJVTEVFNjFoVURDYWFBdnBGWWlGVCsyWUZiUi9tdEFucWlrR3plYWEyWG9aCnJHZElVS2JaT09rWWxmVzFjR01TdUdSOHRkMEdlaERwaG91TDdsYSttWXphc0lqQklHQmkzaS9ZV2IxQ0FLd0MKbTJ1RXVob3RxalBiUDZwSEsyZmJOVjRGZ3NxMi94L2wzV1YzVFRmMjBjTmY2N2VScktOLzVldmRCUFJrTnJtTgoyaWY0TzFXWk1rT3VVZFAzMGJLQ2RXRU0xY3Y3VkZGbkp1UEFEZGx1aW90YVBURVM5Ry9pWEtOSjZFMFNDUlRvCkVvRm1xK0gxL1dCTTE4ZzdMNXFXeEFrTTFaR3UxWmJIWm5SYVQ2WE1XKzR0RTJ2VHhHdnUzSUQ1TWNiUjF6VTQKd2kycExxZi9pVUVia1Q4VTBFK21JVmRrMHZ0dWlVOXp5RE5qK3dJREFRQUJBb0lCQUIzMURXRzhNL2kyd2JRTgoxUVNQb3poTWdZMVNqMjZEeW96MWx3MW0yUVlZbmFVZEwyczRZQmQvbWVBZHZGY3ovNjRjOVlBQmdWRHZVTDlXClNyam82UDNFUTlHaE5WYUt1WUZ2Yk9zN2RNSTNoUnpnTmNHUTNwdHJBd1hVdVRkdEs1eTJvNlBCVjJjb2IzR20KV21VWGV2THE2MlJ1emRjOC85UlNPdWRUQWVacUNwMW9lVzd2K0V6Wk1SQVJxbzREUytLSk0va1ZTdzluOVlkVgowK3l4RFFVUm00UVlLQVE5VXRoY0JlWTY0R3FlcU1taDVYMStSVE9pSE1QV1JOaHJrQXRieEZrM01nUTVubEZKCjFrT3NjYmRoNHBHYzVOejVXRXpGREI2Zy9Tc0N6L2NsdDRmK0dQZUxSenJmb3kyYWRoNkZ4N3A4aVFlMVBjNnMKa2xlS2R3RUNnWUVBM0VnSFo5VGhtVzdjTHFnQzltamNvV0J0RjZWTldnTmM0cVN2dEozNElRRUUxNmIxYnhZVApudElndzJtWkptQWdjbWVCenYya3B2NEs1WFdjbGh6SEFEVGllZDZNMndDaEpOZlFOdUViK2pLei9HOFFWUUtUCjJBSmcyYXpsa1FtNTdCN1hmWTZyU1QyMzVHMldlYkxRTUtGQ2pzSU02SVBvaXFmeEZwZHJyMkVDZ1lFQXpEcmkKWXhQLzV5cXFDdW1EcVBvbngvYmlHQzFEaW1LSzJPaVEyVzZBS01OYU5vU0xTS0owcEIvRXdmVHlWcnY3YXJ6cgpNMzR1dUh4enlrYWVOY3hSbW9MdFIvUDZQZG9zSm1KcGlzMnhWOTRyRGJiTkE0OTJ6QXdwMi8rOGYvekowODFSCk5GVmlvNk43UlRpb0JOU3VETVAvU29MWkhiWnBZVnlYL2xKOTNOc0NnWUVBeUZicUJWRGpiUHdhWElTSlRUbkoKanl0M2pFM1dyOHJuSzJuRU0yeUlRdHYrWDNPRlBHYkRxV3BLU2U4OFRsNldvL1huWlMzaUZjNXVjb29vdzQyZwpuMXQ1cm9UdGJqQ0FYbFc1RkhCZlZZblBrQUlpeEczM3NEbFpoQjl2R2gzU2JpRU9zeTZTTWFaa0hPaGVOb2llCk45d3lBRXdVR1ZQK3BqeGZVOENiTzRFQ2dZRUF4TjZTY3AzSjFrWGJTcytWT2lPQ2k1RWltOW11UE0wSE9WYmUKb3FxUllqb0dVWDF0WXFRdnVqSmhrZktVVlNXNmswYnZsMkN3bXVobEdmeis3Njg0anJCZEdUcEg1SzNXVjE0UQpqVndBODBuWHlNMkpUVUJUYlJqZ2xSRXQzVmFLRytEVXdaVDVrL0s0bHA2cDRKWS8vVkJHWE1aTGt3ZlBCNnFoClhlaVpLWGNDZ1lBWC9DVmdjUko2VVpVYlBjOERPZ2tNS0lKdkdvZ0t4dFI0Y1pGVHQxRzZmRVJkd0UzcEJTYnEKNHF2emQ0bjFYbHFMemF0SjFOeklrSVFOMTUwMDkvWUdydjVKckRZYlJDZ0xuTWNLSGUxL2x2di9UYU1heThRTwp1bHZrWGFHdHhsWnlaU1BoRWU1SDJFaUVvdmVHMWtsRGx4K3VXL3VHVEpXdGtCeS9yMnNUUkE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= diff --git a/certs-dir/kube-scheduler.pem b/certs-dir/kube-scheduler.pem new file mode 100644 index 0000000..e1cd2fd --- /dev/null +++ b/certs-dir/kube-scheduler.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEBTCCAu2gAwIBAgIUbEdSdEPP0j0jYDoyrfWhluOl9j4wDQYJKoZIhvcNAQEL +BQAwaDELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9y +dGxhbmQxEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAkNBMRMwEQYDVQQD +EwpLdWJlcm5ldGVzMB4XDTE4MDcwNDAxNTEwMFoXDTE5MDcwNDAxNTEwMFowgY8x +CzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZPcmVnb24xETAPBgNVBAcTCFBvcnRsYW5k +MR4wHAYDVQQKExVzeXN0ZW06a3ViZS1zY2hlZHVsZXIxHDAaBgNVBAsTE0t1YmVy +bmV0ZXMgLSBDZW50T1MxHjAcBgNVBAMTFXN5c3RlbTprdWJlLXNjaGVkdWxlcjCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK+8EQVCxBOtYVAwmmgL6RWI +hU/tmBW0f5rQJ6opBs3mmtl6GaxnSFCm2TjpGJX1tXBjErhkfLXdBnoQ6YaLi+5W +vpmM2rCIwSBgYt4v2Fm9QgCsAptrhLoaLaoz2z+qRytn2zVeBYLKtv8f5d1ld003 +9tHDX+u3kayjf+Xr3QT0ZDa5jdon+DtVmTJDrlHT99GygnVhDNXL+1RRZybjwA3Z +boqLWj0xEvRv4lyjSehNEgkU6BKBZqvh9f1gTNfIOy+alsQJDNWRrtWWx2Z0Wk+l +zFvuLRNr08Rr7tyA+THG0dc1OMItqS6n/4lBG5E/FNBPpiFXZNL7bolPc8gzY/sC +AwEAAaN/MH0wDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBSp3+OxsQ4zegs4FAag7LuR +eKVHPzAfBgNVHSMEGDAWgBTfqs3yL4KGoVHsruKsx0SoHa/5/DANBgkqhkiG9w0B 
+AQsFAAOCAQEAOUJXIdbldNep025Z3WCWMtfwa9ms+hw4xvBMd0m8/UC1YFCAjX2e +F8qwhFcwm+4S5leohUh1J3uF6/sts1dTQpYWO74JM2ywufch/ihtLyBRUL3e/oSZ +eJyxJvDvgEHXu1/XuS4oUZf2kULVpywBiLsVdwYWxZyH0rRpUwoAdl3Q4wv2lJ7f +ERu4P1rsTAkE2SDotNs4wwAkhbH5S4zyNOC+PO0hOiIHTgQOUqdeFJgkCU7HPXaL +eouOo9asrQmBY1sdIY/PRBND6YdUaDYZZuBshBYFWvief97iaPZwUdtJ/qzsRDTS +3XEeuX0rkKg7NN9Q0Z2vG1t1u1NE8Wt4bA== +-----END CERTIFICATE----- diff --git a/certs-dir/kubernetes-csr.json b/certs-dir/kubernetes-csr.json new file mode 100644 index 0000000..34b592b --- /dev/null +++ b/certs-dir/kubernetes-csr.json @@ -0,0 +1,16 @@ +{ + "CN": "kubernetes", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "US", + "L": "Portland", + "O": "Kubernetes", + "OU": "Kubernetes - CentOS", + "ST": "Oregon" + } + ] +} diff --git a/certs-dir/kubernetes-key.pem b/certs-dir/kubernetes-key.pem new file mode 100644 index 0000000..8cd1ec9 --- /dev/null +++ b/certs-dir/kubernetes-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAy85wXKLOv+pNzOxqX/MxI6gkedi1JXpj7c3sxHlFiaVrFLBT ++Yz1GkDMWLs0Us/4UsrDu57PnIW1SuLv2UDTNOopWm05Mm7Af70QBbqWxbzqqaF5 +GE2V7aZZebXrEUr+E9/WngpWKP0E3hGqLBwyKmhyfGsKvhqkIinZqkbXhIxnbDkN +ya9zq46CwAUMjgUVP5khdq8hhvfjAPmnrLSUDY7iOiT2+S5dDuu8qs53B+2xdiMT +KQHLjb2YSrzE8BO+ZZcDfvZL3A+OiyYInh1IfdGIEnuccnyuMU40108rKd9nscLd +uSGWISMMvrNWoSJ+Mn5vDB1NcJFWj42JxspRvQIDAQABAoIBAQCLH2viQJ0DsuzT +A1uGDhuwLHph1sWpRMfsMFL8t4/5azsSj19zjX5PIoab14bbNO5djd5ZOAqe3UMp +UIrL8g17JJGAEekmfSXYzPWgA4QijVhe3UQti+DZrmGLzNwfHOMeLAF0NrgkGLv2 +33Yb0E+KVWw5dFSL+uGqFIlqTBuYdYpJXtI2W4ZsqGepuT/0IBxOpOBhgbbJKAFZ +eQh1iWHKe+JnUDs28n/TJL5ceQ9Mkdu2oWcokGwN7estD5BBkdb4tv5QBbr4xMAN +5ORDNXCXeg93Cn3PuU73lk4AYusons3IcsqDuLGTSAyAvpbtWp8nMhLbSsya4JM6 +zuMyOcqBAoGBAM6+vPzaICmNMm2nMZO2IYQ8/Vuk9TcLAXpbZIVnDEe6mrJZA9Fa +Cci1lPoSeCXwe+g959tMFySsm0M5/ZdarycSzDFPZeFxqqhvnpLd2SoNQ8IAVmXz +VcBnYai9JbiL+T3w37OYAaDh/Ltdlpj9m9/Nna7x5j5GVDgoQepRT3zdAoGBAPxc +eRNlNl8XA2DyHW9e5rp5wgJb/RsdBM8aOXIA+phIdgEnynqtaQl8t4Ks5SXCK1wR +r12YoPHXCMr8U0UtDSaYk4mUq/l3IL80FAitVSTcUCOIfTqapa4oMqjewMtpscdP +jCyDPpaDe3HNosJ67bE0VA+ScvA1gr3k59koEuphAoGBAMFzko0aYVvgBd66wlYP +5x7AnuWA3f6qKrDeEZoOCVyYzCWzyzG6iUsoNYUlgQ0nEBoIywnVEha79/AYtCRK +s3fhS/Wb8J3Z6TEAeKW+NnT1woAmJrHw1j1ZvvTQQBqFxythV0DKLEPpEb4UXVU8 +gP0Sfch0CtgcoLOHX94sRS91AoGAeFpFp9lSJ+FazCZLoWyaDv64ZtWmDQM24uo3 +0UXkPTH5jS2D5CPFfy/KePipw+spMJyCjNmYkB2kZOIXeFs29Vw4qJmNtsh4hpd4 +ona/r3eehIO4bXWJZFhpaJ3rRlde4JXwXN4uwCtueOTAJY3ow+UX3eFTDXoHozaC +187DzqECgYB+Nq+Y3C7gXfpMv0JHayeV6j3rXQd4na1tFb8skCUOWqfC91raJiik +wiTH2na3hC2eFkcg+Egg9+CG0ZaPIEEXslZ4FupMA0txdZAHrV68DrZMRQGQAFc4 +wBi362aVVxtZer5NYeSGl2lNPf4er2F+y6YzUkrHaSuKdcmlaijKUw== +-----END RSA PRIVATE KEY----- diff --git a/certs-dir/kubernetes.csr b/certs-dir/kubernetes.csr new file mode 100644 index 0000000..8f945fe --- /dev/null +++ b/certs-dir/kubernetes.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICvjCCAaYCAQAweTELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8G +A1UEBxMIUG9ydGxhbmQxEzARBgNVBAoTCkt1YmVybmV0ZXMxHDAaBgNVBAsTE0t1 +YmVybmV0ZXMgLSBDZW50T1MxEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLznBcos6/6k3M7Gpf8zEjqCR52LUlemPt +zezEeUWJpWsUsFP5jPUaQMxYuzRSz/hSysO7ns+chbVK4u/ZQNM06ilabTkybsB/ +vRAFupbFvOqpoXkYTZXtpll5tesRSv4T39aeClYo/QTeEaosHDIqaHJ8awq+GqQi +KdmqRteEjGdsOQ3Jr3OrjoLABQyOBRU/mSF2ryGG9+MA+aestJQNjuI6JPb5Ll0O +67yqzncH7bF2IxMpAcuNvZhKvMTwE75llwN+9kvcD46LJgieHUh90YgSe5xyfK4x +TjTXTysp32exwt25IZYhIwy+s1ahIn4yfm8MHU1wkVaPjYnGylG9AgMBAAGgADAN +BgkqhkiG9w0BAQsFAAOCAQEAR7n+yC+uXE49FZ/+NdwGpXWJVh5Bc2CYJ5sbwe3a 
+YtQu/4X5XQLKCGzWuBW+Q7taw800jrc8D2zNC4VtlLIoKH6D5phiO2pwq/4g8cl4 +ax4LRXoOrbcDdR6+WMCtPHmod7EKoSIKrzYSiL3Axcil06e3SOSi82cqa2q05akE +FwR5VXAgIr7RvVNeADn7t130x6HHap3AcGlqRrK4UuNDWizOsblsri/sx0F05a5C +qDmcSnoLttF3pIasJm13XJzcJ/b7jDFDJcJDntbMb0mTS7NTEWxHqEsZ/VfU+Jmh +2Ml5S0HuaC1Z4Ws+qMY4AhWbZBzxzVDy1zqGflUdR/SI4A== +-----END CERTIFICATE REQUEST----- diff --git a/certs-dir/kubernetes.pem b/certs-dir/kubernetes.pem new file mode 100644 index 0000000..09f96ed --- /dev/null +++ b/certs-dir/kubernetes.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEKTCCAxGgAwIBAgIUfS7PAfCmBij80ccLBuoOXpzKHbgwDQYJKoZIhvcNAQEL +BQAwaDELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9y +dGxhbmQxEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAkNBMRMwEQYDVQQD +EwpLdWJlcm5ldGVzMB4XDTE4MDcwNDAxNTEwMFoXDTE5MDcwNDAxNTEwMFoweTEL +MAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9ydGxhbmQx +EzARBgNVBAoTCkt1YmVybmV0ZXMxHDAaBgNVBAsTE0t1YmVybmV0ZXMgLSBDZW50 +T1MxEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDLznBcos6/6k3M7Gpf8zEjqCR52LUlemPtzezEeUWJpWsUsFP5jPUa +QMxYuzRSz/hSysO7ns+chbVK4u/ZQNM06ilabTkybsB/vRAFupbFvOqpoXkYTZXt +pll5tesRSv4T39aeClYo/QTeEaosHDIqaHJ8awq+GqQiKdmqRteEjGdsOQ3Jr3Or +joLABQyOBRU/mSF2ryGG9+MA+aestJQNjuI6JPb5Ll0O67yqzncH7bF2IxMpAcuN +vZhKvMTwE75llwN+9kvcD46LJgieHUh90YgSe5xyfK4xTjTXTysp32exwt25IZYh +Iwy+s1ahIn4yfm8MHU1wkVaPjYnGylG9AgMBAAGjgbkwgbYwDgYDVR0PAQH/BAQD +AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA +MB0GA1UdDgQWBBSjYtUJ8CtwIZgMJwPsivXwjfkuMTAfBgNVHSMEGDAWgBTfqs3y +L4KGoVHsruKsx0SoHa/5/DA3BgNVHREEMDAuggCCEmt1YmVybmV0ZXMuZGVmYXVs +dIcECiAAAYcECvAACocECvAAC4cEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAcNh4 +lI4xCNpKjorfL4yl9ATQuwF70LlUftubB/n4jQudonD+dGV5N5RtihJp6NPJ0b00 ++EaPEF5W9W01zrecAB+WnN+ihQ6//AEXfxuakP6KgTMQTKqZ6XgbGqe6/08stW/j +Lhl4LJPhNotBQ9QXgFrHgTX4TvAV1/Y8luwIIwjCLSMb4Mu6SJ3Lsf1NZ9aUfKT6 +ZqE/SjK5E25MwjqBr4gmMoqzRGQv4cKng/XzXyHhWKwEDXZ5el1/4dALnWSkImIq +btOEjBLQq4MXPvVbowHtNpPOJTlTvzpT+jNzCCGQcLLIAsAYeYTPKHWHGNtwpoqP +eZ31Scj6x5ckP0QOrw== +-----END CERTIFICATE----- diff --git a/certs-dir/service-account-csr.json b/certs-dir/service-account-csr.json new file mode 100644 index 0000000..462d9c6 --- /dev/null +++ b/certs-dir/service-account-csr.json @@ -0,0 +1,16 @@ +{ + "CN": "service-accounts", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "US", + "L": "Portland", + "O": "Kubernetes", + "OU": "Kubernetes - CentOS", + "ST": "Oregon" + } + ] +} diff --git a/certs-dir/service-account-key.pem b/certs-dir/service-account-key.pem new file mode 100644 index 0000000..28bd10f --- /dev/null +++ b/certs-dir/service-account-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAtx0Z2dZbcff823ZRbT4tu3zpYIUeaE81MikMdt/Jh2Y9XhRQ +P2Jsof9uRJ1zqEIZPM3Tn9/SAWh+Jolmsg4Py4eizyGKxAFhMqcWKa+lTwZH09Ln +Cy6e9Ee96wK9kslTjCfziMhXdL6nBAWhtgOLSUkSTgLkL2gl/HHBxW66bKlIn+EY +8GHamTS9yPqCvurMgVoJaBLltbdvSWyjkzIgaslbCtoJ7myXlpMQgjPWW90iIUlv +bld1+nx7eaoJghj2Weqv1hGHc0z/qiDgMVoFFQ7BtE0pu7ctMQ/fIjh9bkKVo7Xo +5swZOJl0wd9Ey20CZPNCKXZSeG4J+gwgqllJiwIDAQABAoIBAGUUkl2Wn5oVC6bV +9HL6XOJfPLDEKxnnh3TuBexk8mlkvcF+MyIaId93EhacV+jsu+O9tvFM6y9N0D1T +obXuXze54ty2JlS2ckI0y94tEHLEA8hhZprcnHA6XhMpyx4JNq5qq1u5Q9W1HJry +xOalpDDLPmiD3hRxSG7HGPD0g8JQ7TXdhnp6qguvQJj1U45LT8nWSmfmBmV9QYaq +1zjkssuYgBARl3Dg7BoGcqGpuktVnPAe2y8dhvLfG1gQiZpn5XGq8efkJ0ZjeML+ +BzEho/jBTUhBzpec4yB0xM75o0Jj1IWd0cjPLhNXFN1jkFYOKdogrOiVLeAaZMPV +MNhsj6ECgYEA2CwHzDkC1PYvzrbE5yq2Lh/Zv1W3Vp4b17jUpBm8b1ISRqyXcHRF +43LhQX74luPUDUFaAun9xR+N7aMRiSFkVKQfPGv/JKd/ac1dasOaI19r/cKut/P0 
+qFAye/vjAHrzvwBtjEDnXNVEJKkv1jlA00a6d7t5IrIkLLmQhJNJ2BECgYEA2NnX +eLeBLspKbawMk1oTE4rclYShTRLXPszNtT7pLCUC/yncyfX0talFNfcXz12K2ivm +p3hSmZQP5zmhL7yGPYp046VbV3LtIWzhnd5mDdzqHZxpjp7Tt2L/wU8eYhTZXXWz +oQWkt51qIUTjVOL7CigIcCB9EQ3kU7x11KWRQ9sCgYEArhH+kXfTDPPVGWrZcaBW +JRwmtkjqokgc+X/vUtkagLWg2+I87CPk5ztpodkBileZOLdIKRKM87/uBM6L31XR +OcsTEA+S28w48FSbhc5Of29mQWgUUzKR7zrs/COf7pITVtmcXRUieQGOpsrKJ4Hg +7oCExMdkywiR0qX2dps4GlECgYBSoM8lxGaI+bPajlBMZWoXbDYZEuyTfbZey5Mm +mM4ocAHwUHc8zWdtQQ6KPZjo/u/Fqiy9a2+t7WkR5jexBNjx4iwgAgYIEFajAeYG +TZP9fEwI9GyUV5i8rBBgDFG7dc5Nemfz1z6oaGbkPmeRFNwrRo3Qi9CcCvsRnSIY +4O21OQKBgQCpzzWS4J46jPjaay8O4lvfVbn5u8PoobcTHQlwra/BHvctD3CSDERA +iu5do7YnHqhLgQmYYnjWePJrrOKifVKx3LIBbYSXAbBot3v5Qw5rdIfv5VKCbmq8 +uPOn3BiuuZQw9e30ZMxXUj3hNyWLqN6fcBodIqcq6R54E2tSgP2gHg== +-----END RSA PRIVATE KEY----- diff --git a/certs-dir/service-account.csr b/certs-dir/service-account.csr new file mode 100644 index 0000000..762635e --- /dev/null +++ b/certs-dir/service-account.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICxDCCAawCAQAwfzELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8G +A1UEBxMIUG9ydGxhbmQxEzARBgNVBAoTCkt1YmVybmV0ZXMxHDAaBgNVBAsTE0t1 +YmVybmV0ZXMgLSBDZW50T1MxGTAXBgNVBAMTEHNlcnZpY2UtYWNjb3VudHMwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3HRnZ1ltx9/zbdlFtPi27fOlg +hR5oTzUyKQx238mHZj1eFFA/Ymyh/25EnXOoQhk8zdOf39IBaH4miWayDg/Lh6LP +IYrEAWEypxYpr6VPBkfT0ucLLp70R73rAr2SyVOMJ/OIyFd0vqcEBaG2A4tJSRJO +AuQvaCX8ccHFbrpsqUif4RjwYdqZNL3I+oK+6syBWgloEuW1t29JbKOTMiBqyVsK +2gnubJeWkxCCM9Zb3SIhSW9uV3X6fHt5qgmCGPZZ6q/WEYdzTP+qIOAxWgUVDsG0 +TSm7ty0xD98iOH1uQpWjtejmzBk4mXTB30TLbQJk80IpdlJ4bgn6DCCqWUmLAgMB +AAGgADANBgkqhkiG9w0BAQsFAAOCAQEAVCq69W5ik/fQN8v9p+3BeW8YXDqu3fLB +JQmcs+vkisbimseYiGqIMki4rt4PzdgTyh+CeYnlgB7V7BPd4Iwwn0hIZpQHgs++ +gk0Dw1b8sApraNBv7innGV0K7qeKNiWGk1hDPfGlteUaMQzI+AUBoYt8u4GDLulY +XqnR90g0wrHYbu+iqOZgdP/HrTBpMKskkB62sekgixRGMsrIuyRm3jTepJqf+lly +IyiD9k9E1BVSPT7SZUrbBbSuhkaE1gIEuZdsCYVgYpP7pKXaBvxprqF/SIhl2H/5 +I8dotCPcnBydkcr+HUkddP3E5W8khowt1pypL6P4/5+JxRz13oyz/A== +-----END CERTIFICATE REQUEST----- diff --git a/certs-dir/service-account.pem b/certs-dir/service-account.pem new file mode 100644 index 0000000..bb246b4 --- /dev/null +++ b/certs-dir/service-account.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID9DCCAtygAwIBAgIUZ2y4xZXmHejF4V/wupN82PtnOpkwDQYJKoZIhvcNAQEL +BQAwaDELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9y +dGxhbmQxEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAkNBMRMwEQYDVQQD +EwpLdWJlcm5ldGVzMB4XDTE4MDcwNDAxNTEwMFoXDTE5MDcwNDAxNTEwMFowfzEL +MAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9ydGxhbmQx +EzARBgNVBAoTCkt1YmVybmV0ZXMxHDAaBgNVBAsTE0t1YmVybmV0ZXMgLSBDZW50 +T1MxGTAXBgNVBAMTEHNlcnZpY2UtYWNjb3VudHMwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQC3HRnZ1ltx9/zbdlFtPi27fOlghR5oTzUyKQx238mHZj1e +FFA/Ymyh/25EnXOoQhk8zdOf39IBaH4miWayDg/Lh6LPIYrEAWEypxYpr6VPBkfT +0ucLLp70R73rAr2SyVOMJ/OIyFd0vqcEBaG2A4tJSRJOAuQvaCX8ccHFbrpsqUif +4RjwYdqZNL3I+oK+6syBWgloEuW1t29JbKOTMiBqyVsK2gnubJeWkxCCM9Zb3SIh +SW9uV3X6fHt5qgmCGPZZ6q/WEYdzTP+qIOAxWgUVDsG0TSm7ty0xD98iOH1uQpWj +tejmzBk4mXTB30TLbQJk80IpdlJ4bgn6DCCqWUmLAgMBAAGjfzB9MA4GA1UdDwEB +/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/ +BAIwADAdBgNVHQ4EFgQULvG/qjHA5aCxOiwZ9OnXLeW68OQwHwYDVR0jBBgwFoAU +36rN8i+ChqFR7K7irMdEqB2v+fwwDQYJKoZIhvcNAQELBQADggEBACxsoOIYYfyC +MPgDK7CrSIpaa3+dtbXrNOmSfuZIt+/SyARRK8c3H/TBPtlUJvYBDRwh2Dy85/+/ +InLfPR0dAKBY6EGNJ6mH+Yc4FJUXk6/kqbPc1zWg1oonCO9H1px1E1IZe0TAF6gK +0cGNtzR6JtxlSWqOemwFRDCKLvjdGrB5JC/OarJuYfu2tWUmwNk7Mtxr/PKGe9yv 
+TCpUh+pGJmHLnKE4B+ncG0IPI4sBAMc1MlfFLEX59Sbn9+ofem/3ue2UFt7DMD0V +n0ZgeHbNMrBfLOiBFjQPNh4QuXb2s0qsVXe8RpOc85qoIgrc30tSNKV0rqyQ3LV2 +Au20tqMPajc= +-----END CERTIFICATE----- diff --git a/certs-dir/worker-0-csr.json b/certs-dir/worker-0-csr.json new file mode 100644 index 0000000..2262a70 --- /dev/null +++ b/certs-dir/worker-0-csr.json @@ -0,0 +1,16 @@ +{ + "CN": "system:node:worker-0", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "US", + "L": "Portland", + "O": "system:nodes", + "OU": "Kubernetes - CentOS", + "ST": "Oregon" + } + ] +} diff --git a/certs-dir/worker-0-key.pem b/certs-dir/worker-0-key.pem new file mode 100644 index 0000000..3104bb7 --- /dev/null +++ b/certs-dir/worker-0-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEArmaKsGNE1WRu4XUdLBU3FogoiBD9DwQI/HKSluyIUdo94YiD ++/ehP6CmjlloQen3+MIp15ZicVrniAv5zmcMcv5iPRnV9sF8XBSS6jJS3aqh2n6F +n0BanGpSwrJJd/WcOEnnqVtryQGKHL2jPUC95xehPbhOsY1fpk124oJKoiLlEomI +R0RPaGOf24zRgpU7aueC5u5lG+147C2IMtpPxfWcGUYHmIdaU9qvcQz5/YlGSBBe +YaaiRSTb6m2Y9IXR2d6h95JnatbWcuO8U9Mhmq1uhhm+ukA2u4Jrd7NMLwEC/TG+ +hCzK3Avi2WFp5fqkXRaeJQAUFoS588XZTHVCBQIDAQABAoIBAF0zz8qUQqUIF3nF +7OEyNU6FVaPh8Ful/Gdtk4m0vuV/jLKLEn4FyNcIM/q/LUiMkUkP+uyMIfASdMnw +HICB8dVCvZ2JwR8uKBfNpvYiDscH+ZebJi4FZmdRKwmB3mGNkP9D/9oyUVgKypSx +hBuoDdUCe9lQNj4NH6DwpShIGTqsKMxnLpRgz1Ura3M275xZGR3xqQI5/t9+V2m3 +Rcpgr9GN9lvlGah7V38KLckOctkY4knNrv4TL+CQWfqVTxzwzJuxijoQJMnfZcSQ +EAth1vCULjjzW0BuGJou4ko+i+wayJZik0JV4fCpgNEg3owWYj3LhL3QEPrFoVhP ++3xQf8ECgYEAwvb4+KMZvqOeYQygh96zDVICGfzcgKgHsTLMfZl7dQx0u5QcEjHi +u6+DuwnZrXJRgXgMHWq8QDiIVxcyPOEmjc4Oa9bHY1v51BXIO78MMoi7//PFWRbv +BuocK+XtyImIAFqy++hds6KpRQWwjF0WRruXPAhOMG/ItHbP9TSLLWcCgYEA5P9/ +Shblp+F2ERdp3pvAG69UdcAQP0CT3abLO/BzIP6E6ZHxyGS8PlFLSHciBURg2fXo +eQOHsYv3bXjz80YC7Ho547YtYLmTzVttcwxmyQtQDUqg963GL6P3/eUIXReup8Hf +Z/AP2U+FksGNMs/165Nc+1z7OYd1/2v+SJRChbMCgYEAmFf30AFSep7LgTYmskSY +UM5GChyGCJpD+HijRXmK+LlU9cYT8Tu92b6aBqr5xwhPHAW8IbhnhR8nmjXr8ZpV +T/U1W5qmjsMuAkVLAzSTyWEC1kubZ7P97r88aCjwlwSwmuZEeYbmwRlXfzGk9OlA +IOEwmZ47yqlugWVdtlLqdf0CgYAQ/8pyNIrt0HXURPmt9b/oDjbl2tEZ8PBJFvWl +SG8zdtFlCVl6d8fllGFTSsyyuWW7KF8OhK2RB4Phk4YAfCUBav1bIw3xTTVknDHe +xX/n0rgplG2olTTOGozxQwxA5N4pQ+IStCZpClcvEsEpkXh++VyAqGFZ3u1qfZjN +yHu7swKBgQCngawlrXp+OZye7PIsvVmg1bE81Gsc9FCEZHIsEGUwBp868rfl+0ay +RJbEvYYkabkKuIJP6zoaHfozLqpB+dQX4xdpv8A7B3vyRbOHWlNLwdMdf/Is1W1f +fQXkX9DzPfMIUkcozmQjZyRCHVEanlqwhmstLl8/0LpMKmJu7xuA5A== +-----END RSA PRIVATE KEY----- diff --git a/certs-dir/worker-0.csr b/certs-dir/worker-0.csr new file mode 100644 index 0000000..ceaf7a0 --- /dev/null +++ b/certs-dir/worker-0.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICyzCCAbMCAQAwgYUxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZPcmVnb24xETAP +BgNVBAcTCFBvcnRsYW5kMRUwEwYDVQQKEwxzeXN0ZW06bm9kZXMxHDAaBgNVBAsT +E0t1YmVybmV0ZXMgLSBDZW50T1MxHTAbBgNVBAMTFHN5c3RlbTpub2RlOndvcmtl +ci0wMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArmaKsGNE1WRu4XUd +LBU3FogoiBD9DwQI/HKSluyIUdo94YiD+/ehP6CmjlloQen3+MIp15ZicVrniAv5 +zmcMcv5iPRnV9sF8XBSS6jJS3aqh2n6Fn0BanGpSwrJJd/WcOEnnqVtryQGKHL2j +PUC95xehPbhOsY1fpk124oJKoiLlEomIR0RPaGOf24zRgpU7aueC5u5lG+147C2I +MtpPxfWcGUYHmIdaU9qvcQz5/YlGSBBeYaaiRSTb6m2Y9IXR2d6h95JnatbWcuO8 +U9Mhmq1uhhm+ukA2u4Jrd7NMLwEC/TG+hCzK3Avi2WFp5fqkXRaeJQAUFoS588XZ +THVCBQIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBADa7iOhoCYIiTbXM/6Ob188m +6GTfi7NI12wt407jx/tRr/rIorOwH1BhkUmL79sPRA5tlrEnvBjzCU4epbjH4cjX +2NzSnFkHOV9Sf7VaBMoTrHfsA/wg7TWrXJV+UoY2Y5+2nyB4KXKaENnSIaBzZ8pD +RAFENV5/6n0o2Kk38qRa2pNz6lUdjW4xQ8w4Fs7ERvQCW1Vm005/pcCM0RsB4jgD 
+4VsG2K3v07njEHoqXqyU/bMFiFJnzjsuIfiM5De4tX80DJgCrm9ItY8Yea4qwPbu +lYmcvVb2nQXghAny69eFKGmTpn5bwCyFLBIZ1c7or3SdqxL2TYEu7IqkwvyKcvU= +-----END CERTIFICATE REQUEST----- diff --git a/certs-dir/worker-0.kubeconfig b/certs-dir/worker-0.kubeconfig new file mode 100644 index 0000000..96dd89f --- /dev/null +++ b/certs-dir/worker-0.kubeconfig @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR4RENDQXF5Z0F3SUJBZ0lVS2k0Rm5vaytkOG1OLytUdHR0a1VMd2QwbW53d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2FERUxNQWtHQTFVRUJoTUNWVk14RHpBTkJnTlZCQWdUQms5eVpXZHZiakVSTUE4R0ExVUVCeE1JVUc5eQpkR3hoYm1ReEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhDekFKQmdOVkJBc1RBa05CTVJNd0VRWURWUVFECkV3cExkV0psY201bGRHVnpNQjRYRFRFNE1EY3dOREF4TlRFd01Gb1hEVEl6TURjd016QXhOVEV3TUZvd2FERUwKTUFrR0ExVUVCaE1DVlZNeER6QU5CZ05WQkFnVEJrOXlaV2R2YmpFUk1BOEdBMVVFQnhNSVVHOXlkR3hoYm1ReApFekFSQmdOVkJBb1RDa3QxWW1WeWJtVjBaWE14Q3pBSkJnTlZCQXNUQWtOQk1STXdFUVlEVlFRREV3cExkV0psCmNtNWxkR1Z6TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUE3WDlQR1R4MnQ0NWgKb2NIME9tT0hHS2plN1ZkUXBCL012d0RkYkFadnplbGlqK3RXZjROUlIyNmYvaCt2aUN6bllFWGJPN251bEgvYwovL1FLd3NZak9kbnMzR3JNREsxbUNzanFTS3VvQmZxRWJZVVBHeDc4a2s5UHUzZGV0VFVRclN3djBtd1BnaDdjCmkxMU11QkVVS3UzRVR0bnpOU1AvRTlkZ0w0SHBrelNJMFNQZFpRWVd0S091RFNEYzljQ01jRGRwYVVXU2pKNVkKZ0FwdlhIMmxnTHVpYldZK0VpWEl6WFI0cm1OSklZMHRKNnlnMU9wTkhZbU82SUpHM3FBRytOYXBOb21RdElHbQpnWGJtQy9CQzkva0VjdzdFVDZPQjl0T3ZTRjFRME5LZm9TS0ExUTZjanh3RUVEblh3bHh6ZUk4eUVMOWZWVjJVCjFaUTJnRzQwZFFJREFRQUJvMll3WkRBT0JnTlZIUThCQWY4RUJBTUNBUVl3RWdZRFZSMFRBUUgvQkFnd0JnRUIKL3dJQkFqQWRCZ05WSFE0RUZnUVUzNnJOOGkrQ2hxRlI3Szdpck1kRXFCMnYrZnd3SHdZRFZSMGpCQmd3Rm9BVQozNnJOOGkrQ2hxRlI3Szdpck1kRXFCMnYrZnd3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUNhVVhxeFRXUktjCkxIakNQYXVuMFR3cFhJTk8wMC94L3o5RnBqNUlocndzVktsbytxdW9iamlWeTlWTWY3UVJsYnpWaTBIRlpJU3MKcUJNQWNITnZCeE9tdUNBZTJna0hEV0V2WTNHL1R3aksvL1IraitkOUVOMTBXME5KZEZUQlRyL056T1FVZzVZZQpQbG9zelB4MEpwaTJvTXBUQ2sxQWFoV1A5eEJnQkZWQitWYW8zeTdtb0JHb05pWjM0OHR3NDhPWHA3cEc1U2FnCkE2L0E1WXpHSjRmWU1telU2NXF4ZGROYTMrYTJYbGVZeXk4ZzRvbStmRHVLVDU2SGlaaVBRS0s4T1FXNXNwNTEKOTh4a1pQUVdQbHlmVVI1aURVN3psNHF6VDlQSzU4bUZWRFRibmYzZys5aUtUeUF2NTB2eWZsa0c1MjJzeGRpNQpURkJQUEluWjFqST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server: https://:6443 + name: kube-cluster +contexts: +- context: + cluster: kube-cluster + user: system:node:worker-0 + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: system:node:worker-0 + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVGakNDQXY2Z0F3SUJBZ0lVVkpVUUFoUklaYURTT08rUnEwemNqVVNGR3ZVd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2FERUxNQWtHQTFVRUJoTUNWVk14RHpBTkJnTlZCQWdUQms5eVpXZHZiakVSTUE4R0ExVUVCeE1JVUc5eQpkR3hoYm1ReEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhDekFKQmdOVkJBc1RBa05CTVJNd0VRWURWUVFECkV3cExkV0psY201bGRHVnpNQjRYRFRFNE1EY3dOREF4TlRFd01Gb1hEVEU1TURjd05EQXhOVEV3TUZvd2dZVXgKQ3pBSkJnTlZCQVlUQWxWVE1ROHdEUVlEVlFRSUV3WlBjbVZuYjI0eEVUQVBCZ05WQkFjVENGQnZjblJzWVc1awpNUlV3RXdZRFZRUUtFd3h6ZVhOMFpXMDZibTlrWlhNeEhEQWFCZ05WQkFzVEUwdDFZbVZ5Ym1WMFpYTWdMU0JEClpXNTBUMU14SFRBYkJnTlZCQU1URkhONWMzUmxiVHB1YjJSbE9uZHZjbXRsY2kwd01JSUJJakFOQmdrcWhraUcKOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXJtYUtzR05FMVdSdTRYVWRMQlUzRm9nb2lCRDlEd1FJL0hLUwpsdXlJVWRvOTRZaUQrL2VoUDZDbWpsbG9RZW4zK01JcDE1WmljVnJuaUF2NXptY01jdjVpUFJuVjlzRjhYQlNTCjZqSlMzYXFoMm42Rm4wQmFuR3BTd3JKSmQvV2NPRW5ucVZ0cnlRR0tITDJqUFVDOTV4ZWhQYmhPc1kxZnBrMTIKNG9KS29pTGxFb21JUjBSUGFHT2YyNHpSZ3BVN2F1ZUM1dTVsRysxNDdDMklNdHBQeGZXY0dVWUhtSWRhVTlxdgpjUXo1L1lsR1NCQmVZYWFpUlNUYjZtMlk5SVhSMmQ2aDk1Sm5hdGJXY3VPOFU5TWhtcTF1aGhtK3VrQTJ1NEpyCmQ3Tk1Md0VDL1RHK2hDekszQXZpMldGcDVmcWtYUmFlSlFBVUZvUzU4OFhaVEhWQ0JRSURBUUFCbzRHWk1JR1cKTUE0R0ExVWREd0VCL3dRRUF3SUZvREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJdwpEQVlEVlIwVEFRSC9CQUl3QURBZEJnTlZIUTRFRmdRVWVBOHBIeEE4WGRSaXVlQXg4RG5WR094Z1ZORXdId1lEClZSMGpCQmd3Rm9BVTM2ck44aStDaHFGUjdLN2lyTWRFcUIyditmd3dGd1lEVlIwUkJCQXdEb0lJZDI5eWEyVnkKTFRDQ0FJSUFNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUIxbUdQdjJITFk5bWZjd2lqaHhTdHFmcFBCVVNxcwozS2NJOEZmaEpMaTVzWVpQSzJsS0lTNVpEaDNUUmwrSU1ETkhNNEt2enczakM2cDlmUHh5aGVnbHRDb2NNYkFBClhkWVBJNktaNlArMDhCbld4eXIwYnBlODM0QWNxTmhuYkhpbTBXdHdRb2xPUmUxMDZuWnpWaS9FRFNOVVdiblEKMGIzZ09uVXNnKzRFRkhvR2syVndIMnc5VXhVeE9GRFZWelo1VUJ6ZGFzSDJoc0srVzFVZHZGQXlXdzV2S3U5MQpNRU9RTjUxTXg0Y0NuUVJWbkY4RXBBc0t0dWVEcXFzcW5UWklzR2R5Z1FOaEdEWWNtSWsvNGxqRTAzOVNYN2Q3CjluYVNMS2lNQnlvN00zcWlTenZDNCtVaG9LZUlnRFljUm5IN01NMTdUWDA2ajYvTkZ2YkZwbHJ4Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBcm1hS3NHTkUxV1J1NFhVZExCVTNGb2dvaUJEOUR3UUkvSEtTbHV5SVVkbzk0WWlECisvZWhQNkNtamxsb1FlbjMrTUlwMTVaaWNWcm5pQXY1em1jTWN2NWlQUm5WOXNGOFhCU1M2akpTM2FxaDJuNkYKbjBCYW5HcFN3ckpKZC9XY09Fbm5xVnRyeVFHS0hMMmpQVUM5NXhlaFBiaE9zWTFmcGsxMjRvSktvaUxsRW9tSQpSMFJQYUdPZjI0elJncFU3YXVlQzV1NWxHKzE0N0MySU10cFB4ZldjR1VZSG1JZGFVOXF2Y1F6NS9ZbEdTQkJlCllhYWlSU1RiNm0yWTlJWFIyZDZoOTVKbmF0YldjdU84VTlNaG1xMXVoaG0rdWtBMnU0SnJkN05NTHdFQy9URysKaEN6SzNBdmkyV0ZwNWZxa1hSYWVKUUFVRm9TNTg4WFpUSFZDQlFJREFRQUJBb0lCQUYweno4cVVRcVVJRjNuRgo3T0V5TlU2RlZhUGg4RnVsL0dkdGs0bTB2dVYvakxLTEVuNEZ5TmNJTS9xL0xVaU1rVWtQK3V5TUlmQVNkTW53CkhJQ0I4ZFZDdloySndSOHVLQmZOcHZZaURzY0grWmViSmk0RlptZFJLd21CM21HTmtQOUQvOW95VVZnS3lwU3gKaEJ1b0RkVUNlOWxRTmo0Tkg2RHdwU2hJR1Rxc0tNeG5McFJnejFVcmEzTTI3NXhaR1IzeHFRSTUvdDkrVjJtMwpSY3BncjlHTjlsdmxHYWg3VjM4S0xja09jdGtZNGtuTnJ2NFRMK0NRV2ZxVlR4end6SnV4aWpvUUpNbmZaY1NRCkVBdGgxdkNVTGpqelcwQnVHSm91NGtvK2krd2F5SlppazBKVjRmQ3BnTkVnM293V1lqM0xoTDNRRVByRm9WaFAKKzN4UWY4RUNnWUVBd3ZiNCtLTVp2cU9lWVF5Z2g5NnpEVklDR2Z6Y2dLZ0hzVExNZlpsN2RReDB1NVFjRWpIaQp1NitEdXduWnJYSlJnWGdNSFdxOFFEaUlWeGN5UE9FbWpjNE9hOWJIWTF2NTFCWElPNzhNTW9pNy8vUEZXUmJ2CkJ1b2NLK1h0eUltSUFGcXkrK2hkczZLcFJRV3dqRjBXUnJ1WFBBaE9NRy9JdEhiUDlUU0xMV2NDZ1lFQTVQOS8KU2hibHArRjJFUmRwM3B2QUc2OVVkY0FRUDBDVDNhYkxPL0J6SVA2RTZaSHh5R1M4UGxGTFNIY2lCVVJnMmZYbwplUU9Ic1l2M2JYano4MFlDN0hvNTQ3WXRZTG1UelZ0dGN3eG15UXRRRFVxZzk2M0dMNlAzL2VVSVhSZXVwOEhmClovQVAyVStGa3NHTk1zLzE2NU5jKzF6N09ZZDEvMnYrU0pSQ2hiTUNnWUVBbUZmMzBBRlNlcDdMZ1RZbXNrU1kKVU01R0NoeUdDSnBEK0hpalJYbUsrTGxVOWNZVDhUdTkyYjZhQnFyNXh3aFBIQVc4SWJobmhSOG5talhyOFpwVgpUL1UxVzVxbWpzTXVBa1ZMQXpTVHlXRUMxa3ViWjdQOTdyODhhQ2p3bHdTd211WkVlWWJtd1JsWGZ6R2s5T2xBCklPRXdtWjQ3eXFsdWdXVmR0bExxZGYwQ2dZQVEvOHB5TklydDBIWFVSUG10OWIvb0RqYmwydEVaOFBCSkZ2V2wKU0c4emR0RmxDVmw2ZDhmbGxHRlRTc3l5dVdXN0tGOE9oSzJSQjRQaGs0WUFmQ1VCYXYxYkl3M3hUVFZrbkRIZQp4WC9uMHJncGxHMm9sVFRPR296eFF3eEE1TjRwUStJU3RDWnBDbGN2RXNFcGtYaCsrVnlBcUdGWjN1MXFmWmpOCnlIdTdzd0tCZ1FDbmdhd2xyWHArT1p5ZTdQSXN2Vm1nMWJFODFHc2M5RkNFWkhJc0VHVXdCcDg2OHJmbCswYXkKUkpiRXZZWWthYmtLdUlKUDZ6b2FIZm96THFwQitkUVg0eGRwdjhBN0IzdnlSYk9IV2xOTHdkTWRmL0lzMVcxZgpmUVhrWDlEelBmTUlVa2Nvem1Ralp5UkNIVkVhbmxxd2htc3RMbDgvMExwTUttSnU3eHVBNUE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= diff --git a/certs-dir/worker-0.pem b/certs-dir/worker-0.pem new file mode 100644 index 0000000..9aebdbe --- /dev/null +++ b/certs-dir/worker-0.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEFjCCAv6gAwIBAgIUVJUQAhRIZaDSOO+Rq0zcjUSFGvUwDQYJKoZIhvcNAQEL +BQAwaDELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9y +dGxhbmQxEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAkNBMRMwEQYDVQQD +EwpLdWJlcm5ldGVzMB4XDTE4MDcwNDAxNTEwMFoXDTE5MDcwNDAxNTEwMFowgYUx +CzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZPcmVnb24xETAPBgNVBAcTCFBvcnRsYW5k +MRUwEwYDVQQKEwxzeXN0ZW06bm9kZXMxHDAaBgNVBAsTE0t1YmVybmV0ZXMgLSBD +ZW50T1MxHTAbBgNVBAMTFHN5c3RlbTpub2RlOndvcmtlci0wMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEArmaKsGNE1WRu4XUdLBU3FogoiBD9DwQI/HKS +luyIUdo94YiD+/ehP6CmjlloQen3+MIp15ZicVrniAv5zmcMcv5iPRnV9sF8XBSS +6jJS3aqh2n6Fn0BanGpSwrJJd/WcOEnnqVtryQGKHL2jPUC95xehPbhOsY1fpk12 +4oJKoiLlEomIR0RPaGOf24zRgpU7aueC5u5lG+147C2IMtpPxfWcGUYHmIdaU9qv +cQz5/YlGSBBeYaaiRSTb6m2Y9IXR2d6h95JnatbWcuO8U9Mhmq1uhhm+ukA2u4Jr +d7NMLwEC/TG+hCzK3Avi2WFp5fqkXRaeJQAUFoS588XZTHVCBQIDAQABo4GZMIGW +MA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw +DAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUeA8pHxA8XdRiueAx8DnVGOxgVNEwHwYD +VR0jBBgwFoAU36rN8i+ChqFR7K7irMdEqB2v+fwwFwYDVR0RBBAwDoIId29ya2Vy 
+LTCCAIIAMA0GCSqGSIb3DQEBCwUAA4IBAQB1mGPv2HLY9mfcwijhxStqfpPBUSqs +3KcI8FfhJLi5sYZPK2lKIS5ZDh3TRl+IMDNHM4Kvzw3jC6p9fPxyhegltCocMbAA +XdYPI6KZ6P+08BnWxyr0bpe834AcqNhnbHim0WtwQolORe106nZzVi/EDSNUWbnQ +0b3gOnUsg+4EFHoGk2VwH2w9UxUxOFDVVzZ5UBzdasH2hsK+W1UdvFAyWw5vKu91 +MEOQN51Mx4cCnQRVnF8EpAsKtueDqqsqnTZIsGdygQNhGDYcmIk/4ljE039SX7d7 +9naSLKiMByo7M3qiSzvC4+UhoKeIgDYcRnH7MM17TX06j6/NFvbFplrx +-----END CERTIFICATE----- diff --git a/certs-dir/worker-1-csr.json b/certs-dir/worker-1-csr.json new file mode 100644 index 0000000..85ca09d --- /dev/null +++ b/certs-dir/worker-1-csr.json @@ -0,0 +1,16 @@ +{ + "CN": "system:node:worker-1", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "US", + "L": "Portland", + "O": "system:nodes", + "OU": "Kubernetes - CentOS", + "ST": "Oregon" + } + ] +} diff --git a/certs-dir/worker-1-key.pem b/certs-dir/worker-1-key.pem new file mode 100644 index 0000000..174772e --- /dev/null +++ b/certs-dir/worker-1-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAvnKFvCMAOAKxMgHwHUrW4us7CIbUgCD0ghyo/cGTs80nXylw +geG1WhkVT+G0l1hy60fMIYH9W08otMy6S43LAXkADI76ndcsFSGqxcvCCTP5AMZo +RUv3OdAkEwkaOM+m2zNIsjThQFlYKIAGBMByPEQxuMimXyZ11/YNOVf1ATudybUd +wKAQw5hreHbo8CW7pPbONDbZrI7px0RWIHmVp7VnD6liGg4zJcgMvK/jQ+6noWDK +XSsKs/HT3h7pt2UIIyyTd6d8Em/NTQaAVsAjiwVX+mviZa75qmgHT8HNZF0A9EoS +bB2UvU+MrcpwGLj3pA6sXfWzLPLXl3I0+9pHZQIDAQABAoIBAHswaQFBQqr4UtYe +GstX1ZgCwLTLQjl/F6nyD/3UcU4krgsIKxZYgY4+G2MIhGBBflRipNAn3VJq6dHJ +e9fTaDUTp8x3z/S5cnJYzLYVDvYVAERYEqqAcPKciDkn8iY9a5jum19qhSmyoSvR +ZBM+yKXjFqaIY9W4cxG2vucQ1hJs9YlizXZV9bFsndCjRlkYGq7CFjT86bahrIXG +5pRZ1xFcBJ+r7S+N4I0dxYqaQT5/prWS/tVzDAuIWEBm0PheG3L0mn1IK/re9BQc +pF7bgjwKA10LKykwx+deMopuUi6lMY1H3UqtlM3djyp2JGBBp3vGD5euR+o67JDm +CFuXmiECgYEA++BOt7xgF3yhFjdSj5mM5uDwT5f9Nr77KTgCqO/cDQop48J6/Rvf +0gosy8wr083dX8haV6B2D/pYuhmJWOcTzoAzMFcx3J7NC2VYFpt4YBD69BXpZsM2 +q0Ndo8+zZwaQXxxRVPES4K3W2muaidM7Rx4/KSfsHMROB2r0K2ClHKcCgYEAwZC/ +TDYuzIuBaRQcC60VQeg61W5DNblIwSSEUcQO2cIxw2NN6YmKN8iqphm588ZCI9MS +mI/c7G76tt7rW5R3LK7PX0Aiqk2lRqUnF0q3Z1mx41e9hIu9bsUdEtEqS7eQoPWH +iC2hVxbZSbaiBGD96JQ428deqmgMA0OFX4qvgRMCgYEAo/gaRrJzO9b+ZyAMtz8W +4xZ8WtRvGzyo9ORpluDPOA3txz/MIQoMYwgMsX9w6KmT+87KQVFMiopHSJ9nMmD8 +G+qJaGbbJuyaP1jyR4Vej+t983hwQV3EMSnhw8pq2B8FzWT57Xoi9/EzdW2nZSIF +7P5bz8cVhoSk519RJQv9cPECgYAUuNJB7msEKssn3KdD+LuV2q6xuqQnVDadtrMT +rdGJLmj1aeQlZDwPzMO4FHj7x/W8sBQFkSEtjpP4j9TScQpxFiXNJKiRYs6WyYdt +zunjfo60r+fyEKOvABsksI++m2A4WxCvW3MyR5frdEnT/i4wf+22WZmg1JK0pr3Y +hJhVUQKBgFmf/hVk79gQ+p5azLqjZeK2Mj6ExE1kj6j6y6SJtwHj/DiiReLvEnqF +FvOsVjdS/dTGYLEfP2cD/RwedSsCbRW3VNHkXAzp69lACln+XtnDmB2sSRsfvC9O +qVfYEgXtEgAWJMIEJIWIIdOr5ZpcUP/1Hh3Phl1URzGvaCAsmI9n +-----END RSA PRIVATE KEY----- diff --git a/certs-dir/worker-1.csr b/certs-dir/worker-1.csr new file mode 100644 index 0000000..9670e3a --- /dev/null +++ b/certs-dir/worker-1.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICyzCCAbMCAQAwgYUxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZPcmVnb24xETAP +BgNVBAcTCFBvcnRsYW5kMRUwEwYDVQQKEwxzeXN0ZW06bm9kZXMxHDAaBgNVBAsT +E0t1YmVybmV0ZXMgLSBDZW50T1MxHTAbBgNVBAMTFHN5c3RlbTpub2RlOndvcmtl +ci0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvnKFvCMAOAKxMgHw +HUrW4us7CIbUgCD0ghyo/cGTs80nXylwgeG1WhkVT+G0l1hy60fMIYH9W08otMy6 +S43LAXkADI76ndcsFSGqxcvCCTP5AMZoRUv3OdAkEwkaOM+m2zNIsjThQFlYKIAG +BMByPEQxuMimXyZ11/YNOVf1ATudybUdwKAQw5hreHbo8CW7pPbONDbZrI7px0RW +IHmVp7VnD6liGg4zJcgMvK/jQ+6noWDKXSsKs/HT3h7pt2UIIyyTd6d8Em/NTQaA +VsAjiwVX+mviZa75qmgHT8HNZF0A9EoSbB2UvU+MrcpwGLj3pA6sXfWzLPLXl3I0 ++9pHZQIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBADku4LtyAocUg9SvpZS1Vpmb 
+85ZPJmALX4lgP/IxPq4eryMKfPnwNrRE3cxc43SEukMfskAd0I8XCQJYqbEIb0hp +sDc7QTHtUZyihjyFCGhqUg157aYZSjSPsm7Hhdy+kIWU2BPgO/fFrpqUjh+ndJPA +HCG23Vn9ueTMh/wDzDCfBQlkMHGkNPKad/3JZQt1nulFM9ym5DwCbhlT8gi6LPZ+ +ikeW2k6zEEBsXVkFnMmIm2gXS7HQrDZo+igMu0L/YX75E+MCNcVZ+qlCx22B8796 +lcL/1PBmR2FDgtvNYNWWsm0bIGPmutz2Uc53c0pZiAPPtpgohNSTflqbMtfgmK8= +-----END CERTIFICATE REQUEST----- diff --git a/certs-dir/worker-1.kubeconfig b/certs-dir/worker-1.kubeconfig new file mode 100644 index 0000000..40e20ce --- /dev/null +++ b/certs-dir/worker-1.kubeconfig @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR4RENDQXF5Z0F3SUJBZ0lVS2k0Rm5vaytkOG1OLytUdHR0a1VMd2QwbW53d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2FERUxNQWtHQTFVRUJoTUNWVk14RHpBTkJnTlZCQWdUQms5eVpXZHZiakVSTUE4R0ExVUVCeE1JVUc5eQpkR3hoYm1ReEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhDekFKQmdOVkJBc1RBa05CTVJNd0VRWURWUVFECkV3cExkV0psY201bGRHVnpNQjRYRFRFNE1EY3dOREF4TlRFd01Gb1hEVEl6TURjd016QXhOVEV3TUZvd2FERUwKTUFrR0ExVUVCaE1DVlZNeER6QU5CZ05WQkFnVEJrOXlaV2R2YmpFUk1BOEdBMVVFQnhNSVVHOXlkR3hoYm1ReApFekFSQmdOVkJBb1RDa3QxWW1WeWJtVjBaWE14Q3pBSkJnTlZCQXNUQWtOQk1STXdFUVlEVlFRREV3cExkV0psCmNtNWxkR1Z6TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUE3WDlQR1R4MnQ0NWgKb2NIME9tT0hHS2plN1ZkUXBCL012d0RkYkFadnplbGlqK3RXZjROUlIyNmYvaCt2aUN6bllFWGJPN251bEgvYwovL1FLd3NZak9kbnMzR3JNREsxbUNzanFTS3VvQmZxRWJZVVBHeDc4a2s5UHUzZGV0VFVRclN3djBtd1BnaDdjCmkxMU11QkVVS3UzRVR0bnpOU1AvRTlkZ0w0SHBrelNJMFNQZFpRWVd0S091RFNEYzljQ01jRGRwYVVXU2pKNVkKZ0FwdlhIMmxnTHVpYldZK0VpWEl6WFI0cm1OSklZMHRKNnlnMU9wTkhZbU82SUpHM3FBRytOYXBOb21RdElHbQpnWGJtQy9CQzkva0VjdzdFVDZPQjl0T3ZTRjFRME5LZm9TS0ExUTZjanh3RUVEblh3bHh6ZUk4eUVMOWZWVjJVCjFaUTJnRzQwZFFJREFRQUJvMll3WkRBT0JnTlZIUThCQWY4RUJBTUNBUVl3RWdZRFZSMFRBUUgvQkFnd0JnRUIKL3dJQkFqQWRCZ05WSFE0RUZnUVUzNnJOOGkrQ2hxRlI3Szdpck1kRXFCMnYrZnd3SHdZRFZSMGpCQmd3Rm9BVQozNnJOOGkrQ2hxRlI3Szdpck1kRXFCMnYrZnd3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUNhVVhxeFRXUktjCkxIakNQYXVuMFR3cFhJTk8wMC94L3o5RnBqNUlocndzVktsbytxdW9iamlWeTlWTWY3UVJsYnpWaTBIRlpJU3MKcUJNQWNITnZCeE9tdUNBZTJna0hEV0V2WTNHL1R3aksvL1IraitkOUVOMTBXME5KZEZUQlRyL056T1FVZzVZZQpQbG9zelB4MEpwaTJvTXBUQ2sxQWFoV1A5eEJnQkZWQitWYW8zeTdtb0JHb05pWjM0OHR3NDhPWHA3cEc1U2FnCkE2L0E1WXpHSjRmWU1telU2NXF4ZGROYTMrYTJYbGVZeXk4ZzRvbStmRHVLVDU2SGlaaVBRS0s4T1FXNXNwNTEKOTh4a1pQUVdQbHlmVVI1aURVN3psNHF6VDlQSzU4bUZWRFRibmYzZys5aUtUeUF2NTB2eWZsa0c1MjJzeGRpNQpURkJQUEluWjFqST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server: https://:6443 + name: kube-cluster +contexts: +- context: + cluster: kube-cluster + user: system:node:worker-1 + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: system:node:worker-1 + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVGakNDQXY2Z0F3SUJBZ0lVVFBsZHJtTEEvUWpMRngreEhiT09teXJSYXBVd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2FERUxNQWtHQTFVRUJoTUNWVk14RHpBTkJnTlZCQWdUQms5eVpXZHZiakVSTUE4R0ExVUVCeE1JVUc5eQpkR3hoYm1ReEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhDekFKQmdOVkJBc1RBa05CTVJNd0VRWURWUVFECkV3cExkV0psY201bGRHVnpNQjRYRFRFNE1EY3dOREF4TlRFd01Gb1hEVEU1TURjd05EQXhOVEV3TUZvd2dZVXgKQ3pBSkJnTlZCQVlUQWxWVE1ROHdEUVlEVlFRSUV3WlBjbVZuYjI0eEVUQVBCZ05WQkFjVENGQnZjblJzWVc1awpNUlV3RXdZRFZRUUtFd3h6ZVhOMFpXMDZibTlrWlhNeEhEQWFCZ05WQkFzVEUwdDFZbVZ5Ym1WMFpYTWdMU0JEClpXNTBUMU14SFRBYkJnTlZCQU1URkhONWMzUmxiVHB1YjJSbE9uZHZjbXRsY2kweE1JSUJJakFOQmdrcWhraUcKOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXZuS0Z2Q01BT0FLeE1nSHdIVXJXNHVzN0NJYlVnQ0QwZ2h5bwovY0dUczgwblh5bHdnZUcxV2hrVlQrRzBsMWh5NjBmTUlZSDlXMDhvdE15NlM0M0xBWGtBREk3Nm5kY3NGU0dxCnhjdkNDVFA1QU1ab1JVdjNPZEFrRXdrYU9NK20yek5Jc2pUaFFGbFlLSUFHQk1CeVBFUXh1TWltWHlaMTEvWU4KT1ZmMUFUdWR5YlVkd0tBUXc1aHJlSGJvOENXN3BQYk9ORGJackk3cHgwUldJSG1WcDdWbkQ2bGlHZzR6SmNnTQp2Sy9qUSs2bm9XREtYU3NLcy9IVDNoN3B0MlVJSXl5VGQ2ZDhFbS9OVFFhQVZzQWppd1ZYK212aVphNzVxbWdIClQ4SE5aRjBBOUVvU2JCMlV2VStNcmNwd0dMajNwQTZzWGZXekxQTFhsM0kwKzlwSFpRSURBUUFCbzRHWk1JR1cKTUE0R0ExVWREd0VCL3dRRUF3SUZvREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJdwpEQVlEVlIwVEFRSC9CQUl3QURBZEJnTlZIUTRFRmdRVUdEZDR5TGZEVlVlOS9DNmdrV0JtU2JNaWFySXdId1lEClZSMGpCQmd3Rm9BVTM2ck44aStDaHFGUjdLN2lyTWRFcUIyditmd3dGd1lEVlIwUkJCQXdEb0lJZDI5eWEyVnkKTFRHQ0FJSUFNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUJSUGp5Q0dOTzJIazRxanhOR3poL1dNTmp5eklrSAo3T2ZnV2pORDltN3haY0VIVWZROVV3L0I4TlMrZHIxWkltVjU2SEhZbzI4REs0V1djV0FsTTdydFFpNHppLzNXCkd5ZVNpWXJHSGhGdm9RcWFWd3l6eHVMZlozelgzeHBBaUxwVmE4SGZkR0hJSmNIU3pTa29LcFNBRlcrdHJiMU8KMFZBcmRPV1JNeW1XUDRmb2llcjhCWllGRXJnd2pFbVhybHVUNDdraFlDdi9abEN4Z1dBQkVaNlhRejA1U2lLRApFNjZRNDFneWsxUk1QWTYzL3hZbm12L1hJbUpsRWprSU15dzlBQmlIZ3IzTVp1eFVPMkdEQ1JaMEV0Q01hUGJEClF5TzIwTlJIRm9uK09vYjRTZXpDQk9RQ3QxWUJqWDJPRUpvbkI4R3lhN3NPdVRWZzk1ODZNTkxECi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdm5LRnZDTUFPQUt4TWdId0hVclc0dXM3Q0liVWdDRDBnaHlvL2NHVHM4MG5YeWx3CmdlRzFXaGtWVCtHMGwxaHk2MGZNSVlIOVcwOG90TXk2UzQzTEFYa0FESTc2bmRjc0ZTR3F4Y3ZDQ1RQNUFNWm8KUlV2M09kQWtFd2thT00rbTJ6TklzalRoUUZsWUtJQUdCTUJ5UEVReHVNaW1YeVoxMS9ZTk9WZjFBVHVkeWJVZAp3S0FRdzVocmVIYm84Q1c3cFBiT05EYlpySTdweDBSV0lIbVZwN1ZuRDZsaUdnNHpKY2dNdksvalErNm5vV0RLClhTc0tzL0hUM2g3cHQyVUlJeXlUZDZkOEVtL05UUWFBVnNBaml3VlgrbXZpWmE3NXFtZ0hUOEhOWkYwQTlFb1MKYkIyVXZVK01yY3B3R0xqM3BBNnNYZld6TFBMWGwzSTArOXBIWlFJREFRQUJBb0lCQUhzd2FRRkJRcXI0VXRZZQpHc3RYMVpnQ3dMVExRamwvRjZueUQvM1VjVTRrcmdzSUt4WllnWTQrRzJNSWhHQkJmbFJpcE5BbjNWSnE2ZEhKCmU5ZlRhRFVUcDh4M3ovUzVjbkpZekxZVkR2WVZBRVJZRXFxQWNQS2NpRGtuOGlZOWE1anVtMTlxaFNteW9TdlIKWkJNK3lLWGpGcWFJWTlXNGN4RzJ2dWNRMWhKczlZbGl6WFpWOWJGc25kQ2pSbGtZR3E3Q0ZqVDg2YmFocklYRwo1cFJaMXhGY0JKK3I3UytONEkwZHhZcWFRVDUvcHJXUy90VnpEQXVJV0VCbTBQaGVHM0wwbW4xSUsvcmU5QlFjCnBGN2JnandLQTEwTEt5a3d4K2RlTW9wdVVpNmxNWTFIM1VxdGxNM2RqeXAySkdCQnAzdkdENWV1UitvNjdKRG0KQ0Z1WG1pRUNnWUVBKytCT3Q3eGdGM3loRmpkU2o1bU01dUR3VDVmOU5yNzdLVGdDcU8vY0RRb3A0OEo2L1J2ZgowZ29zeTh3cjA4M2RYOGhhVjZCMkQvcFl1aG1KV09jVHpvQXpNRmN4M0o3TkMyVllGcHQ0WUJENjlCWHBac00yCnEwTmRvOCt6WndhUVh4eFJWUEVTNEszVzJtdWFpZE03Ung0L0tTZnNITVJPQjJyMEsyQ2xIS2NDZ1lFQXdaQy8KVERZdXpJdUJhUlFjQzYwVlFlZzYxVzVETmJsSXdTU0VVY1FPMmNJeHcyTk42WW1LTjhpcXBobTU4OFpDSTlNUwptSS9jN0c3NnR0N3JXNVIzTEs3UFgwQWlxazJsUnFVbkYwcTNaMW14NDFlOWhJdTlic1VkRXRFcVM3ZVFvUFdICmlDMmhWeGJaU2JhaUJHRDk2SlE0MjhkZXFtZ01BME9GWDRxdmdSTUNnWUVBby9nYVJySnpPOWIrWnlBTXR6OFcKNHhaOFd0UnZHenlvOU9ScGx1RFBPQTN0eHovTUlRb01Zd2dNc1g5dzZLbVQrODdLUVZGTWlvcEhTSjluTW1EOApHK3FKYUdiYkp1eWFQMWp5UjRWZWordDk4M2h3UVYzRU1Tbmh3OHBxMkI4RnpXVDU3WG9pOS9FemRXMm5aU0lGCjdQNWJ6OGNWaG9TazUxOVJKUXY5Y1BFQ2dZQVV1TkpCN21zRUtzc24zS2REK0x1VjJxNnh1cVFuVkRhZHRyTVQKcmRHSkxtajFhZVFsWkR3UHpNTzRGSGo3eC9XOHNCUUZrU0V0anBQNGo5VFNjUXB4RmlYTkpLaVJZczZXeVlkdAp6dW5qZm82MHIrZnlFS092QUJza3NJKyttMkE0V3hDdlczTXlSNWZyZEVuVC9pNHdmKzIyV1ptZzFKSzBwcjNZCmhKaFZVUUtCZ0ZtZi9oVms3OWdRK3A1YXpMcWpaZUsyTWo2RXhFMWtqNmo2eTZTSnR3SGovRGlpUmVMdkVucUYKRnZPc1ZqZFMvZFRHWUxFZlAyY0QvUndlZFNzQ2JSVzNWTkhrWEF6cDY5bEFDbG4rWHRuRG1CMnNTUnNmdkM5TwpxVmZZRWdYdEVnQVdKTUlFSklXSUlkT3I1WnBjVVAvMUhoM1BobDFVUnpHdmFDQXNtSTluCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/certs-dir/worker-1.pem b/certs-dir/worker-1.pem new file mode 100644 index 0000000..152fa79 --- /dev/null +++ b/certs-dir/worker-1.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEFjCCAv6gAwIBAgIUTPldrmLA/QjLFx+xHbOOmyrRapUwDQYJKoZIhvcNAQEL +BQAwaDELMAkGA1UEBhMCVVMxDzANBgNVBAgTBk9yZWdvbjERMA8GA1UEBxMIUG9y +dGxhbmQxEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAkNBMRMwEQYDVQQD +EwpLdWJlcm5ldGVzMB4XDTE4MDcwNDAxNTEwMFoXDTE5MDcwNDAxNTEwMFowgYUx +CzAJBgNVBAYTAlVTMQ8wDQYDVQQIEwZPcmVnb24xETAPBgNVBAcTCFBvcnRsYW5k +MRUwEwYDVQQKEwxzeXN0ZW06bm9kZXMxHDAaBgNVBAsTE0t1YmVybmV0ZXMgLSBD +ZW50T1MxHTAbBgNVBAMTFHN5c3RlbTpub2RlOndvcmtlci0xMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvnKFvCMAOAKxMgHwHUrW4us7CIbUgCD0ghyo +/cGTs80nXylwgeG1WhkVT+G0l1hy60fMIYH9W08otMy6S43LAXkADI76ndcsFSGq +xcvCCTP5AMZoRUv3OdAkEwkaOM+m2zNIsjThQFlYKIAGBMByPEQxuMimXyZ11/YN +OVf1ATudybUdwKAQw5hreHbo8CW7pPbONDbZrI7px0RWIHmVp7VnD6liGg4zJcgM +vK/jQ+6noWDKXSsKs/HT3h7pt2UIIyyTd6d8Em/NTQaAVsAjiwVX+mviZa75qmgH +T8HNZF0A9EoSbB2UvU+MrcpwGLj3pA6sXfWzLPLXl3I0+9pHZQIDAQABo4GZMIGW +MA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw +DAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUGDd4yLfDVUe9/C6gkWBmSbMiarIwHwYD +VR0jBBgwFoAU36rN8i+ChqFR7K7irMdEqB2v+fwwFwYDVR0RBBAwDoIId29ya2Vy +LTGCAIIAMA0GCSqGSIb3DQEBCwUAA4IBAQBRPjyCGNO2Hk4qjxNGzh/WMNjyzIkH 
+7OfgWjND9m7xZcEHUfQ9Uw/B8NS+dr1ZImV56HHYo28DK4WWcWAlM7rtQi4zi/3W +GyeSiYrGHhFvoQqaVwyzxuLfZ3zX3xpAiLpVa8HfdGHIJcHSzSkoKpSAFW+trb1O +0VArdOWRMymWP4foier8BZYFErgwjEmXrluT47khYCv/ZlCxgWABEZ6XQz05SiKD +E66Q41gyk1RMPY63/xYnmv/XImJlEjkIMyw9ABiHgr3MZuxUO2GDCRZ0EtCMaPbD +QyO20NRHFon+Oob4SezCBOQCt1YBjX2OEJonB8Gya7sOuTVg9586MNLD +-----END CERTIFICATE----- diff --git a/cni/bridge b/cni/bridge new file mode 100755 index 0000000..2f6dc50 Binary files /dev/null and b/cni/bridge differ diff --git a/cni/cni-plugins-amd64-v0.6.0.tgz b/cni/cni-plugins-amd64-v0.6.0.tgz new file mode 100644 index 0000000..bb2b555 Binary files /dev/null and b/cni/cni-plugins-amd64-v0.6.0.tgz differ diff --git a/cni/dhcp b/cni/dhcp new file mode 100755 index 0000000..3397eb1 Binary files /dev/null and b/cni/dhcp differ diff --git a/cni/flannel b/cni/flannel new file mode 100755 index 0000000..1d5cab4 Binary files /dev/null and b/cni/flannel differ diff --git a/cni/host-local b/cni/host-local new file mode 100755 index 0000000..3a6a847 Binary files /dev/null and b/cni/host-local differ diff --git a/cni/ipvlan b/cni/ipvlan new file mode 100755 index 0000000..02b77c6 Binary files /dev/null and b/cni/ipvlan differ diff --git a/cni/loopback b/cni/loopback new file mode 100755 index 0000000..b1f69ef Binary files /dev/null and b/cni/loopback differ diff --git a/cni/macvlan b/cni/macvlan new file mode 100755 index 0000000..a8fc171 Binary files /dev/null and b/cni/macvlan differ diff --git a/cni/portmap b/cni/portmap new file mode 100755 index 0000000..d21fbe4 Binary files /dev/null and b/cni/portmap differ diff --git a/cni/ptp b/cni/ptp new file mode 100755 index 0000000..8505632 Binary files /dev/null and b/cni/ptp differ diff --git a/cni/sample b/cni/sample new file mode 100755 index 0000000..a657dc3 Binary files /dev/null and b/cni/sample differ diff --git a/cni/tuning b/cni/tuning new file mode 100755 index 0000000..ace28fe Binary files /dev/null and b/cni/tuning differ diff --git a/cni/vlan b/cni/vlan new file mode 100755 index 0000000..470e1f8 Binary files /dev/null and b/cni/vlan differ diff --git a/controller-0/apiserver b/controller-0/apiserver new file mode 100644 index 0000000..23b620c --- /dev/null +++ b/controller-0/apiserver @@ -0,0 +1,26 @@ +### +# kubernetes system config +# +# The following values are used to configure the kube-apiserver +# + +# The address on the local server to listen to. +#KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1" + +# The port on the local server to listen on. +#KUBE_API_PORT="--port=6443" + +# Port minions listen on +KUBELET_PORT="--kubelet-port=10250" + +# Comma separated list of nodes in the etcd cluster +KUBE_ETCD_SERVERS="--etcd-servers=https://10.240.0.10:2379,https://10.240.0.11:2379" + +# Address range to use for services +KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.32.0.0/16" + +# default admission control policies +KUBE_ADMISSION_CONTROL="--enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota" + +# Add your own! 
+KUBE_API_ARGS="--advertise-address=10.240.0.10 --allow-privileged=true --apiserver-count=2 --audit-log-maxage=30 --audit-log-maxbackup=3 audit-log-maxsize=100 audit-log-path=/var/log/audit/audit.log --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --client-ca-file=/var/lib/kubernetes/ca.pem --enable-swagger-ui=true --etcd-cafile=/var/lib/kubernetes/ca.pem --etcd-certfile=/var/lib/kubernetes/kubernetes.pem --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem --event-ttl=1h --experimental-encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem --kubelet-https=true --runtime-config=api/all --service-account-key-file=/var/lib/kubernetes/service-account.pem --service-node-port-range=30000-32767 --tls-cert-file=/var/lib/kubernetes/kubernetes.pem --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem --v=2" diff --git a/controller-0/config b/controller-0/config new file mode 100644 index 0000000..e57d165 --- /dev/null +++ b/controller-0/config @@ -0,0 +1,13 @@ + +# This file controls the state of SELinux on the system. +# SELINUX= can take one of these three values: +# enforcing - SELinux security policy is enforced. +# permissive - SELinux prints warnings instead of enforcing. +# disabled - No SELinux policy is loaded. +#SELINUX=enforcing +SELINUX=permissive +# SELINUXTYPE= can take one of three two values: +# targeted - Targeted processes are protected, +# minimum - Modification of targeted policy. Only selected processes are protected. +# mls - Multi Level Security protection. +SELINUXTYPE=targeted diff --git a/controller-0/controller-manager b/controller-0/controller-manager new file mode 100644 index 0000000..545f850 --- /dev/null +++ b/controller-0/controller-manager @@ -0,0 +1,7 @@ +### +# The following values are used to configure the kubernetes controller-manager + +# defaults from config and apiserver should be adequate + +# Add your own! 
+KUBE_CONTROLLER_MANAGER_ARGS="--address=0.0.0.0 --cluster-cidr=10.200.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig --leader-elect=true --root-ca-file=/var/lib/kubernetes/ca.pem --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem --service-cluster-ip-range=10.32.0.0/24 --use-service-account-credentials=true --v=2" diff --git a/controller-0/etcd.conf b/controller-0/etcd.conf new file mode 100644 index 0000000..7e9f1b9 --- /dev/null +++ b/controller-0/etcd.conf @@ -0,0 +1,69 @@ +#[Member] +#ETCD_CORS="" +ETCD_DATA_DIR="/var/lib/etcd/default.etcd" +#ETCD_WAL_DIR="" +ETCD_LISTEN_PEER_URLS="https://10.240.0.10:2380" +ETCD_LISTEN_CLIENT_URLS="https://10.240.0.10:2379,https://127.0.0.1:2379" +#ETCD_MAX_SNAPSHOTS="5" +#ETCD_MAX_WALS="5" +ETCD_NAME="controller-0" +#ETCD_SNAPSHOT_COUNT="100000" +#ETCD_HEARTBEAT_INTERVAL="100" +#ETCD_ELECTION_TIMEOUT="1000" +#ETCD_QUOTA_BACKEND_BYTES="0" +#ETCD_MAX_REQUEST_BYTES="1572864" +#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s" +#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s" +#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s" +# +#[Clustering] +ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.240.0.10:2380" +ETCD_ADVERTISE_CLIENT_URLS="https://10.240.0.10:2379" +#ETCD_DISCOVERY="" +#ETCD_DISCOVERY_FALLBACK="proxy" +#ETCD_DISCOVERY_PROXY="" +#ETCD_DISCOVERY_SRV="" +ETCD_INITIAL_CLUSTER="controller-0=https://10.240.0.10:2380,controller-1=https://10.240.0.11:2380" +ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" +ETCD_INITIAL_CLUSTER_STATE="new" +#ETCD_STRICT_RECONFIG_CHECK="true" +#ETCD_ENABLE_V2="true" +# +#[Proxy] +#ETCD_PROXY="off" +#ETCD_PROXY_FAILURE_WAIT="5000" +#ETCD_PROXY_REFRESH_INTERVAL="30000" +#ETCD_PROXY_DIAL_TIMEOUT="1000" +#ETCD_PROXY_WRITE_TIMEOUT="5000" +#ETCD_PROXY_READ_TIMEOUT="0" +# +#[Security] +ETCD_CERT_FILE="/etc/etcd/kubernetes.pem" +ETCD_KEY_FILE="/etc/etcd/kubernetes-key.pem" +ETCD_CLIENT_CERT_AUTH="true" +ETCD_TRUSTED_CA_FILE="/etc/etcd/ca.pem" +#ETCD_AUTO_TLS="false" +ETCD_PEER_CERT_FILE="/etc/etcd/kubernetes.pem" +ETCD_PEER_KEY_FILE="/etc/etcd/kubernetes-key.pem" +ETCD_PEER_CLIENT_CERT_AUTH="true" +ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ca.pem" +#ETCD_PEER_AUTO_TLS="false" +# +#[Logging] +#ETCD_DEBUG="false" +#ETCD_LOG_PACKAGE_LEVELS="" +#ETCD_LOG_OUTPUT="default" +# +#[Unsafe] +#ETCD_FORCE_NEW_CLUSTER="false" +# +#[Version] +#ETCD_VERSION="false" +#ETCD_AUTO_COMPACTION_RETENTION="0" +# +#[Profiling] +#ETCD_ENABLE_PPROF="false" +#ETCD_METRICS="basic" +# +#[Auth] +#ETCD_AUTH_TOKEN="simple" diff --git a/controller-0/etcd.service b/controller-0/etcd.service new file mode 100644 index 0000000..2306643 --- /dev/null +++ b/controller-0/etcd.service @@ -0,0 +1,18 @@ +[Unit] +Description=Etcd Server +After=network.target +After=network-online.target +Wants=network-online.target + +[Service] +Type=notify +WorkingDirectory=/var/lib/etcd/ +EnvironmentFile=-/etc/etcd/etcd.conf +User=etcd +# set GOMAXPROCS to number of processors +ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=${ETCD_NAME} --data-dir=${ETCD_DATA_DIR} --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS} --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} --initial-cluster=${ETCD_INITIAL_CLUSTER} --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} --initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE} 
--cert-file=${ETCD_CERT_FILE} --key-file=${ETCD_KEY_FILE} --client-cert-auth=${ETCD_CLIENT_CERT_AUTH} --trusted-ca-file=${ETCD_TRUSTED_CA_FILE} --peer-cert-file=${ETCD_PEER_CERT_FILE} --peer-key-file=${ETCD_PEER_KEY_FILE} --peer-client-cert-auth=${ETCD_PEER_CLIENT_CERT_AUTH} --peer-trusted-ca-file=${ETCD_PEER_TRUSTED_CA_FILE}" +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/controller-0/kube-apiserver.service b/controller-0/kube-apiserver.service new file mode 100644 index 0000000..5cebb13 --- /dev/null +++ b/controller-0/kube-apiserver.service @@ -0,0 +1,27 @@ +[Unit] +Description=Kubernetes API Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=network.target +After=etcd.service + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/apiserver +#User=kube +ExecStart=/usr/local/bin/kube-apiserver \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_ETCD_SERVERS \ + $KUBE_API_ADDRESS \ + $KUBE_API_PORT \ + $KUBELET_PORT \ + $KUBE_ALLOW_PRIV \ + $KUBE_SERVICE_ADDRESSES \ + $KUBE_ADMISSION_CONTROL \ + $KUBE_API_ARGS +Restart=on-failure +Type=notify +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/controller-0/kube-apiserver_rbac.yaml b/controller-0/kube-apiserver_rbac.yaml new file mode 100644 index 0000000..ac17cf6 --- /dev/null +++ b/controller-0/kube-apiserver_rbac.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: system:kube-apiserver + namespace: "" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-apiserver-to-kubelet +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: kubernetes diff --git a/controller-0/kube-controller-manager.service b/controller-0/kube-controller-manager.service new file mode 100644 index 0000000..a8effcd --- /dev/null +++ b/controller-0/kube-controller-manager.service @@ -0,0 +1,18 @@ +[Unit] +Description=Kubernetes Controller Manager +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/controller-manager +#User=kube +ExecStart=/usr/local/bin/kube-controller-manager \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_MASTER \ + $KUBE_CONTROLLER_MANAGER_ARGS +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/controller-0/kube-scheduler.service b/controller-0/kube-scheduler.service new file mode 100644 index 0000000..f85e0db --- /dev/null +++ b/controller-0/kube-scheduler.service @@ -0,0 +1,13 @@ +[Unit] +Description=Kubernetes Scheduler +Documentation=https://github.com/kubernetes/kubernetes + +[Service] +ExecStart=/usr/local/bin/kube-scheduler \ + --config=/etc/kubernetes/kube-scheduler.yaml \ + --v=2 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target diff --git a/controller-0/kube-scheduler.yaml b/controller-0/kube-scheduler.yaml new file mode 100644 index 0000000..051cc66 --- /dev/null +++ b/controller-0/kube-scheduler.yaml @@ -0,0 +1,6 @@ +apiVersion: componentconfig/v1alpha1 +kind: KubeSchedulerConfiguration +clientConnection: + kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig" +leaderElection: + leaderElect: true diff --git a/controller-0/kubernetes.default.svc.cluster.local.conf b/controller-0/kubernetes.default.svc.cluster.local.conf new file mode 100644 index 0000000..8d8ffe8 --- /dev/null +++ 
b/controller-0/kubernetes.default.svc.cluster.local.conf @@ -0,0 +1,9 @@ +server { + listen 80; + server_name kubernetes.default.svc.cluster.local; + + location /healthz { + proxy_pass https://127.0.0.1:6443/healthz; + proxy_ssl_trusted_certificate /var/lib/kubernetes/ca.pem; + } +} diff --git a/controller-0/kubernetes.repo b/controller-0/kubernetes.repo new file mode 100644 index 0000000..8094327 --- /dev/null +++ b/controller-0/kubernetes.repo @@ -0,0 +1,8 @@ +[kubernetes] +name=Kubernetes +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg + https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg diff --git a/controller-0/rbac_authorizations.yaml b/controller-0/rbac_authorizations.yaml new file mode 100644 index 0000000..92b3dbb --- /dev/null +++ b/controller-0/rbac_authorizations.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:kube-apiserver-to-kubelet +rules: + - apiGroups: + - "" + resources: + - nodes/proxy + - nodes/stats + - nodes/log + - nodes/spec + - nodes/metrics + verbs: + - "*" diff --git a/controller-0/scheduler b/controller-0/scheduler new file mode 100644 index 0000000..8a134a7 --- /dev/null +++ b/controller-0/scheduler @@ -0,0 +1,7 @@ +### +# kubernetes scheduler config + +# default config should be adequate + +# Add your own! +KUBE_SCHEDULER_ARGS="--config=/etc/kubernetes/kube-scheduler.yaml --v=2" diff --git a/controller-1/apiserver b/controller-1/apiserver new file mode 100644 index 0000000..49854fb --- /dev/null +++ b/controller-1/apiserver @@ -0,0 +1,26 @@ +### +# kubernetes system config +# +# The following values are used to configure the kube-apiserver +# + +# The address on the local server to listen to. +#KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1" + +# The port on the local server to listen on. +#KUBE_API_PORT="--port=6443" + +# Port minions listen on +KUBELET_PORT="--kubelet-port=10250" + +# Comma separated list of nodes in the etcd cluster +KUBE_ETCD_SERVERS="--etcd-servers=https://10.240.0.10:2379,https://10.240.0.11:2379" + +# Address range to use for services +KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.32.0.0/16" + +# default admission control policies +KUBE_ADMISSION_CONTROL="--enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota" + +# Add your own! 
+KUBE_API_ARGS="--advertise-address=10.240.0.11 --allow-privileged=true --apiserver-count=2 --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/log/audit/audit.log --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --client-ca-file=/var/lib/kubernetes/ca.pem --enable-swagger-ui=true --etcd-cafile=/var/lib/kubernetes/ca.pem --etcd-certfile=/var/lib/kubernetes/kubernetes.pem --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem --event-ttl=1h --experimental-encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem --kubelet-https=true --runtime-config=api/all --service-account-key-file=/var/lib/kubernetes/service-account.pem --service-node-port-range=30000-32767 --tls-cert-file=/var/lib/kubernetes/kubernetes.pem --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem --v=2" diff --git a/controller-1/config b/controller-1/config new file mode 100644 index 0000000..e57d165 --- /dev/null +++ b/controller-1/config @@ -0,0 +1,13 @@ + +# This file controls the state of SELinux on the system. +# SELINUX= can take one of these three values: +# enforcing - SELinux security policy is enforced. +# permissive - SELinux prints warnings instead of enforcing. +# disabled - No SELinux policy is loaded. +#SELINUX=enforcing +SELINUX=permissive +# SELINUXTYPE= can take one of three two values: +# targeted - Targeted processes are protected, +# minimum - Modification of targeted policy. Only selected processes are protected. +# mls - Multi Level Security protection. +SELINUXTYPE=targeted diff --git a/controller-1/controller-manager b/controller-1/controller-manager new file mode 100644 index 0000000..545f850 --- /dev/null +++ b/controller-1/controller-manager @@ -0,0 +1,7 @@ +### +# The following values are used to configure the kubernetes controller-manager + +# defaults from config and apiserver should be adequate + +# Add your own!
+KUBE_CONTROLLER_MANAGER_ARGS="--address=0.0.0.0 --cluster-cidr=10.200.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig --leader-elect=true --root-ca-file=/var/lib/kubernetes/ca.pem --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem --service-cluster-ip-range=10.32.0.0/24 --use-service-account-credentials=true --v=2" diff --git a/controller-1/etcd.conf b/controller-1/etcd.conf new file mode 100644 index 0000000..9b509e0 --- /dev/null +++ b/controller-1/etcd.conf @@ -0,0 +1,69 @@ +#[Member] +#ETCD_CORS="" +ETCD_DATA_DIR="/var/lib/etcd/default.etcd" +#ETCD_WAL_DIR="" +ETCD_LISTEN_PEER_URLS="https://10.240.0.11:2380" +ETCD_LISTEN_CLIENT_URLS="https://10.240.0.11:2379,https://127.0.0.1:2379" +#ETCD_MAX_SNAPSHOTS="5" +#ETCD_MAX_WALS="5" +ETCD_NAME="controller-1" +#ETCD_SNAPSHOT_COUNT="100000" +#ETCD_HEARTBEAT_INTERVAL="100" +#ETCD_ELECTION_TIMEOUT="1000" +#ETCD_QUOTA_BACKEND_BYTES="0" +#ETCD_MAX_REQUEST_BYTES="1572864" +#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s" +#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s" +#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s" +# +#[Clustering] +ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.240.0.11:2380" +ETCD_ADVERTISE_CLIENT_URLS="https://10.240.0.11:2379" +#ETCD_DISCOVERY="" +#ETCD_DISCOVERY_FALLBACK="proxy" +#ETCD_DISCOVERY_PROXY="" +#ETCD_DISCOVERY_SRV="" +ETCD_INITIAL_CLUSTER="controller-0=https://10.240.0.10:2380,controller-1=https://10.240.0.11:2380" +ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" +ETCD_INITIAL_CLUSTER_STATE="new" +#ETCD_STRICT_RECONFIG_CHECK="true" +#ETCD_ENABLE_V2="true" +# +#[Proxy] +#ETCD_PROXY="off" +#ETCD_PROXY_FAILURE_WAIT="5000" +#ETCD_PROXY_REFRESH_INTERVAL="30000" +#ETCD_PROXY_DIAL_TIMEOUT="1000" +#ETCD_PROXY_WRITE_TIMEOUT="5000" +#ETCD_PROXY_READ_TIMEOUT="0" +# +#[Security] +ETCD_CERT_FILE="/etc/etcd/kubernetes.pem" +ETCD_KEY_FILE="/etc/etcd/kubernetes-key.pem" +ETCD_CLIENT_CERT_AUTH="true" +ETCD_TRUSTED_CA_FILE="/etc/etcd/ca.pem" +#ETCD_AUTO_TLS="false" +ETCD_PEER_CERT_FILE="/etc/etcd/kubernetes.pem" +ETCD_PEER_KEY_FILE="/etc/etcd/kubernetes-key.pem" +ETCD_PEER_CLIENT_CERT_AUTH="true" +ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ca.pem" +#ETCD_PEER_AUTO_TLS="false" +# +#[Logging] +#ETCD_DEBUG="false" +#ETCD_LOG_PACKAGE_LEVELS="" +#ETCD_LOG_OUTPUT="default" +# +#[Unsafe] +#ETCD_FORCE_NEW_CLUSTER="false" +# +#[Version] +#ETCD_VERSION="false" +#ETCD_AUTO_COMPACTION_RETENTION="0" +# +#[Profiling] +#ETCD_ENABLE_PPROF="false" +#ETCD_METRICS="basic" +# +#[Auth] +#ETCD_AUTH_TOKEN="simple" diff --git a/controller-1/etcd.service b/controller-1/etcd.service new file mode 100644 index 0000000..2306643 --- /dev/null +++ b/controller-1/etcd.service @@ -0,0 +1,18 @@ +[Unit] +Description=Etcd Server +After=network.target +After=network-online.target +Wants=network-online.target + +[Service] +Type=notify +WorkingDirectory=/var/lib/etcd/ +EnvironmentFile=-/etc/etcd/etcd.conf +User=etcd +# set GOMAXPROCS to number of processors +ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=${ETCD_NAME} --data-dir=${ETCD_DATA_DIR} --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS} --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} --initial-cluster=${ETCD_INITIAL_CLUSTER} --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} --initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE} 
--cert-file=${ETCD_CERT_FILE} --key-file=${ETCD_KEY_FILE} --client-cert-auth=${ETCD_CLIENT_CERT_AUTH} --trusted-ca-file=${ETCD_TRUSTED_CA_FILE} --peer-cert-file=${ETCD_PEER_CERT_FILE} --peer-key-file=${ETCD_PEER_KEY_FILE} --peer-client-cert-auth=${ETCD_PEER_CLIENT_CERT_AUTH} --peer-trusted-ca-file=${ETCD_PEER_TRUSTED_CA_FILE}" +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/controller-1/kube-apiserver.service b/controller-1/kube-apiserver.service new file mode 100644 index 0000000..5cebb13 --- /dev/null +++ b/controller-1/kube-apiserver.service @@ -0,0 +1,27 @@ +[Unit] +Description=Kubernetes API Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=network.target +After=etcd.service + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/apiserver +#User=kube +ExecStart=/usr/local/bin/kube-apiserver \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_ETCD_SERVERS \ + $KUBE_API_ADDRESS \ + $KUBE_API_PORT \ + $KUBELET_PORT \ + $KUBE_ALLOW_PRIV \ + $KUBE_SERVICE_ADDRESSES \ + $KUBE_ADMISSION_CONTROL \ + $KUBE_API_ARGS +Restart=on-failure +Type=notify +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/controller-1/kube-apiserver_rbac.yaml b/controller-1/kube-apiserver_rbac.yaml new file mode 100644 index 0000000..ac17cf6 --- /dev/null +++ b/controller-1/kube-apiserver_rbac.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: system:kube-apiserver + namespace: "" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-apiserver-to-kubelet +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: kubernetes diff --git a/controller-1/kube-controller-manager.service b/controller-1/kube-controller-manager.service new file mode 100644 index 0000000..a8effcd --- /dev/null +++ b/controller-1/kube-controller-manager.service @@ -0,0 +1,18 @@ +[Unit] +Description=Kubernetes Controller Manager +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/controller-manager +#User=kube +ExecStart=/usr/local/bin/kube-controller-manager \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_MASTER \ + $KUBE_CONTROLLER_MANAGER_ARGS +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/controller-1/kube-scheduler.service b/controller-1/kube-scheduler.service new file mode 100644 index 0000000..f85e0db --- /dev/null +++ b/controller-1/kube-scheduler.service @@ -0,0 +1,13 @@ +[Unit] +Description=Kubernetes Scheduler +Documentation=https://github.com/kubernetes/kubernetes + +[Service] +ExecStart=/usr/local/bin/kube-scheduler \ + --config=/etc/kubernetes/kube-scheduler.yaml \ + --v=2 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target diff --git a/controller-1/kube-scheduler.yaml b/controller-1/kube-scheduler.yaml new file mode 100644 index 0000000..051cc66 --- /dev/null +++ b/controller-1/kube-scheduler.yaml @@ -0,0 +1,6 @@ +apiVersion: componentconfig/v1alpha1 +kind: KubeSchedulerConfiguration +clientConnection: + kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig" +leaderElection: + leaderElect: true diff --git a/controller-1/kubernetes.default.svc.cluster.local.conf b/controller-1/kubernetes.default.svc.cluster.local.conf new file mode 100644 index 0000000..8d8ffe8 --- /dev/null +++ 
b/controller-1/kubernetes.default.svc.cluster.local.conf @@ -0,0 +1,9 @@ +server { + listen 80; + server_name kubernetes.default.svc.cluster.local; + + location /healthz { + proxy_pass https://127.0.0.1:6443/healthz; + proxy_ssl_trusted_certificate /var/lib/kubernetes/ca.pem; + } +} diff --git a/controller-1/kubernetes.repo b/controller-1/kubernetes.repo new file mode 100644 index 0000000..8094327 --- /dev/null +++ b/controller-1/kubernetes.repo @@ -0,0 +1,8 @@ +[kubernetes] +name=Kubernetes +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg + https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg diff --git a/controller-1/rbac_authorizations.yaml b/controller-1/rbac_authorizations.yaml new file mode 100644 index 0000000..92b3dbb --- /dev/null +++ b/controller-1/rbac_authorizations.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:kube-apiserver-to-kubelet +rules: + - apiGroups: + - "" + resources: + - nodes/proxy + - nodes/stats + - nodes/log + - nodes/spec + - nodes/metrics + verbs: + - "*" diff --git a/controller-1/scheduler b/controller-1/scheduler new file mode 100644 index 0000000..8a134a7 --- /dev/null +++ b/controller-1/scheduler @@ -0,0 +1,7 @@ +### +# kubernetes scheduler config + +# default config should be adequate + +# Add your own! +KUBE_SCHEDULER_ARGS="--config=/etc/kubernetes/kube-scheduler.yaml --v=2" diff --git a/controller_manager_setup.sh b/controller_manager_setup.sh new file mode 100755 index 0000000..f1e08c8 --- /dev/null +++ b/controller_manager_setup.sh @@ -0,0 +1,19 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## + +sudo curl https://storage.googleapis.com/kubernetes-release/release/v1.10.3/bin/linux/amd64/kube-controller-manager -o /usr/local/bin/kube-controller-manager +sudo chmod 755 /usr/local/bin/kube-controller-manager + +sudo mkdir -p /var/lib/kubernetes + +sudo mv kube-controller-manager.kubeconfig /var/lib/kubernetes/ +sudo mv kube-controller-manager.service /etc/systemd/system/ +sudo mv controller-manager /etc/kubernetes/ + +sudo systemctl daemon-reload +sudo systemctl start kube-controller-manager +sudo systemctl enable kube-controller-manager + +echo "The next step is to create the Kubernetes Scheduler" diff --git a/controller_setup.sh b/controller_setup.sh new file mode 100755 index 0000000..daaa6d6 --- /dev/null +++ b/controller_setup.sh @@ -0,0 +1,56 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +sudo mv kubernetes.repo /etc/yum.repos.d/ +sudo yum install -y docker etcd kubectl +sudo systemctl start docker && sudo systemctl enable docker + +sudo setenforce 0 +sudo mv config /etc/selinux/ +sudo mkdir -p /etc/kubernetes + +sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/ +sudo mv etcd.conf /etc/etcd/ +sudo chown etcd:etcd /etc/etcd/*.pem +sudo rm -rf /var/lib/etcd/default.etcd +sudo systemctl start etcd +sudo systemctl enable etcd +sudo etcdctl --ca-file /etc/etcd/ca.pem --cert-file /etc/etcd/kubernetes.pem --key-file /etc/etcd/kubernetes-key.pem --endpoints https://127.0.0.1:2379 member list + +sudo curl https://storage.googleapis.com/kubernetes-release/release/v1.10.3/bin/linux/amd64/kube-apiserver -o /usr/local/bin/kube-apiserver +sudo curl
https://storage.googleapis.com/kubernetes-release/release/v1.10.3/bin/linux/amd64/kube-controller-manager -o /usr/local/bin/kube-controller-manager +sudo curl https://storage.googleapis.com/kubernetes-release/release/v1.10.3/bin/linux/amd64/kube-scheduler -o /usr/local/bin/kube-scheduler +sudo chmod 755 /usr/local/bin/kub* + +sudo mkdir -p /var/lib/kubernetes +sudo mv ca.pem ca-key.pem kubernetes.pem kubernetes-key.pem service-account.pem service-account-key.pem encryption-config.yaml /var/lib/kubernetes/ + +sudo mv kube-apiserver.service /etc/systemd/system/ +sudo mv apiserver /etc/kubernetes/ + +sudo mv kube-controller-manager.kubeconfig /var/lib/kubernetes/ +sudo mv kube-controller-manager.service /etc/systemd/system/ +sudo mv controller-manager /etc/kubernetes/ + +sudo mv kube-scheduler.kubeconfig /var/lib/kubernetes/ +sudo mv kube-scheduler.service /etc/systemd/system/ +sudo mv scheduler /etc/kubernetes/ + +sudo systemctl daemon-reload +sudo systemctl start kube-apiserver kube-controller-manager kube-scheduler +sudo systemctl enable kube-apiserver kube-controller-manager kube-scheduler + +sudo yum install -y nginx + +sudo mv kubernetes.default.svc.cluster.local.conf /etc/nginx/conf.d/kubernetes.default.svc.cluster.local.conf +sudo systemctl start nginx && sudo systemctl enable nginx + +kubectl get componentstatuses --kubeconfig admin.kubeconfig + +curl -H "Host: kubernetes.default.svc.cluster.local" -i http://127.0.0.1/healthz + +kubectl apply -f rbac_authorizations.yaml --kubeconfig admin.kubeconfig +kubectl apply -f kube-apiserver_rbac.yaml --kubeconfig admin.kubeconfig + +echo "The next step is to create the Load Balancer" diff --git a/controller_uninstall.sh b/controller_uninstall.sh new file mode 100755 index 0000000..feeebe9 --- /dev/null +++ b/controller_uninstall.sh @@ -0,0 +1,19 @@ +#!/bin/bash +## +## Script to uninstall Kubernetes from the controllers +## +sudo rm /etc/nginx/conf.d/kubernetes.default.svc.cluster.local.conf +sudo yum remove -y nginx + +sudo systemctl stop kube-scheduler kube-controller-manager kube-apiserver && sudo systemctl disable kube-scheduler kube-controller-manager kube-apiserver + +sudo rm -f /etc/kubernetes/scheduler /etc/systemd/system/kube-scheduler.service /var/lib/kubernetes/kube-scheduler.kubeconfig +sudo rm -f /etc/kubernetes/controller-manager /etc/systemd/system/kube-controller-manager.service /var/lib/kubernetes/kube-controller-manager.kubeconfig +sudo rm -f /etc/kubernetes/apiserver /etc/systemd/system/kube-apiserver.service +sudo rm -rf /var/lib/kubernetes +sudo rm -f /usr/local/bin/kub* + +sudo systemctl stop etcd && sudo systemctl disable etcd +sudo yum remove -y etcd kubectl +sudo rm -rf /etc/etcd /var/lib/etcd + diff --git a/controllers b/controllers new file mode 100644 index 0000000..ce01362 --- /dev/null +++ b/controllers @@ -0,0 +1 @@ +hello diff --git a/ctrl-mgr_vars b/ctrl-mgr_vars new file mode 100644 index 0000000..860ca83 --- /dev/null +++ b/ctrl-mgr_vars @@ -0,0 +1 @@ +KUBE_POD_CIDR=10.200.0.0/16 diff --git a/docs/0_Environment_Setup.md b/docs/0_Environment_Setup.md new file mode 100644 index 0000000..34b7f21 --- /dev/null +++ b/docs/0_Environment_Setup.md @@ -0,0 +1,49 @@ +### Setting up Kubernetes on CentOS on GCE ### + +Google Cloud provides US$300 of free credit for one year when first signing up for Google Cloud. This credit can be used to demonstrate Kubernetes running on CentOS and provide a great learning playground at no cost.
I have written a series of scripts (heavily inspired/copied from [Kubernetes: The Hard Way by Kelsey Hightower](https://github.com/kelseyhightower/kubernetes-the-hard-way)) to automate this deployment using CentOS on GCE. + +This [first script](https://gogs.ervine.org/jonny/gce-centos-kubernetes/src/master/scripts/0_env_setup.sh) is used to collect details that will be used to set up the Kubernetes environment on the GCE Cloud. A series of questions will be asked and the responses saved to a config file for later use. + +**You must have logged into the Google Cloud via the SDK for this whole process to work correctly.** + +The information you're asked for is as follows: + + - Google Compute Region to use. This can be any Google Compute Region - obviously if it is geographically closer to you, the response should be faster (or of lower latency). Nothing that is performed in this set of scripts/exercises requires the region to be nearby though, and the US regions tend to be the lowest cost options on the Google Cloud. As such, the default region to use is US West 1 (us-west1) + + - Google Compute Zone to use. This will depend upon the region chosen previously. No error checking is performed, so you will need to be certain that you choose a valid zone for the previously selected region. If the zone does not exist in the region, then the later scripts will simply fail. Again, a default is chosen based on the lowest cost option, and is set to US West1 C (us-west1-c) + + - Google Compute Project to use. The scripts will not create a new project, so you should have a project already created on the Google Cloud Console. You will need to provide the unique name of the project here. + + - Kubernetes version to use (tested with 1.10.2, 1.10.3, 1.10.5, and 1.10.6 - 1.10.6 is the default. 1.11.x has not been tested but might work) + + - The scripts will create a new network (VPC) to operate on. This needs to be given a name. The default is set to kubernetes (avoid using underscores in Google Cloud object names). + + - Within this network a subnet for the Kubernetes nodes and masters must be created, and also given a name. The default is set to kubernetes-subnet. + + - A public IP address will be created for the Kubernetes cluster, and will also have a name. The default is set to kube-public-ip. + + - The Kubernetes subnet should have a CIDR network address associated with it. This is the address range that the masters and nodes will share and communicate over. By default (and in most Kubernetes examples) this is set to 10.240.0.0/24. You can change this if you want - however there should be no compelling reason to. + + - The script will then attempt to calculate the network part of the CIDR address entered previously. If using the default values, this should be 10.240.0 - if you are using your own network address values, you need to ensure that this is set correctly. + + - The CIDR network address used by the Kubernetes pods is set next. This is the network range used by the containers running in the pods. This should be a separate network address space from that used by the nodes and masters. By default, this is set to 10.200.0.0/16. The overall address space is set to use a Class B pool, with each Kubernetes node using a Class C sub-division of this space. e.g. 10.200.0.0/16 is the overall network, and node1 will use 10.200.1.0/24 for the pods hosted on node1.
+ + - Again, the script attempts to calculate the network part of the address specified, and should default to 10.200 - if using your own values, ensure this is set correctly. + + - The script will also attempt to calculate the address space prefix (netmask) to be used for the nodes. Given the defaults above, the prefix would be set to 24. + + - A firewall rule will be created to allow traffic to flow between nodes internally. This firewall rule requires a name. The default is set to kubernetes-allow-internal. + + - A second firewall rule to allow traffic from the external (Internet) network must be created to allow traffic such as SSH, Kubernetes, and HTTP. This rule name is set by default to kubernetes-allow-external + + - The script will then ask how many master nodes in the Kubernetes cluster the user wants to create. The default is set to 2 (to demonstrate a multi-master configuration). The more nodes created, the more expensive the cluster will be. + + - Finally, the number of worker nodes is required. As per the master nodes, the default is set to 2, but this can be increased. More nodes will be more expensive. + +The environment script saves these variables in a file named .gce_kubernetes.config for use in subsequent scripts. + +The script is set up to use Google Compute Engine g1-small instances which have few resources. The resulting Kubernetes cluster will not be able to host many containers, but should be able to demonstrate small deployments. The instance size can be increased or the number of nodes can be increased (both at increased cost). It would be possible to prompt for this setting as well in a future version. + +Subsequent scripts are then used to create the Kubernetes controller and worker nodes using the information previously supplied. Sticking to the default values should just work. The last of the setup scripts should perform a series of tests to validate that the kubernetes cluster is functional and accessible. + +There is a [final script](https://gogs.ervine.org/jonny/gce-centos-kubernetes/src/master/scripts/17_uninstall_everything.sh) present that should tear down the existing configuration and delete all GCE resources, which should mean billing caused by the kubernetes environment is stopped. diff --git a/docs/10_Create_kube-proxy_Instances.md b/docs/10_Create_kube-proxy_Instances.md new file mode 100644 index 0000000..e5e6f58 --- /dev/null +++ b/docs/10_Create_kube-proxy_Instances.md @@ -0,0 +1,8 @@ +This script installs kube-proxy on the nodes designated as worker nodes. + +The script creates a kube-proxy installation script from a template - prefilling the clusterCIDR address from the environment variables prior to copying. + +The script then copies the kube-proxy setup script to each worker node, and then executes the script on each node via sudo. This is done serially - it could potentially be done with just one loop though. + +The kube-proxy systemd service script is created and filled with the correct configuration parameters. The configuration is filled in and systemctl is reloaded. The kube-proxy binary is downloaded and the service is started and enabled. + diff --git a/docs/11_Create_client_kubectl_admin.md b/docs/11_Create_client_kubectl_admin.md new file mode 100644 index 0000000..6fde5db --- /dev/null +++ b/docs/11_Create_client_kubectl_admin.md @@ -0,0 +1,18 @@ +This script creates an admin.kubeconfig file to use with kubectl to interact with and administer the kubernetes cluster.
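For reference, such a kubeconfig is normally assembled with a handful of kubectl config commands. The sketch below is a hedged illustration of that pattern rather than a copy of the script; it assumes the admin.pem/admin-key.pem files from the certificate step and the default kube-public-ip address name, and it builds the file in the current directory:

```bash
# Hypothetical sketch - the real script takes its values from .gce_kubernetes.config and gcloud.
KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kube-public-ip \
  --region $(gcloud config get-value compute/region) --format 'value(address)')

kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem --embed-certs=true \
  --server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \
  --kubeconfig=admin.kubeconfig

kubectl config set-credentials admin \
  --client-certificate=admin.pem --client-key=admin-key.pem \
  --embed-certs=true --kubeconfig=admin.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes --user=admin --kubeconfig=admin.kubeconfig

kubectl config use-context default --kubeconfig=admin.kubeconfig
```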
+ +The working directory is changed to the directory where the cryptographic material was created, and any existing admin.kubeconfig file is deleted. + +The script then populates the admin.kubeconfig file with information from the environment variables and from the GCE environment. + +A final test is then run to use the new admin.kubeconfig file to verify that it is working as expected. Output similar to the below should be observed: + +NAME STATUS MESSAGE ERROR +scheduler Healthy ok +controller-manager Healthy ok +etcd-0 Healthy {"health": "true"} +etcd-1 Healthy {"health": "true"} + +NAME STATUS ROLES AGE VERSION +worker-0 Ready 27m v1.10.3 +worker-1 Ready 26m v1.10.3 + diff --git a/docs/12_Create_Pod_Routing_Rules.md b/docs/12_Create_Pod_Routing_Rules.md new file mode 100644 index 0000000..224485d --- /dev/null +++ b/docs/12_Create_Pod_Routing_Rules.md @@ -0,0 +1,11 @@ +The next step is to create the pod routing rules to allow pods to communicate with one another. + +These rules are set up on Google Compute Engine and use values from the environment variables specified at the start. + +This should be very straightforward, and a summary of the rules created should be printed when the script finishes, which should look similar to: + +NAME NETWORK DEST_RANGE NEXT_HOP PRIORITY +default-route-00c2371bebacb414 kubernetes 10.240.0.0/24 kubernetes 1000 +default-route-13b1fd44c0bcabff kubernetes 0.0.0.0/0 default-internet-gateway 1000 +kubernetes-pod-route-worker-0 kubernetes 10.200.0.0/24 10.240.0.20 1000 +kubernetes-pod-route-worker-1 kubernetes 10.200.1.0/24 10.240.0.21 1000 diff --git a/docs/13_Create_kube-dns_instances.md b/docs/13_Create_kube-dns_instances.md new file mode 100644 index 0000000..6a40493 --- /dev/null +++ b/docs/13_Create_kube-dns_instances.md @@ -0,0 +1,9 @@ +The next step downloads and runs the kube-dns pods to ensure that name resolution services can be provided to pods. The yaml description for kube-dns is taken directly from Kelsey Hightower's examples; however, kube-dns is a fairly common deployment, so there should be plenty of alternative examples if required. The example from Kubernetes the Hard Way is used here because I have earlier set the clusterDNS IP address to a value that is also set in the kube-dns yaml file from the Kelsey Hightower source. + +Once the kube-dns pods have been deployed, a test busybox pod is deployed and DNS services are tested by performing a lookup against the kubernetes service, which should return a name/IP address pair. + +Server: 10.32.0.10 +Address 1: 10.32.0.10 kube-dns.kube-system.svc.cluster.local + +Name: kubernetes +Address 1: 10.32.0.1 kubernetes.default.svc.cluster.local diff --git a/docs/14_Additional_Functional_Tests.md b/docs/14_Additional_Functional_Tests.md new file mode 100644 index 0000000..75256ff --- /dev/null +++ b/docs/14_Additional_Functional_Tests.md @@ -0,0 +1,23 @@ +This is the final set of functional tests. + +Test One: +Confirming the encryption of data at rest using the encryption key that was created earlier in the exercises. +A kubernetes secret is created from the command line, and then the contents of the stored secret (in etcd) are checked. If all is working correctly, the secret contents should be encrypted, and this is confirmed by finding the encryption header in etcd output: + +Prefix should be: k8s:enc:aescbc:v1:key1 + + +Test Two: +NGINX Deployment +A single pod nginx deployment is created and is then made available to the client workstation by using the kubectl port-forward function.
This exposes the nginx port 80 to the local client on port 8080 and can be tested via a web browser or a utility like curl or wget. + +Test Three: +The logs from the nginx pod are displayed on the screen using the kubectl command. + +Test Four: +A command is executed inside the running pod - the version of nginx is displayed through the nginx -v command. + +Test Five: +The nginx instance is exposed via a NodePort service and a firewall rule is created to allow the outside world to communicate with the exposed service. Note to self: Shouldn't this be exposed through the GCE load balancer rather than a NodePort? + +Future: Add in the untrusted workload via gVisor ... needs runsc installed on the worker nodes. diff --git a/docs/15_Delete_kube-dns_Deployment.md b/docs/15_Delete_kube-dns_Deployment.md new file mode 100644 index 0000000..35f0005 --- /dev/null +++ b/docs/15_Delete_kube-dns_Deployment.md @@ -0,0 +1 @@ +This script was written as a clean up for the kube-dns pod/deployment only. This allows you to roll back the kube-dns deployment. diff --git a/docs/16_Uninstall_etcd.md b/docs/16_Uninstall_etcd.md new file mode 100644 index 0000000..99361cd --- /dev/null +++ b/docs/16_Uninstall_etcd.md @@ -0,0 +1,3 @@ +This script should clean up and remove the etcd instances from the controller nodes. + +Useful for cleaning up servers without actually deleting them. diff --git a/docs/17_Uninstall_Everything.md b/docs/17_Uninstall_Everything.md new file mode 100644 index 0000000..487c4e8 --- /dev/null +++ b/docs/17_Uninstall_Everything.md @@ -0,0 +1 @@ +This is the complete clean up script and should delete all the cloud resources that have been previously set up. This should mean no further billing is performed. The script will also clean up local resources that were created, so that if the scripts are re-run, new resources are created for the new environment being used. diff --git a/docs/1_GCloud_Resource_Setup.md b/docs/1_GCloud_Resource_Setup.md new file mode 100644 index 0000000..976db6b --- /dev/null +++ b/docs/1_GCloud_Resource_Setup.md @@ -0,0 +1,13 @@ +This script will create the Google Cloud resources used by our Kubernetes project. + +First off, the variables saved by the environment set up script are read. + +The network, subnet, and firewall rules are created. The script should print out the firewall rules when these commands complete - however it can take a bit of time after creating a firewall rule for it to show in the firewall rule list. + +A public IP address is then created. + +The Kubernetes Master node instances are then created. These instances use a 200GB boot disk (this could probably be reduced) and will be assigned to the Kubernetes subnet CIDR created in the environment set up script (default was 10.240.0.0/24). The script will create the controller instances with IP addresses of 10.240.0.1X where X is the instance number. If you chose to have 9 controllers, then you would have IP addresses: 10.240.0.10 through to 10.240.0.18. The script has been tested with up to 9 controllers. 11 or more controllers would then take IP addresses of 10.240.0.110 and beyond, which should work, but hasn't been tested. + +Similarly, the worker nodes are created next. They have a similar set up to the master nodes and are added to the same network. By default, they will use IP addresses of type 10.240.0.2X where X is the instance number. The same limits as for the master nodes apply. They also have the pod CIDR network address for that node added as metadata to each instance.
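To make the metadata step concrete, a single worker created with the default answers would look roughly like the command below. This is a hedged sketch only - the metadata key name (pod-cidr) and exact flags are assumptions, and the real script substitutes its own variables:

```bash
# Hypothetical example for worker-0 using the default names and addressing described above.
gcloud compute instances create worker-0 \
  --boot-disk-size 200GB \
  --can-ip-forward \
  --image-family centos-7 --image-project centos-cloud \
  --machine-type g1-small \
  --private-network-ip 10.240.0.20 \
  --subnet kubernetes-subnet \
  --metadata pod-cidr=10.200.0.0/24
```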
+ +Once the instances are created, they are then updated via a looped yum update command. The more instances created, the longer this will take. diff --git a/docs/2_Certificate_and_Authentication_Creation.md b/docs/2_Certificate_and_Authentication_Creation.md new file mode 100644 index 0000000..2975dcc --- /dev/null +++ b/docs/2_Certificate_and_Authentication_Creation.md @@ -0,0 +1,36 @@ +This script will automate the creation of the PKI material, kubeconfigs, and other authentication files for the Kubernetes cluster. + +We start by downloading the CloudFlare SSL utilities. The PKI cryptographic material can be created using many different utilities (openssl, easy-rsa etc.); however, the cfssl tools were used by Kelsey Hightower, and have been used in many other tutorials as well. + +This script will create a directory called certs-dir into which all the other output files will be saved. + +The first file created is the self-signed Certificate Authority. If you have your own CA you want to use, you'll need to perform this step manually, making sure you create all the necessary files. This CA will be used to sign all subsequent PKI files listed below. [Files created: ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem] + +Key material is then created for the 'admin' user of our Kubernetes cluster. [Files created: admin.csr admin-csr.json admin-key.pem admin.pem] + +Client certificates are created for each worker node [Files created: worker-X.csr worker-X-csr.json worker-X-key.pem worker-X.pem] (where X is the instance number of the worker node) NOTE: THIS IS USING A HARDCODED VALUE OF 2 FOR THE WORKER NODES. THIS NEEDS TO BE UPDATED TO CALCULATE THE NUMBER OF WORKER NODES. + +The Kubernetes controller manager certificate and key are created next. [Files created: kube-controller-manager.csr kube-controller-manager-csr.json kube-controller-manager-key.pem kube-controller-manager.pem] + +The Kubernetes Proxy certificate and key are created next. [Files created: kube-proxy.csr kube-proxy-csr.json kube-proxy-key.pem kube-proxy.pem] + +The Kubernetes Scheduler certificate and key are then created. [Files created: kube-scheduler.csr kube-scheduler-csr.json kube-scheduler-key.pem kube-scheduler.pem] + +The Kubernetes API Server certificate and key are created next. [Files created: kubernetes.csr kubernetes-csr.json kubernetes-key.pem kubernetes.pem] NOTE: THIS IS USING HARDCODED IP ADDRESSES AND IS ONLY SET FOR 2 MASTER NODES. THIS NEEDS TO BE UPDATED TO CALCULATE THE MASTER NODE ADDRESSES CORRECTLY + +The service account certificate and key are then created. [Files created: service-account.csr service-account-csr.json service-account-key.pem service-account.pem] + +The client kubeconfig authentication files are created for each worker node. [Files created: worker-X.kubeconfig] (where X is the instance number of the worker node) NOTE: +THIS IS USING A HARDCODED VALUE OF 2 FOR THE WORKER NODES. THIS NEEDS TO BE UPDATED TO CALCULATE THE NUMBER OF WORKER NODES. + +The Kubernetes Proxy kubeconfig file is created next. [Files created: kube-proxy.kubeconfig] + +The Kubernetes Controller Manager kubeconfig is created next. [Files created: kube-controller-manager.kubeconfig] + +The Kubernetes Scheduler kubeconfig is created next. [Files created: kube-scheduler.kubeconfig] + +The admin user kubeconfig file is then created. [Files created: admin.kubeconfig] + +The relevant key material files are then copied to the worker nodes and master nodes as required.
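The cfssl pattern behind each of the certificate steps above looks broadly like the following sketch (shown for the CA and one worker certificate; the -csr.json inputs are the files listed above, and the worker-0 hostname/IP values are assumed from the default addressing):

```bash
# Self-signed CA (produces ca.pem, ca-key.pem, ca.csr)
cfssl gencert -initca ca-csr.json | cfssljson -bare ca

# Client certificate for worker-0, signed by that CA
cfssl gencert \
  -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
  -hostname=worker-0,10.240.0.20 \
  -profile=kubernetes \
  worker-0-csr.json | cfssljson -bare worker-0

# Copy the material that worker needs up to the instance
gcloud compute scp ca.pem worker-0-key.pem worker-0.pem worker-0:~/
```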
+ +Finally, a data encryption key is created and copied to the master nodes. diff --git a/docs/3_Install_and_Configure_etcd.md b/docs/3_Install_and_Configure_etcd.md new file mode 100644 index 0000000..c7b0b90 --- /dev/null +++ b/docs/3_Install_and_Configure_etcd.md @@ -0,0 +1,16 @@ +This script sets up the etcd daemon on the master nodes. It starts by copying across a script etcd_setup.sh to each master node and then executing it on the master node. This script performs the following functions. A .variables file is also copied across to each controller, which contains instance-specific variables derived from the environment details captured during the first step. These variables are the number of master nodes in the cluster, and the kubernetes cluster subnet address. + +NOTE: The following steps run on each controller node serially (i.e. one after another) +The Kubernetes yum repository is set up, and docker, etcd, and kubectl are installed. The docker daemon is started and enabled at this point. + +SELinux is set to permissive mode - this is bad practice ... and at a later date I'll look to create policies that allow SELinux to remain enabled. + +The key material for etcd is then copied to the /etc/etcd/ directory. + +The /etc/etcd/etcd.conf file is then updated to include the correct details for the Kubernetes cluster. Some of this is simply filled in with local environment parameters (e.g. hostname or local IP address). The ETCD_INITIAL_CLUSTER parameter must include all the IP addresses of the members of the etcd cluster. This is why we must pass the number of master nodes in the cluster as a variable to each master node. + +The etcd package from the EPEL repository doesn't automatically enable most of the settings we require. Having updated the /etc/etcd/etcd.conf file, the /etc/systemd/system/etcd.service file is created with the startup parameters we need enabled. + +The /var/lib/etcd/default.etcd directory (if it exists) is removed so that when we start etcd it is using the brand new configuration we've created. + +The etcd daemon is then started and enabled - after performing a systemctl daemon-reload. A final check is performed to ensure that the etcd member nodes are working. This will only be correct on the final master node as this installation runs serially on the master nodes. diff --git a/docs/4_Install_and_Configure_apiserver.md b/docs/4_Install_and_Configure_apiserver.md new file mode 100644 index 0000000..e2d65c1 --- /dev/null +++ b/docs/4_Install_and_Configure_apiserver.md @@ -0,0 +1,15 @@ +This script sets up the API server on the master nodes. To prepare for this, we pass the Kubernetes subnet and number of master nodes into a variable file and copy this to each master node. A separate kube-apiserver_setup.sh script is copied across to each controller and executed on each controller in serial order. + +The kube-apiserver_setup.sh script performs the following actions: + +A string variable containing the addresses of the etcd servers is generated from the variables passed earlier (see the sketch below). This will be used in a later configuration file. + +The kube-apiserver binary is downloaded from the upstream project and copied to /usr/local/bin - currently this uses 1.10.3. It should be trivial to update this to use 1.10.4 and 1.10.5. Updating to 1.11.0 will require further work.
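Building that etcd server string is just a loop over the controller count. The sketch below assumes the KUBE_SUBNET_ADDR and KUBE_CONTROLLERS variable names seen in the etcd_vars example file; the real kube-apiserver_setup.sh may differ in detail:

```bash
#!/bin/bash
# Sketch: derive the --etcd-servers value from the variables passed to each master.
# Controllers sit at ${KUBE_SUBNET_ADDR}.1X, e.g. 10.240.0.10 and 10.240.0.11.
. ./etcd_vars   # assumed to provide KUBE_SUBNET_ADDR=10.240.0 and KUBE_CONTROLLERS=2

ETCD_SERVERS=""
for ((i=0; i<KUBE_CONTROLLERS; i++)); do
  ETCD_SERVERS="${ETCD_SERVERS}https://${KUBE_SUBNET_ADDR}.1${i}:2379,"
done
ETCD_SERVERS="${ETCD_SERVERS%,}"   # trim the trailing comma

echo "KUBE_ETCD_SERVERS=\"--etcd-servers=${ETCD_SERVERS}\""
# -> KUBE_ETCD_SERVERS="--etcd-servers=https://10.240.0.10:2379,https://10.240.0.11:2379"
```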
+ +The required cryptographic material is copied to a newly created directory /var/lib/kubernetes + +A new /etc/systemd/system/kube-apiserver.service file is created and will read its configuration from files created later. + +The /etc/kubernetes/apiserver configuration file is then populated with the necessary data - this will use the etcd server address string generated earlier. + +The kube-apiserver is then started and enabled, after systemctl daemon-reload has been run (to load in the new kube-apiserver.service file). diff --git a/docs/5_Install_and_Configure_Controller_Manager.md b/docs/5_Install_and_Configure_Controller_Manager.md new file mode 100644 index 0000000..84ce569 --- /dev/null +++ b/docs/5_Install_and_Configure_Controller_Manager.md @@ -0,0 +1,13 @@ +The Kubernetes Controller Manager service is installed and configured by this script. The CIDR used by the pods needs to be passed to each master. As such, a variable file and controller_manager_setup.sh file are copied to each master node. + +The controller_manager_setup.sh script will perform the following actions on each master node in serial order: + +Download the kube-controller-manager binary from the upstream Kubernetes release and copy to /usr/local/bin + +The kube-controller-manager.kubeconfig file is copied to the /var/lib/kubernetes directory to allow the controller-manager to authenticate to Kubernetes. + +A new /etc/systemd/system/kube-controller-manager.service file is created to enable the Controller Manager service. + +The /etc/kubernetes/controller-manager configuration file is filled with the necessary configuration data (this includes the pod CIDR taken from the environment variables passed to each controller). + +The kube-controller-manager daemon is then started and enabled, after systemctl daemon-reload has been run. diff --git a/docs/6_Install_and_Configure_Scheduler.md b/docs/6_Install_and_Configure_Scheduler.md new file mode 100644 index 0000000..54348c4 --- /dev/null +++ b/docs/6_Install_and_Configure_Scheduler.md @@ -0,0 +1,13 @@ +The Kubernetes scheduler setup script file copies across a setup script to each master node, and then runs this script in serial order. The scheduler_setup.sh script file performs the following actions: + +The Kubernetes scheduler binary is downloaded from the upstream project and copied to /usr/local/bin + +The /etc/kubernetes/kube-scheduler.yaml file is created with the correct data, which will be used in the startup parameters for the scheduler daemon. + +The scheduler.kubeconfig file is moved to the /var/lib/kubernetes directory. + +A new /etc/systemd/system/kube-scheduler.service file is created with the expected startup parameters. These parameters include the kube-scheduler.yaml file listed above. + +The /etc/kubernetes/scheduler file is created and populated next. + +Finally, the kube-scheduler daemon is started and enabled, after systemctl daemon-reload is executed.
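A quick way to confirm the three control plane daemons came up on a master (not part of the scripts, just a handy check, assuming admin.kubeconfig has already been copied to the node):

```bash
# Run on a controller node after the services have been started.
for svc in kube-apiserver kube-controller-manager kube-scheduler; do
  echo -n "${svc}: "; sudo systemctl is-active "${svc}"
done

kubectl get componentstatuses --kubeconfig admin.kubeconfig
```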
diff --git a/docs/7_Create_nginx_Health_Monitor.md b/docs/7_Create_nginx_Health_Monitor.md new file mode 100644 index 0000000..ef827f8 --- /dev/null +++ b/docs/7_Create_nginx_Health_Monitor.md @@ -0,0 +1,11 @@ +This script will create the nginx based health monitor that will expose the Kubernetes API server HTTPS endpoint via HTTP, which can then be used by the Google Cloud Load Balancer (which only supports HTTP based health checks and not HTTPS based health checks). + +Two RBAC yaml files are copied to each master node, as well as the nginx reverse proxy configuration file, and a script to set up the nginx daemon on each master node. The setup script, named nginx_health_monitor_setup.sh, performs the following actions: + +The nginx package is installed, and the health monitor configuration file is copied to the correct location. + +The nginx daemon is then enabled and started. + +The Kubernetes component statuses are checked, and the nginx instance is tested to make sure it is working as well. + +The RBAC yaml files are then applied to the Kubernetes cluster. diff --git a/docs/8_Create_Load_Balancer.md b/docs/8_Create_Load_Balancer.md new file mode 100644 index 0000000..16d55e0 --- /dev/null +++ b/docs/8_Create_Load_Balancer.md @@ -0,0 +1,13 @@ +This script sets up the load balancer on the Google Cloud. + +The script reads environment variables so that it knows how many instances are to be serving the load balancer. + +The script first creates the HTTP based health check that checks the nginx endpoint created previously. + +A firewall rule is created to allow the HTTP health check to work. + +A target pool is created and then populated with the master node instances. + +A forwarding rule is then created so that traffic to the public address is passed to the target pool. + +A connection check is made from the local machine (i.e. outside of the Google Cloud) to the Kubernetes public address. diff --git a/docs/9a_Create_Kubernetes_Workers_with_Docker.md b/docs/9a_Create_Kubernetes_Workers_with_Docker.md new file mode 100644 index 0000000..3088756 --- /dev/null +++ b/docs/9a_Create_Kubernetes_Workers_with_Docker.md @@ -0,0 +1,18 @@ +There are two versions of this script - one to use docker, which is 9a, and the other to use containerd, which is 9b. + +The 9a (docker) variant does not set up the untrusted runtime environment using runsc and gVisor. However, docker is easier and more straightforward to deploy on CentOS via yum. + +This script installs Kubernetes on the nodes designated as worker nodes. + +The script reads the KUBE_POD_ADDR (this is specific to each worker node) and KUBE_NODE_POD_PREFIX environment variables and copies these to each worker node. + +The script then copies the kubelet setup script to each worker node, and then executes the script on each node via sudo. + +The node setup script installs and enables docker on each node, and then installs kubelet from the Kubernetes repository. Currently this is installing 1.10.3 - an action item for the future is to use an updated version, or make the version configurable via an environment variable. + +The container networking configuration is provided in /etc/cni - this uses the variables previously copied across (see the sketch below). + +The kubelet is downloaded and the correct configuration applied from the environment variables. + +The kubelet service is then enabled and started.
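The CNI piece mentioned above amounts to rendering a bridge configuration from those two variables. The sketch below illustrates the idea; the variables file name, the 10-bridge.conf file name, and the cnio0 bridge name are assumptions rather than a copy of the real worker setup script:

```bash
#!/bin/bash
# Sketch: write a CNI bridge config for this worker from the copied variables.
# KUBE_POD_ADDR is the node's pod subnet base (e.g. 10.200.0.0) and
# KUBE_NODE_POD_PREFIX its prefix length (e.g. 24).
. ~/.worker_variables

sudo mkdir -p /etc/cni/net.d
cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf
{
    "cniVersion": "0.3.1",
    "name": "bridge",
    "type": "bridge",
    "bridge": "cnio0",
    "isGateway": true,
    "ipMasq": true,
    "ipam": {
        "type": "host-local",
        "ranges": [[{"subnet": "${KUBE_POD_ADDR}/${KUBE_NODE_POD_PREFIX}"}]],
        "routes": [{"dst": "0.0.0.0/0"}]
    }
}
EOF
```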
+ diff --git a/docs/9b_Create_Kubernetes_Workers_with_containerd.md b/docs/9b_Create_Kubernetes_Workers_with_containerd.md new file mode 100644 index 0000000..0a065be --- /dev/null +++ b/docs/9b_Create_Kubernetes_Workers_with_containerd.md @@ -0,0 +1,14 @@ +This script installs Kubernetes on the nodes designated as worker nodes. + +The script reads the KUBE_POD_ADDR (this is specific to each worker node) and KUBE_NODE_POD_PREFIX environment variables and copies these to each worker node. + +The script then copies the kubelet setup script to each worker node, and then executes the script on each node via sudo. + +The node setup script installs and enables docker on each node, and then installs kubelet from the Kubernetes repository. Currently this is installing 1.10.3 - an action item for the future is to use an updated version, or make the version configurable via an environment variable. + +The container networking configuration is provided in /etc/cni - this uses the variables previously copied across. + +The kubelet is downloaded and the correct configuration applied from the environment variables. + +The kubelet service is then enabled and started. + diff --git a/etcd-listen b/etcd-listen new file mode 100644 index 0000000..e6bd56f --- /dev/null +++ b/etcd-listen @@ -0,0 +1 @@ +https://10.240.0.10:2380,https://10.240.0.11:2380,https://10.240.0.12:2380 \ No newline at end of file diff --git a/etcd_setup.sh b/etcd_setup.sh new file mode 100755 index 0000000..87e95c9 --- /dev/null +++ b/etcd_setup.sh @@ -0,0 +1,21 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +sudo mv kubernetes.repo /etc/yum.repos.d/ +sudo yum install -y docker etcd kubectl +sudo systemctl start docker && sudo systemctl enable docker + +sudo setenforce 0 +sudo mv config /etc/selinux/ +sudo mkdir -p /etc/kubernetes + +sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/ +sudo mv etcd.conf /etc/etcd/ +sudo chown etcd:etcd /etc/etcd/*.pem +sudo rm -rf /var/lib/etcd/default.etcd +sudo systemctl start etcd +sudo systemctl enable etcd +sudo etcdctl --ca-file /etc/etcd/ca.pem --cert-file /etc/etcd/kubernetes.pem --key-file /etc/etcd/kubernetes-key.pem --endpoints https://127.0.0.1:2379 member list + +echo "The next step is to create the Kubernetes services" diff --git a/etcd_vars b/etcd_vars new file mode 100644 index 0000000..9a792d6 --- /dev/null +++ b/etcd_vars @@ -0,0 +1,2 @@ +KUBE_SUBNET_ADDR=10.240.0 +KUBE_CONTROLLERS=2 diff --git a/instance b/instance new file mode 100644 index 0000000..a5f85e0 --- /dev/null +++ b/instance @@ -0,0 +1 @@ +controller-0,controller-1 \ No newline at end of file diff --git a/kube-apiserver_setup.sh.orig b/kube-apiserver_setup.sh.orig new file mode 100755 index 0000000..b151dba --- /dev/null +++ b/kube-apiserver_setup.sh.orig @@ -0,0 +1,19 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## + +sudo curl https://storage.googleapis.com/kubernetes-release/release/v1.10.3/bin/linux/amd64/kube-apiserver -o /usr/local/bin/kube-apiserver +sudo chmod 755 /usr/local/bin/kube-apiserver + +sudo mkdir -p /var/lib/kubernetes +sudo mv ca.pem ca-key.pem kubernetes.pem kubernetes-key.pem service-account.pem service-account-key.pem encryption-config.yaml /var/lib/kubernetes/ + +sudo mv kube-apiserver.service /etc/systemd/system/ +sudo mv apiserver /etc/kubernetes/ + +sudo systemctl daemon-reload +sudo systemctl start kube-apiserver +sudo systemctl enable kube-apiserver + +echo "The next step is to create the Kubernetes Controller Manager" diff
--git a/kube-dns.yaml b/kube-dns.yaml new file mode 100644 index 0000000..477ba72 --- /dev/null +++ b/kube-dns.yaml @@ -0,0 +1,206 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: 10.32.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-dns + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-dns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + # replicas: not specified here: + # 1. In order to make Addon Manager do not reconcile this replicas parameter. + # 2. Default is 1. + # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + volumes: + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + containers: + - name: kubedns + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 + resources: + # TODO: Set memory limits when we've profiled the container for large + # clusters, then set request = limit to keep this container in + # guaranteed class. Currently, this container falls into the + # "burstable" category so the kubelet doesn't backoff from restarting it. + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + livenessProbe: + httpGet: + path: /healthcheck/kubedns + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /readiness + port: 8081 + scheme: HTTP + # we poll on pod startup for the Kubernetes master service and + # only setup the /readiness HTTP server once that's available. + initialDelaySeconds: 3 + timeoutSeconds: 5 + args: + - --domain=cluster.local. 
+ - --dns-port=10053 + - --config-dir=/kube-dns-config + - --v=2 + env: + - name: PROMETHEUS_PORT + value: "10055" + ports: + - containerPort: 10053 + name: dns-local + protocol: UDP + - containerPort: 10053 + name: dns-tcp-local + protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP + volumeMounts: + - name: kube-dns-config + mountPath: /kube-dns-config + - name: dnsmasq + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + livenessProbe: + httpGet: + path: /healthcheck/dnsmasq + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k + - --cache-size=1000 + - --no-negcache + - --log-facility=- + - --server=/cluster.local/127.0.0.1#10053 + - --server=/in-addr.arpa/127.0.0.1#10053 + - --server=/ip6.arpa/127.0.0.1#10053 + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + # see: https://github.com/kubernetes/kubernetes/issues/29055 for details + resources: + requests: + cpu: 150m + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny + - name: sidecar + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 + livenessProbe: + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - --v=2 + - --logtostderr + - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV + - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV + ports: + - containerPort: 10054 + name: metrics + protocol: TCP + resources: + requests: + memory: 20Mi + cpu: 10m + dnsPolicy: Default # Don't use cluster DNS. + serviceAccountName: kube-dns diff --git a/notes b/notes new file mode 100644 index 0000000..ea876f8 --- /dev/null +++ b/notes @@ -0,0 +1,4 @@ +Need to disable SELinux - definitely on the controller nodes for the nginx reverse proxy for the health monitors + - this can probably be worked around, but I haven't had a look at the SELinux violations. 
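One likely way to keep SELinux enforcing for the nginx reverse proxy, offered only as an untested suggestion using standard tooling (not part of these scripts):

```bash
# The health check proxy needs nginx to open an outbound connection to the apiserver;
# this boolean usually covers that case.
sudo setsebool -P httpd_can_network_connect 1

# If denials remain, build a local policy module from the recorded AVC messages
# instead of switching SELinux to permissive.
sudo ausearch -m avc -ts recent | audit2allow -M k8s_nginx_health
sudo semodule -i k8s_nginx_health.pp
```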
+ + diff --git a/old_stuff/cni/bridge b/old_stuff/cni/bridge new file mode 100755 index 0000000..2f6dc50 Binary files /dev/null and b/old_stuff/cni/bridge differ diff --git a/old_stuff/cni/cni-plugins-amd64-v0.6.0.tgz b/old_stuff/cni/cni-plugins-amd64-v0.6.0.tgz new file mode 100644 index 0000000..bb2b555 Binary files /dev/null and b/old_stuff/cni/cni-plugins-amd64-v0.6.0.tgz differ diff --git a/old_stuff/cni/dhcp b/old_stuff/cni/dhcp new file mode 100755 index 0000000..3397eb1 Binary files /dev/null and b/old_stuff/cni/dhcp differ diff --git a/old_stuff/cni/flannel b/old_stuff/cni/flannel new file mode 100755 index 0000000..1d5cab4 Binary files /dev/null and b/old_stuff/cni/flannel differ diff --git a/old_stuff/cni/host-local b/old_stuff/cni/host-local new file mode 100755 index 0000000..3a6a847 Binary files /dev/null and b/old_stuff/cni/host-local differ diff --git a/old_stuff/cni/ipvlan b/old_stuff/cni/ipvlan new file mode 100755 index 0000000..02b77c6 Binary files /dev/null and b/old_stuff/cni/ipvlan differ diff --git a/old_stuff/cni/loopback b/old_stuff/cni/loopback new file mode 100755 index 0000000..b1f69ef Binary files /dev/null and b/old_stuff/cni/loopback differ diff --git a/old_stuff/cni/macvlan b/old_stuff/cni/macvlan new file mode 100755 index 0000000..a8fc171 Binary files /dev/null and b/old_stuff/cni/macvlan differ diff --git a/old_stuff/cni/portmap b/old_stuff/cni/portmap new file mode 100755 index 0000000..d21fbe4 Binary files /dev/null and b/old_stuff/cni/portmap differ diff --git a/old_stuff/cni/ptp b/old_stuff/cni/ptp new file mode 100755 index 0000000..8505632 Binary files /dev/null and b/old_stuff/cni/ptp differ diff --git a/old_stuff/cni/sample b/old_stuff/cni/sample new file mode 100755 index 0000000..a657dc3 Binary files /dev/null and b/old_stuff/cni/sample differ diff --git a/old_stuff/cni/tuning b/old_stuff/cni/tuning new file mode 100755 index 0000000..ace28fe Binary files /dev/null and b/old_stuff/cni/tuning differ diff --git a/old_stuff/cni/vlan b/old_stuff/cni/vlan new file mode 100755 index 0000000..470e1f8 Binary files /dev/null and b/old_stuff/cni/vlan differ diff --git a/old_stuff/controller-0/apiserver b/old_stuff/controller-0/apiserver new file mode 100644 index 0000000..23b620c --- /dev/null +++ b/old_stuff/controller-0/apiserver @@ -0,0 +1,26 @@ +### +# kubernetes system config +# +# The following values are used to configure the kube-apiserver +# + +# The address on the local server to listen to. +#KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1" + +# The port on the local server to listen on. +#KUBE_API_PORT="--port=6443" + +# Port minions listen on +KUBELET_PORT="--kubelet-port=10250" + +# Comma separated list of nodes in the etcd cluster +KUBE_ETCD_SERVERS="--etcd-servers=https://10.240.0.10:2379,https://10.240.0.11:2379" + +# Address range to use for services +KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.32.0.0/16" + +# default admission control policies +KUBE_ADMISSION_CONTROL="--enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota" + +# Add your own! 
+KUBE_API_ARGS="--advertise-address=10.240.0.10 --allow-privileged=true --apiserver-count=2 --audit-log-maxage=30 --audit-log-maxbackup=3 audit-log-maxsize=100 audit-log-path=/var/log/audit/audit.log --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --client-ca-file=/var/lib/kubernetes/ca.pem --enable-swagger-ui=true --etcd-cafile=/var/lib/kubernetes/ca.pem --etcd-certfile=/var/lib/kubernetes/kubernetes.pem --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem --event-ttl=1h --experimental-encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem --kubelet-https=true --runtime-config=api/all --service-account-key-file=/var/lib/kubernetes/service-account.pem --service-node-port-range=30000-32767 --tls-cert-file=/var/lib/kubernetes/kubernetes.pem --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem --v=2" diff --git a/old_stuff/controller-0/config b/old_stuff/controller-0/config new file mode 100644 index 0000000..e57d165 --- /dev/null +++ b/old_stuff/controller-0/config @@ -0,0 +1,13 @@ + +# This file controls the state of SELinux on the system. +# SELINUX= can take one of these three values: +# enforcing - SELinux security policy is enforced. +# permissive - SELinux prints warnings instead of enforcing. +# disabled - No SELinux policy is loaded. +#SELINUX=enforcing +SELINUX=permissive +# SELINUXTYPE= can take one of three two values: +# targeted - Targeted processes are protected, +# minimum - Modification of targeted policy. Only selected processes are protected. +# mls - Multi Level Security protection. +SELINUXTYPE=targeted diff --git a/old_stuff/controller-0/controller-manager b/old_stuff/controller-0/controller-manager new file mode 100644 index 0000000..545f850 --- /dev/null +++ b/old_stuff/controller-0/controller-manager @@ -0,0 +1,7 @@ +### +# The following values are used to configure the kubernetes controller-manager + +# defaults from config and apiserver should be adequate + +# Add your own! 
+KUBE_CONTROLLER_MANAGER_ARGS="--address=0.0.0.0 --cluster-cidr=10.200.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig --leader-elect=true --root-ca-file=/var/lib/kubernetes/ca.pem --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem --service-cluster-ip-range=10.32.0.0/24 --use-service-account-credentials=true --v=2" diff --git a/old_stuff/controller-0/etcd.conf b/old_stuff/controller-0/etcd.conf new file mode 100644 index 0000000..7e9f1b9 --- /dev/null +++ b/old_stuff/controller-0/etcd.conf @@ -0,0 +1,69 @@ +#[Member] +#ETCD_CORS="" +ETCD_DATA_DIR="/var/lib/etcd/default.etcd" +#ETCD_WAL_DIR="" +ETCD_LISTEN_PEER_URLS="https://10.240.0.10:2380" +ETCD_LISTEN_CLIENT_URLS="https://10.240.0.10:2379,https://127.0.0.1:2379" +#ETCD_MAX_SNAPSHOTS="5" +#ETCD_MAX_WALS="5" +ETCD_NAME="controller-0" +#ETCD_SNAPSHOT_COUNT="100000" +#ETCD_HEARTBEAT_INTERVAL="100" +#ETCD_ELECTION_TIMEOUT="1000" +#ETCD_QUOTA_BACKEND_BYTES="0" +#ETCD_MAX_REQUEST_BYTES="1572864" +#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s" +#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s" +#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s" +# +#[Clustering] +ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.240.0.10:2380" +ETCD_ADVERTISE_CLIENT_URLS="https://10.240.0.10:2379" +#ETCD_DISCOVERY="" +#ETCD_DISCOVERY_FALLBACK="proxy" +#ETCD_DISCOVERY_PROXY="" +#ETCD_DISCOVERY_SRV="" +ETCD_INITIAL_CLUSTER="controller-0=https://10.240.0.10:2380,controller-1=https://10.240.0.11:2380" +ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" +ETCD_INITIAL_CLUSTER_STATE="new" +#ETCD_STRICT_RECONFIG_CHECK="true" +#ETCD_ENABLE_V2="true" +# +#[Proxy] +#ETCD_PROXY="off" +#ETCD_PROXY_FAILURE_WAIT="5000" +#ETCD_PROXY_REFRESH_INTERVAL="30000" +#ETCD_PROXY_DIAL_TIMEOUT="1000" +#ETCD_PROXY_WRITE_TIMEOUT="5000" +#ETCD_PROXY_READ_TIMEOUT="0" +# +#[Security] +ETCD_CERT_FILE="/etc/etcd/kubernetes.pem" +ETCD_KEY_FILE="/etc/etcd/kubernetes-key.pem" +ETCD_CLIENT_CERT_AUTH="true" +ETCD_TRUSTED_CA_FILE="/etc/etcd/ca.pem" +#ETCD_AUTO_TLS="false" +ETCD_PEER_CERT_FILE="/etc/etcd/kubernetes.pem" +ETCD_PEER_KEY_FILE="/etc/etcd/kubernetes-key.pem" +ETCD_PEER_CLIENT_CERT_AUTH="true" +ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ca.pem" +#ETCD_PEER_AUTO_TLS="false" +# +#[Logging] +#ETCD_DEBUG="false" +#ETCD_LOG_PACKAGE_LEVELS="" +#ETCD_LOG_OUTPUT="default" +# +#[Unsafe] +#ETCD_FORCE_NEW_CLUSTER="false" +# +#[Version] +#ETCD_VERSION="false" +#ETCD_AUTO_COMPACTION_RETENTION="0" +# +#[Profiling] +#ETCD_ENABLE_PPROF="false" +#ETCD_METRICS="basic" +# +#[Auth] +#ETCD_AUTH_TOKEN="simple" diff --git a/old_stuff/controller-0/etcd.service b/old_stuff/controller-0/etcd.service new file mode 100644 index 0000000..2306643 --- /dev/null +++ b/old_stuff/controller-0/etcd.service @@ -0,0 +1,18 @@ +[Unit] +Description=Etcd Server +After=network.target +After=network-online.target +Wants=network-online.target + +[Service] +Type=notify +WorkingDirectory=/var/lib/etcd/ +EnvironmentFile=-/etc/etcd/etcd.conf +User=etcd +# set GOMAXPROCS to number of processors +ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=${ETCD_NAME} --data-dir=${ETCD_DATA_DIR} --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS} --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} --initial-cluster=${ETCD_INITIAL_CLUSTER} --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} 
--initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE} --cert-file=${ETCD_CERT_FILE} --key-file=${ETCD_KEY_FILE} --client-cert-auth=${ETCD_CLIENT_CERT_AUTH} --trusted-ca-file=${ETCD_TRUSTED_CA_FILE} --peer-cert-file=${ETCD_PEER_CERT_FILE} --peer-key-file=${ETCD_PEER_KEY_FILE} --peer-client-cert-auth=${ETCD_PEER_CLIENT_CERT_AUTH} --peer-trusted-ca-file=${ETCD_PEER_TRUSTED_CA_FILE}" +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/old_stuff/controller-0/kube-apiserver.service b/old_stuff/controller-0/kube-apiserver.service new file mode 100644 index 0000000..5cebb13 --- /dev/null +++ b/old_stuff/controller-0/kube-apiserver.service @@ -0,0 +1,27 @@ +[Unit] +Description=Kubernetes API Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=network.target +After=etcd.service + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/apiserver +#User=kube +ExecStart=/usr/local/bin/kube-apiserver \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_ETCD_SERVERS \ + $KUBE_API_ADDRESS \ + $KUBE_API_PORT \ + $KUBELET_PORT \ + $KUBE_ALLOW_PRIV \ + $KUBE_SERVICE_ADDRESSES \ + $KUBE_ADMISSION_CONTROL \ + $KUBE_API_ARGS +Restart=on-failure +Type=notify +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/old_stuff/controller-0/kube-apiserver_rbac.yaml b/old_stuff/controller-0/kube-apiserver_rbac.yaml new file mode 100644 index 0000000..ac17cf6 --- /dev/null +++ b/old_stuff/controller-0/kube-apiserver_rbac.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: system:kube-apiserver + namespace: "" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-apiserver-to-kubelet +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: kubernetes diff --git a/old_stuff/controller-0/kube-controller-manager.service b/old_stuff/controller-0/kube-controller-manager.service new file mode 100644 index 0000000..a8effcd --- /dev/null +++ b/old_stuff/controller-0/kube-controller-manager.service @@ -0,0 +1,18 @@ +[Unit] +Description=Kubernetes Controller Manager +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/controller-manager +#User=kube +ExecStart=/usr/local/bin/kube-controller-manager \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_MASTER \ + $KUBE_CONTROLLER_MANAGER_ARGS +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/old_stuff/controller-0/kube-scheduler.service b/old_stuff/controller-0/kube-scheduler.service new file mode 100644 index 0000000..f85e0db --- /dev/null +++ b/old_stuff/controller-0/kube-scheduler.service @@ -0,0 +1,13 @@ +[Unit] +Description=Kubernetes Scheduler +Documentation=https://github.com/kubernetes/kubernetes + +[Service] +ExecStart=/usr/local/bin/kube-scheduler \ + --config=/etc/kubernetes/kube-scheduler.yaml \ + --v=2 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target diff --git a/old_stuff/controller-0/kube-scheduler.yaml b/old_stuff/controller-0/kube-scheduler.yaml new file mode 100644 index 0000000..051cc66 --- /dev/null +++ b/old_stuff/controller-0/kube-scheduler.yaml @@ -0,0 +1,6 @@ +apiVersion: componentconfig/v1alpha1 +kind: KubeSchedulerConfiguration +clientConnection: + kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig" +leaderElection: + leaderElect: true diff --git 
a/old_stuff/controller-0/kubernetes.default.svc.cluster.local.conf b/old_stuff/controller-0/kubernetes.default.svc.cluster.local.conf new file mode 100644 index 0000000..8d8ffe8 --- /dev/null +++ b/old_stuff/controller-0/kubernetes.default.svc.cluster.local.conf @@ -0,0 +1,9 @@ +server { + listen 80; + server_name kubernetes.default.svc.cluster.local; + + location /healthz { + proxy_pass https://127.0.0.1:6443/healthz; + proxy_ssl_trusted_certificate /var/lib/kubernetes/ca.pem; + } +} diff --git a/old_stuff/controller-0/kubernetes.repo b/old_stuff/controller-0/kubernetes.repo new file mode 100644 index 0000000..8094327 --- /dev/null +++ b/old_stuff/controller-0/kubernetes.repo @@ -0,0 +1,8 @@ +[kubernetes] +name=Kubernetes +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg + https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg diff --git a/old_stuff/controller-0/rbac_authorizations.yaml b/old_stuff/controller-0/rbac_authorizations.yaml new file mode 100644 index 0000000..92b3dbb --- /dev/null +++ b/old_stuff/controller-0/rbac_authorizations.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:kube-apiserver-to-kubelet +rules: + - apiGroups: + - "" + resources: + - nodes/proxy + - nodes/stats + - nodes/log + - nodes/spec + - nodes/metrics + verbs: + - "*" diff --git a/old_stuff/controller-0/scheduler b/old_stuff/controller-0/scheduler new file mode 100644 index 0000000..8a134a7 --- /dev/null +++ b/old_stuff/controller-0/scheduler @@ -0,0 +1,7 @@ +### +# kubernetes scheduler config + +# default config should be adequate + +# Add your own! +KUBE_SCHEDULER_ARGS="--config=/etc/kubernetes/kube-scheduler.yaml --v=2" diff --git a/old_stuff/controller-1/apiserver b/old_stuff/controller-1/apiserver new file mode 100644 index 0000000..49854fb --- /dev/null +++ b/old_stuff/controller-1/apiserver @@ -0,0 +1,26 @@ +### +# kubernetes system config +# +# The following values are used to configure the kube-apiserver +# + +# The address on the local server to listen to. +#KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1" + +# The port on the local server to listen on. +#KUBE_API_PORT="--port=6443" + +# Port minions listen on +KUBELET_PORT="--kubelet-port=10250" + +# Comma separated list of nodes in the etcd cluster +KUBE_ETCD_SERVERS="--etcd-servers=https://10.240.0.10:2379,https://10.240.0.11:2379" + +# Address range to use for services +KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.32.0.0/16" + +# default admission control policies +KUBE_ADMISSION_CONTROL="--enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota" + +# Add your own! 
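+# controller-1 runs an identical API server to controller-0; only
+# --advertise-address differs (10.240.0.11 here). With --apiserver-count=2 both
+# instances sit behind the GCE target pool, so a quick end-to-end check is the
+# same curl used by 8_create_load_balancer.sh against the reserved public
+# address (illustrative only):
+#   curl --cacert ca.pem https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version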
+KUBE_API_ARGS="--advertise-address=10.240.0.11 --allow-privileged=true --apiserver-count=2 --audit-log-maxage=30 --audit-log-maxbackup=3 audit-log-maxsize=100 audit-log-path=/var/log/audit/audit.log --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --client-ca-file=/var/lib/kubernetes/ca.pem --enable-swagger-ui=true --etcd-cafile=/var/lib/kubernetes/ca.pem --etcd-certfile=/var/lib/kubernetes/kubernetes.pem --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem --event-ttl=1h --experimental-encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem --kubelet-https=true --runtime-config=api/all --service-account-key-file=/var/lib/kubernetes/service-account.pem --service-node-port-range=30000-32767 --tls-cert-file=/var/lib/kubernetes/kubernetes.pem --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem --v=2" diff --git a/old_stuff/controller-1/config b/old_stuff/controller-1/config new file mode 100644 index 0000000..e57d165 --- /dev/null +++ b/old_stuff/controller-1/config @@ -0,0 +1,13 @@ + +# This file controls the state of SELinux on the system. +# SELINUX= can take one of these three values: +# enforcing - SELinux security policy is enforced. +# permissive - SELinux prints warnings instead of enforcing. +# disabled - No SELinux policy is loaded. +#SELINUX=enforcing +SELINUX=permissive +# SELINUXTYPE= can take one of three two values: +# targeted - Targeted processes are protected, +# minimum - Modification of targeted policy. Only selected processes are protected. +# mls - Multi Level Security protection. +SELINUXTYPE=targeted diff --git a/old_stuff/controller-1/controller-manager b/old_stuff/controller-1/controller-manager new file mode 100644 index 0000000..545f850 --- /dev/null +++ b/old_stuff/controller-1/controller-manager @@ -0,0 +1,7 @@ +### +# The following values are used to configure the kubernetes controller-manager + +# defaults from config and apiserver should be adequate + +# Add your own! 
+KUBE_CONTROLLER_MANAGER_ARGS="--address=0.0.0.0 --cluster-cidr=10.200.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig --leader-elect=true --root-ca-file=/var/lib/kubernetes/ca.pem --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem --service-cluster-ip-range=10.32.0.0/24 --use-service-account-credentials=true --v=2" diff --git a/old_stuff/controller-1/etcd.conf b/old_stuff/controller-1/etcd.conf new file mode 100644 index 0000000..9b509e0 --- /dev/null +++ b/old_stuff/controller-1/etcd.conf @@ -0,0 +1,69 @@ +#[Member] +#ETCD_CORS="" +ETCD_DATA_DIR="/var/lib/etcd/default.etcd" +#ETCD_WAL_DIR="" +ETCD_LISTEN_PEER_URLS="https://10.240.0.11:2380" +ETCD_LISTEN_CLIENT_URLS="https://10.240.0.11:2379,https://127.0.0.1:2379" +#ETCD_MAX_SNAPSHOTS="5" +#ETCD_MAX_WALS="5" +ETCD_NAME="controller-1" +#ETCD_SNAPSHOT_COUNT="100000" +#ETCD_HEARTBEAT_INTERVAL="100" +#ETCD_ELECTION_TIMEOUT="1000" +#ETCD_QUOTA_BACKEND_BYTES="0" +#ETCD_MAX_REQUEST_BYTES="1572864" +#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s" +#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s" +#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s" +# +#[Clustering] +ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.240.0.11:2380" +ETCD_ADVERTISE_CLIENT_URLS="https://10.240.0.11:2379" +#ETCD_DISCOVERY="" +#ETCD_DISCOVERY_FALLBACK="proxy" +#ETCD_DISCOVERY_PROXY="" +#ETCD_DISCOVERY_SRV="" +ETCD_INITIAL_CLUSTER="controller-0=https://10.240.0.10:2380,controller-1=https://10.240.0.11:2380" +ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" +ETCD_INITIAL_CLUSTER_STATE="new" +#ETCD_STRICT_RECONFIG_CHECK="true" +#ETCD_ENABLE_V2="true" +# +#[Proxy] +#ETCD_PROXY="off" +#ETCD_PROXY_FAILURE_WAIT="5000" +#ETCD_PROXY_REFRESH_INTERVAL="30000" +#ETCD_PROXY_DIAL_TIMEOUT="1000" +#ETCD_PROXY_WRITE_TIMEOUT="5000" +#ETCD_PROXY_READ_TIMEOUT="0" +# +#[Security] +ETCD_CERT_FILE="/etc/etcd/kubernetes.pem" +ETCD_KEY_FILE="/etc/etcd/kubernetes-key.pem" +ETCD_CLIENT_CERT_AUTH="true" +ETCD_TRUSTED_CA_FILE="/etc/etcd/ca.pem" +#ETCD_AUTO_TLS="false" +ETCD_PEER_CERT_FILE="/etc/etcd/kubernetes.pem" +ETCD_PEER_KEY_FILE="/etc/etcd/kubernetes-key.pem" +ETCD_PEER_CLIENT_CERT_AUTH="true" +ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ca.pem" +#ETCD_PEER_AUTO_TLS="false" +# +#[Logging] +#ETCD_DEBUG="false" +#ETCD_LOG_PACKAGE_LEVELS="" +#ETCD_LOG_OUTPUT="default" +# +#[Unsafe] +#ETCD_FORCE_NEW_CLUSTER="false" +# +#[Version] +#ETCD_VERSION="false" +#ETCD_AUTO_COMPACTION_RETENTION="0" +# +#[Profiling] +#ETCD_ENABLE_PPROF="false" +#ETCD_METRICS="basic" +# +#[Auth] +#ETCD_AUTH_TOKEN="simple" diff --git a/old_stuff/controller-1/etcd.service b/old_stuff/controller-1/etcd.service new file mode 100644 index 0000000..2306643 --- /dev/null +++ b/old_stuff/controller-1/etcd.service @@ -0,0 +1,18 @@ +[Unit] +Description=Etcd Server +After=network.target +After=network-online.target +Wants=network-online.target + +[Service] +Type=notify +WorkingDirectory=/var/lib/etcd/ +EnvironmentFile=-/etc/etcd/etcd.conf +User=etcd +# set GOMAXPROCS to number of processors +ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=${ETCD_NAME} --data-dir=${ETCD_DATA_DIR} --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS} --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} --initial-cluster=${ETCD_INITIAL_CLUSTER} --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} 
--initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE} --cert-file=${ETCD_CERT_FILE} --key-file=${ETCD_KEY_FILE} --client-cert-auth=${ETCD_CLIENT_CERT_AUTH} --trusted-ca-file=${ETCD_TRUSTED_CA_FILE} --peer-cert-file=${ETCD_PEER_CERT_FILE} --peer-key-file=${ETCD_PEER_KEY_FILE} --peer-client-cert-auth=${ETCD_PEER_CLIENT_CERT_AUTH} --peer-trusted-ca-file=${ETCD_PEER_TRUSTED_CA_FILE}" +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/old_stuff/controller-1/kube-apiserver.service b/old_stuff/controller-1/kube-apiserver.service new file mode 100644 index 0000000..5cebb13 --- /dev/null +++ b/old_stuff/controller-1/kube-apiserver.service @@ -0,0 +1,27 @@ +[Unit] +Description=Kubernetes API Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=network.target +After=etcd.service + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/apiserver +#User=kube +ExecStart=/usr/local/bin/kube-apiserver \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_ETCD_SERVERS \ + $KUBE_API_ADDRESS \ + $KUBE_API_PORT \ + $KUBELET_PORT \ + $KUBE_ALLOW_PRIV \ + $KUBE_SERVICE_ADDRESSES \ + $KUBE_ADMISSION_CONTROL \ + $KUBE_API_ARGS +Restart=on-failure +Type=notify +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/old_stuff/controller-1/kube-apiserver_rbac.yaml b/old_stuff/controller-1/kube-apiserver_rbac.yaml new file mode 100644 index 0000000..ac17cf6 --- /dev/null +++ b/old_stuff/controller-1/kube-apiserver_rbac.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: system:kube-apiserver + namespace: "" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-apiserver-to-kubelet +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: kubernetes diff --git a/old_stuff/controller-1/kube-controller-manager.service b/old_stuff/controller-1/kube-controller-manager.service new file mode 100644 index 0000000..a8effcd --- /dev/null +++ b/old_stuff/controller-1/kube-controller-manager.service @@ -0,0 +1,18 @@ +[Unit] +Description=Kubernetes Controller Manager +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/controller-manager +#User=kube +ExecStart=/usr/local/bin/kube-controller-manager \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_MASTER \ + $KUBE_CONTROLLER_MANAGER_ARGS +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/old_stuff/controller-1/kube-scheduler.service b/old_stuff/controller-1/kube-scheduler.service new file mode 100644 index 0000000..f85e0db --- /dev/null +++ b/old_stuff/controller-1/kube-scheduler.service @@ -0,0 +1,13 @@ +[Unit] +Description=Kubernetes Scheduler +Documentation=https://github.com/kubernetes/kubernetes + +[Service] +ExecStart=/usr/local/bin/kube-scheduler \ + --config=/etc/kubernetes/kube-scheduler.yaml \ + --v=2 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target diff --git a/old_stuff/controller-1/kube-scheduler.yaml b/old_stuff/controller-1/kube-scheduler.yaml new file mode 100644 index 0000000..051cc66 --- /dev/null +++ b/old_stuff/controller-1/kube-scheduler.yaml @@ -0,0 +1,6 @@ +apiVersion: componentconfig/v1alpha1 +kind: KubeSchedulerConfiguration +clientConnection: + kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig" +leaderElection: + leaderElect: true diff --git 
a/old_stuff/controller-1/kubernetes.default.svc.cluster.local.conf b/old_stuff/controller-1/kubernetes.default.svc.cluster.local.conf new file mode 100644 index 0000000..8d8ffe8 --- /dev/null +++ b/old_stuff/controller-1/kubernetes.default.svc.cluster.local.conf @@ -0,0 +1,9 @@ +server { + listen 80; + server_name kubernetes.default.svc.cluster.local; + + location /healthz { + proxy_pass https://127.0.0.1:6443/healthz; + proxy_ssl_trusted_certificate /var/lib/kubernetes/ca.pem; + } +} diff --git a/old_stuff/controller-1/kubernetes.repo b/old_stuff/controller-1/kubernetes.repo new file mode 100644 index 0000000..8094327 --- /dev/null +++ b/old_stuff/controller-1/kubernetes.repo @@ -0,0 +1,8 @@ +[kubernetes] +name=Kubernetes +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg + https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg diff --git a/old_stuff/controller-1/rbac_authorizations.yaml b/old_stuff/controller-1/rbac_authorizations.yaml new file mode 100644 index 0000000..92b3dbb --- /dev/null +++ b/old_stuff/controller-1/rbac_authorizations.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:kube-apiserver-to-kubelet +rules: + - apiGroups: + - "" + resources: + - nodes/proxy + - nodes/stats + - nodes/log + - nodes/spec + - nodes/metrics + verbs: + - "*" diff --git a/old_stuff/controller-1/scheduler b/old_stuff/controller-1/scheduler new file mode 100644 index 0000000..8a134a7 --- /dev/null +++ b/old_stuff/controller-1/scheduler @@ -0,0 +1,7 @@ +### +# kubernetes scheduler config + +# default config should be adequate + +# Add your own! 
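+# KUBE_SCHEDULER_ARGS below points at /etc/kubernetes/kube-scheduler.yaml, which
+# in turn references /var/lib/kubernetes/kube-scheduler.kubeconfig. A rough
+# check that the scheduler came up, assuming its default insecure status port
+# 10251 (illustrative only); leader election progress shows in the journal:
+#   curl -s http://127.0.0.1:10251/healthz   # expected output: ok
+#   sudo journalctl -u kube-scheduler -n 20 --no-pager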
+KUBE_SCHEDULER_ARGS="--config=/etc/kubernetes/kube-scheduler.yaml --v=2" diff --git a/old_stuff/controllers b/old_stuff/controllers new file mode 100644 index 0000000..ce01362 --- /dev/null +++ b/old_stuff/controllers @@ -0,0 +1 @@ +hello diff --git a/scheduler_setup.sh b/scheduler_setup.sh new file mode 100755 index 0000000..bdfc708 --- /dev/null +++ b/scheduler_setup.sh @@ -0,0 +1,18 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## + +sudo curl https://storage.googleapis.com/kubernetes-release/release/v1.10.3/bin/linux/amd64/kube-scheduler -o /usr/local/bin/kube-scheduler +sudo chmod 755 /usr/local/bin/kube-scheduler + +sudo mv kube-scheduler.yaml /etc/kubernetes/lib/kubernetes/ +sudo mv kube-scheduler.kubeconfig /var/lib/kubernetes/ +sudo mv kube-scheduler.service /etc/systemd/system/ +sudo mv scheduler /etc/kubernetes/ + +sudo systemctl daemon-reload +sudo systemctl start kube-scheduler +sudo systemctl enable kube-scheduler + +echo "The next step is to create the nginx based health monitor" diff --git a/scripts/.gce_kubernetes.config b/scripts/.gce_kubernetes.config new file mode 100644 index 0000000..f27e07c --- /dev/null +++ b/scripts/.gce_kubernetes.config @@ -0,0 +1,17 @@ +GCE_REGION=us-west1 +GCE_ZONE=us-west1-c +GCE_PROJECT=centos-k8s +KUBE_VER=1.11.2 +KUBE_NETWORK=kubernetes +KUBE_SUBNET=kubernetes-subnet +KUBE_PUB_IP=kube-public-ip +KUBE_SUBNET_CIDR=10.240.0.0/24 +KUBE_SUBNET_ADDR=10.240.0 +KUBE_POD_CIDR=10.200.0.0/16 +KUBE_POD_ADDR=10.200 +KUBE_NODE_POD_PREFIX=24 +KUBE_INT_FW_NAME=kubernetes-allow-internal +KUBE_EXT_FW_NAME=kubernetes-allow-external +KUBE_CLUSTER=kube-cluster +KUBE_CONTROLLERS=2 +KUBE_WORKERS=2 diff --git a/scripts/.variables b/scripts/.variables new file mode 100644 index 0000000..bba6d6c --- /dev/null +++ b/scripts/.variables @@ -0,0 +1,2 @@ +COUNTER=1 +KUBE_SUBNET_ADDR=10.240.0 diff --git a/scripts/.worker_variables_0 b/scripts/.worker_variables_0 new file mode 100644 index 0000000..86aa67e --- /dev/null +++ b/scripts/.worker_variables_0 @@ -0,0 +1,2 @@ +KUBE_POD_ADDR=10.200.0.0 +KUBE_NODE_POD_PREFIX=24 diff --git a/scripts/.worker_variables_1 b/scripts/.worker_variables_1 new file mode 100644 index 0000000..ac43e10 --- /dev/null +++ b/scripts/.worker_variables_1 @@ -0,0 +1,2 @@ +KUBE_POD_ADDR=10.200.1.0 +KUBE_NODE_POD_PREFIX=24 diff --git a/scripts/0_env_setup.sh b/scripts/0_env_setup.sh new file mode 100755 index 0000000..2400e9f --- /dev/null +++ b/scripts/0_env_setup.sh @@ -0,0 +1,120 @@ +#!/bin/bash +## +## Script to set up the kubernetes on Google Cloud environment settings +## +main () { +echo "This script will create a file .gce_kubernetes.config in the current directory that +will contain the variables that you are about to set to set up the Kubernetes cluster on +Google Compute Engine. You should also have already authenticated to the Google Cloud via +the gcloud command line interface." + + if [ -f .gce_kubernetes.config ]; then + read -p "Kubernetes configuration file exists. Do you wish to display the contents? [y]" show_config + show_config=${show_config:-y} + if [ "$show_config" == "y" ]; then + cat .gce_kubernetes.config + read -p "Do you wish to set up the configuration again? [n]" reset_config + reset_config=${reset_config:-n} + if [ "$reset_config" == "n" ]; then + echo "Exiting" + else + config_gather + fi + else + echo "Setting up the configuration ..." + config_gather + fi + else + echo "Setting up the configuration ..." 
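+  # config_gather writes .gce_kubernetes.config, which every numbered script
+  # sources with '. .gce_kubernetes.config'; values can still be adjusted by
+  # hand afterwards, e.g. (illustrative only):
+  #   . .gce_kubernetes.config
+  #   echo "Deploying Kubernetes $KUBE_VER to $GCE_PROJECT in $GCE_ZONE"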
+ config_gather + fi +} + +config_gather () { + read -p "Please enter the Google Compute Region you want to use? [us-west1]" gce_region + gce_region=${gce_region:-us-west1} + read -p "Please enter the Google Compute Zone you want to use? [us-west1-c]" gce_zone + gce_zone=${gce_zone:-us-west1-c} + read -p "Please enter the Google Compute Project you are going to use? (This should already exist)" gce_project + + read -p "What version of Kubernetes do you want to install? Tested versions: 1.10.3, 1.10.5, and [1.10.6]" + kube_ver=${kube_ver:-1.10.6} + + read -p "Please enter the name of GCE network you want to create? [kubernetes]" kube_network + kube_network=${kube_network:-kubernetes} + read -p "Please enter the name of the GCE subnet within the $kube_network you want to create? [kubernetes-subnet]" kube_subnet + kube_subnet=${kube_subnet:-kubernetes-subnet} + read -p "Please enter the name used for the public IP address object in Google Compute? [kube-public-ip]" kube_pub_ip + kube_pub_ip=${kube_pub_ip:-kube-public-ip} + read -p "Please enter the CIDR network address range of the kubernetes subnet? [10.240.0.0/24]" kube_subnet_cidr + kube_subnet_cidr=${kube_subnet_cidr:-10.240.0.0/24} + kube_subnet_addr_calc=`ipcalc -n $kube_subnet_cidr | cut -c 9- | rev | cut -c 3- | rev` + read -p "Please enter the lowest network component of the IP address range [$kube_subnet_addr_calc]?" kube_subnet_addr + kube_subnet_addr=${kube_subnet_addr:-$kube_subnet_addr_calc} + read -p "Please enter the pod CIDR network address to be used? [10.200.0.0/16]" kube_pod_cidr + kube_pod_cidr=${kube_pod_cidr:-10.200.0.0/16} + kube_pod_addr_calc=`ipcalc -n $kube_pod_cidr | cut -c 9- | rev | cut -c 5- | rev` + read -p "Please enter the lowest network component of the pod address range [$kube_pod_addr_calc]?" kube_pod_addr + kube_pod_addr=${kube_pod_addr:-$kube_pod_addr_calc} + kube_pod_cidr_prefix=`echo $kube_pod_cidr | rev| cut -c -2 | rev` + kube_pod_node_prefix=$(($kube_pod_cidr_prefix + 8)) + read -p "Best guess at the CIDR prefix used on the nodes for the pod subnets [$kube_pod_node_prefix]" kube_node_pod_prefix + kube_node_pod_prefix=${kube_node_pod_prefix:-$kube_pod_node_prefix} + read -p "Firewall rules will be created for internal traffic (all allowed) and external traffic (ssh, ping, kubernetes). + You can provide a name for the internal traffic rule: [kubernetes-allow-internal]" kube_int_fw_name + kube_int_fw_name=${kube_int_fw_name:-kubernetes-allow-internal} + read -p " You can provide a name for the external traffic rule: [kubernetes-allow-external]" kube_ext_fw_name + kube_ext_fw_name=${kube_ext_fw_name:-kubernetes-allow-external} + read -p "Please enter the name used for the Kubernetes Cluster object? [kube-cluster]" kube_cluster + kube_cluster=${kube_cluster:-kube-cluster} + read -p "How many master (controller) nodes in the Kubernetes cluster do you want to create (currently a maximum of 9)? [2]" kube_masters + kube_masters=${kube_masters:-2} + read -p "How many worker nodes in the Kubernetes cluster do you want to create (currently a maximum of 9)? 
[2]" kube_workers + kube_workers=${kube_workers:-2} + + echo "The following variables will be committed to the configuration file: + GCE region: $gce_region + GCE zone: $gce_zone + GCE project: $gce_project + Kubernetes Version: $kube_ver + GCE VPC network name: $kube_network + GCE VPC subnet name: $kube_subnet + Kubernetes public IP address object name: $kube_pub_ip + Kubernetes subnet CIDR address: $kube_subnet_cidr + Kubernetes subnet address: $kube_subnet_addr + Kubernetes pod subnet CIDR address: $kube_pod_cidr + Kubernetes pod address: $kube_pod_addr + Kubernetes node pod CIDR prefix: $kube_node_pod_prefix + Kubernetes internal firewall rule: $kube_int_fw_name + Kubernetes external firewall rule: $kube_ext_fw_name + Kubernetes cluster object name: $kube_cluster + Number of controller nodes: $kube_masters + Number of worker nodes: $kube_workers" + + read -p "Please confirm that these values are correct (y/n) [y]" confirm_values + confirm_values=${confirm_values:-y} + + if [ "$confirm_values" == "y" ]; then + cat > .gce_kubernetes.config << EOM +GCE_REGION=$gce_region +GCE_ZONE=$gce_zone +GCE_PROJECT=$gce_project +KUBE_VER=$kube_ver +KUBE_NETWORK=$kube_network +KUBE_SUBNET=$kube_subnet +KUBE_PUB_IP=$kube_pub_ip +KUBE_SUBNET_CIDR=$kube_subnet_cidr +KUBE_SUBNET_ADDR=$kube_subnet_addr +KUBE_POD_CIDR=$kube_pod_cidr +KUBE_POD_ADDR=$kube_pod_addr +KUBE_NODE_POD_PREFIX=$kube_node_pod_prefix +KUBE_INT_FW_NAME=$kube_int_fw_name +KUBE_EXT_FW_NAME=$kube_ext_fw_name +KUBE_CLUSTER=$kube_cluster +KUBE_CONTROLLERS=$kube_masters +KUBE_WORKERS=$kube_workers +EOM + fi +} + +main diff --git a/scripts/10_worker_kube-proxy_setup.sh b/scripts/10_worker_kube-proxy_setup.sh new file mode 100755 index 0000000..a1225a3 --- /dev/null +++ b/scripts/10_worker_kube-proxy_setup.sh @@ -0,0 +1,23 @@ +. .gce_kubernetes.config +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" + +KUBE_WORKERS=$((KUBE_WORKERS-1)) + +sed "s|KUBE_POD_CIDR|$KUBE_POD_CIDR|g" worker_kube-proxy_setup.sh.template > worker_kube-proxy_setup.sh.1 +sed "s|KUBE_VERSION|$KUBE_VER|g" worker_kube-proxy_setup.sh.1 > worker_kube-proxy_setup.sh +rm worker_kube-proxy_setup.sh.1 +chmod 755 worker_kube-proxy_setup.sh +for ((i=0; i<=$KUBE_WORKERS; i++)); do + gcloud compute scp worker_kube-proxy_setup.sh worker-${i}: +done + +echo "Worker kube-proxy setup scripts copied across" +for ((i=0; i<=$KUBE_WORKERS; i++)); do + gcloud compute ssh worker-${i} -- sudo ~/worker_kube-proxy_setup.sh +done diff --git a/scripts/11_client_kubectl_admin_setup.sh b/scripts/11_client_kubectl_admin_setup.sh new file mode 100755 index 0000000..58bc617 --- /dev/null +++ b/scripts/11_client_kubectl_admin_setup.sh @@ -0,0 +1,24 @@ +#!/bin/bash +## +## Script to set up the client (this system) for Kubernetes access +## +. 
.gce_kubernetes.config +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" + +cd certs-dir +rm admin.kubeconfig +KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe $KUBE_NETWORK --region $GCE_REGION --format 'value(address)') +echo $KUBERNETES_PUBLIC_ADDRESS +kubectl config set-cluster $KUBE_CLUSTER --certificate-authority=ca.pem --embed-certs=true --server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 --kubeconfig=admin.kubeconfig +kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --kubeconfig=admin.kubeconfig +kubectl config set-context $KUBE_NETWORK --cluster=$KUBE_CLUSTER --user=admin --kubeconfig=admin.kubeconfig +kubectl config use-context $KUBE_NETWORK --kubeconfig=admin.kubeconfig + +kubectl get componentstatuses --kubeconfig=admin.kubeconfig +kubectl get nodes --kubeconfig=admin.kubeconfig diff --git a/scripts/12_setup_pod_routing.sh b/scripts/12_setup_pod_routing.sh new file mode 100755 index 0000000..b0e02f6 --- /dev/null +++ b/scripts/12_setup_pod_routing.sh @@ -0,0 +1,22 @@ +#!/bin/bash +## +## Script to set up the pod routing between nodes +## +. .gce_kubernetes.config +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" + +KUBE_WORKERS=$((KUBE_WORKERS-1)) + +for ((i=0; i<=$KUBE_WORKERS; i++)); do + gcloud compute routes create kubernetes-pod-route-worker-${i} --network $KUBE_NETWORK --next-hop-address $KUBE_SUBNET_ADDR.2${i} --destination-range $KUBE_POD_ADDR.${i}.0/24 +done +sleep 10 +gcloud compute routes list --filter "network: $KUBE_NETWORK" + +echo "Routes should be listed above as created" diff --git a/scripts/13_setup_core-dns.sh b/scripts/13_setup_core-dns.sh new file mode 100755 index 0000000..937c101 --- /dev/null +++ b/scripts/13_setup_core-dns.sh @@ -0,0 +1,24 @@ +#!/bin/bash +## +## Script to deploy the kube-dns pod to the cluster +## +echo "Using the core-dns.yaml file dapated from https://github.com/coredns/deployment/blob/master/kubernetes/coredns.yaml.sed" +kubectl create -f core-dns.yaml --kubeconfig=certs-dir/admin.kubeconfig + +sleep 2 +kubectl get pods -l k8s-app=core-dns -n kube-system -o wide --kubeconfig=certs-dir/admin.kubeconfig + +echo "Verifying this has worked" +## USing and older version of busybox than latest due to errors with nslookup on the latest versions: https://github.com/docker-library/busybox/issues/48 +kubectl run busybox --image=busybox:1.28.4 --kubeconfig=certs-dir/admin.kubeconfig --command -- sleep 3600 + +sleep 10 + +echo "Is the busybox pod running?" +kubectl get pods -l run=busybox --kubeconfig=certs-dir/admin.kubeconfig + +echo "Get the pod name" +POD_NAME=$(kubectl get pods -l run=busybox --kubeconfig=certs-dir/admin.kubeconfig -o jsonpath="{.items[0].metadata.name}") + +echo "Is the DNS pod functional?" 
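+## If the lookup below fails, two follow-up checks that usually narrow it down
+## (illustrative only, reusing the same admin kubeconfig):
+##   kubectl get svc -n kube-system --kubeconfig=certs-dir/admin.kubeconfig    # core-dns ClusterIP should be 10.32.0.10
+##   kubectl logs -n kube-system -l k8s-app=kube-dns --kubeconfig=certs-dir/admin.kubeconfig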
+kubectl exec -ti --kubeconfig=certs-dir/admin.kubeconfig $POD_NAME -- nslookup kubernetes diff --git a/scripts/13_setup_kube-dns.sh b/scripts/13_setup_kube-dns.sh new file mode 100755 index 0000000..399137d --- /dev/null +++ b/scripts/13_setup_kube-dns.sh @@ -0,0 +1,25 @@ +#!/bin/bash +## +## Script to deploy the kube-dns pod to the cluster +## +echo "Taking the kube-dns.yaml file from the Kubernetes the Hard Way author" +wget https://storage.googleapis.com/kubernetes-the-hard-way/kube-dns.yaml +kubectl create -f kube-dns.yaml --kubeconfig=certs-dir/admin.kubeconfig + +sleep 2 +kubectl get pods -l k8s-app=kube-dns -n kube-system -o wide --kubeconfig=certs-dir/admin.kubeconfig + +echo "Verifying this has worked" +## USing and older version of busybox than latest due to errors with nslookup on the latest versions: https://github.com/docker-library/busybox/issues/48 +kubectl run busybox --image=busybox:1.28.4 --kubeconfig=certs-dir/admin.kubeconfig --command -- sleep 3600 + +sleep 10 + +echo "Is the busybox pod running?" +kubectl get pods -l run=busybox --kubeconfig=certs-dir/admin.kubeconfig + +echo "Get the pod name" +POD_NAME=$(kubectl get pods -l run=busybox --kubeconfig=certs-dir/admin.kubeconfig -o jsonpath="{.items[0].metadata.name}") + +echo "Is the DNS pod functional?" +kubectl exec -ti --kubeconfig=certs-dir/admin.kubeconfig $POD_NAME -- nslookup kubernetes diff --git a/scripts/14_additional_functional_tests.sh b/scripts/14_additional_functional_tests.sh new file mode 100755 index 0000000..4f389ea --- /dev/null +++ b/scripts/14_additional_functional_tests.sh @@ -0,0 +1,55 @@ +#!/bin/bash +## +## Further set of tests to ensure that Kubernetes is working as expected +## +. .gce_kubernetes.config + +echo "######################################" +echo "Set the gcloud compute region and zone" +echo "######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" +echo "" + +echo "Testing the encryption of data at rest via the key created earlier" +kubectl create secret generic super-secret --from-literal="mykey=mydata" --kubeconfig=certs-dir/admin.kubeconfig + +gcloud compute ssh controller-0 \ +--command "sudo ETCDCTL_API=3 etcdctl get \ +--endpoints=https://127.0.0.1:2379 \ +--cacert=/etc/etcd/ca.pem \ +--cert=/etc/etcd/kubernetes.pem \ +--key=/etc/etcd/kubernetes-key.pem \ +/registry/secrets/default/super-secret | hexdump -C" + +echo "Output should be prefixed with k8s:enc:aescbc:v1:key1 + +Testing application (nginx) deployments" +kubectl run nginx --image=nginx --kubeconfig=certs-dir/admin.kubeconfig +echo "Waiting 10 seconds for the pod to start ..." 
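+## The fixed sleep below is usually long enough; on a slow image pull, waiting
+## on pod readiness is more reliable (needs kubectl 1.11+; shown only as a
+## commented alternative):
+## kubectl wait --for=condition=Ready pod -l run=nginx --timeout=120s --kubeconfig=certs-dir/admin.kubeconfig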
+sleep 10 +kubectl get pods -l run=nginx -o wide --kubeconfig=certs-dir/admin.kubeconfig + +echo "nginx should be listed as running" +POD_NAME=$(kubectl get pods -l run=nginx --kubeconfig=certs-dir/admin.kubeconfig -o jsonpath="{.items[0].metadata.name}") +echo "You'll need to switch to another terminal and test with 'curl --head http://127.0.0.1:8080' Press Ctrl+C once completed" +kubectl port-forward $POD_NAME 8080:80 --kubeconfig=certs-dir/admin.kubeconfig + +echo "Displaying the logs from the nginx container" +kubectl logs $POD_NAME --kubeconfig=certs-dir/admin.kubeconfig + +echo "Executing a command inside a container" +kubectl exec -it $POD_NAME --kubeconfig=certs-dir/admin.kubeconfig -- nginx -v + +echo "Exposing a container as a service (in this example NodePort)" +kubectl expose deployment nginx --port 80 --type NodePort --kubeconfig=certs-dir/admin.kubeconfig +NODE_PORT=$(kubectl get svc nginx --kubeconfig=certs-dir/admin.kubeconfig --output=jsonpath='{range .spec.ports[0]}{.nodePort}') +echo "Creating a firewall rule to allow access to the exposed node" +gcloud compute firewall-rules create $KUBE_NETWORK-allow-nginx-service --allow=tcp:${NODE_PORT} --network $KUBE_NETWORK +echo "Retrieving the external IP" +EXTERNAL_IP=$(gcloud compute instances describe worker-0 --format 'value(networkInterfaces[0].accessConfigs[0].natIP)') +curl -I http://${EXTERNAL_IP}:${NODE_PORT} + +echo "Functional tests should now be completed successfully" diff --git a/scripts/14b_additional_functional_tests.sh b/scripts/14b_additional_functional_tests.sh new file mode 100755 index 0000000..c38dc96 --- /dev/null +++ b/scripts/14b_additional_functional_tests.sh @@ -0,0 +1,75 @@ +#!/bin/bash +## +## Further set of tests to ensure that Kubernetes is working as expected +## +. .gce_kubernetes.config + +echo "######################################" +echo "Set the gcloud compute region and zone" +echo "######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" +echo "" + +echo "Testing the encryption of data at rest via the key created earlier" +kubectl create secret generic super-secret --from-literal="mykey=mydata" --kubeconfig=certs-dir/admin.kubeconfig + +gcloud compute ssh controller-0 \ +--command "sudo ETCDCTL_API=3 etcdctl get \ +--endpoints=https://127.0.0.1:2379 \ +--cacert=/etc/etcd/ca.pem \ +--cert=/etc/etcd/kubernetes.pem \ +--key=/etc/etcd/kubernetes-key.pem \ +/registry/secrets/default/super-secret | hexdump -C" + +echo "Output should be prefixed with k8s:enc:aescbc:v1:key1 + +Testing application (nginx) deployments" +kubectl run nginx --image=nginx --kubeconfig=certs-dir/admin.kubeconfig +echo "Waiting 10 seconds for the pod to start ..." 
+sleep 10 +kubectl get pods -l run=nginx -o wide --kubeconfig=certs-dir/admin.kubeconfig + +echo "nginx should be listed as running" +POD_NAME=$(kubectl get pods -l run=nginx --kubeconfig=certs-dir/admin.kubeconfig -o jsonpath="{.items[0].metadata.name}") +echo "You'll need to switch to another terminal and test with 'curl --head http://127.0.0.1:8080' Press Ctrl+C once completed" +kubectl port-forward $POD_NAME 8080:80 --kubeconfig=certs-dir/admin.kubeconfig + +echo "Displaying the logs from the nginx container" +kubectl logs $POD_NAME --kubeconfig=certs-dir/admin.kubeconfig + +echo "Executing a command inside a container" +kubectl exec -it $POD_NAME --kubeconfig=certs-dir/admin.kubeconfig -- nginx -v + +echo "Exposing a container as a service (in this example NodePort)" +kubectl expose deployment nginx --port 80 --type NodePort --kubeconfig=certs-dir/admin.kubeconfig +NODE_PORT=$(kubectl get svc nginx --kubeconfig=certs-dir/admin.kubeconfig --output=jsonpath='{range .spec.ports[0]}{.nodePort}') +echo "Creating a firewall rule to allow access to the exposed node" +gcloud compute firewall-rules create kubernetes-centos-allow-nginx-service --allow=tcp:${NODE_PORT} --network $KUBE_NETWORK +echo "Retrieving the external IP" +EXTERNAL_IP=$(gcloud compute instances describe worker-0 --format 'value(networkInterfaces[0].accessConfigs[0].natIP)') +curl -I http://${EXTERNAL_IP}:${NODE_PORT} + +echo "Running an untrusted pod under gVisor for inspection - only run this test if you are using containerd and have runsc present" +alias ckctl='kubectl --kubeconfig=certs-dir/admin.kubeconfig' +cat < ca-config.json < ca-csr.json < admin-csr.json < ${i}-csr.json < kube-controller-manager-csr.json < kube-proxy-csr.json < kube-scheduler-csr.json < kubernetes-csr.json < service-account-csr.json < encryption-config.yaml < .variables +echo "KUBE_SUBNET_ADDR=$KUBE_SUBNET_ADDR" >> .variables + +for ((i=0; i<=$KUBE_CONTROLLERS; i++)); do + gcloud compute scp etcd_setup.sh .variables controller-${i}: +done + +echo "Controller setup scripts copied across" +for ((i=0; i<=$KUBE_CONTROLLERS; i++)); do + gcloud compute ssh controller-${i} -- sudo ~/etcd_setup.sh +done + diff --git a/scripts/4_install_configure_apiserver.sh b/scripts/4_install_configure_apiserver.sh new file mode 100755 index 0000000..8aadcf6 --- /dev/null +++ b/scripts/4_install_configure_apiserver.sh @@ -0,0 +1,23 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +. 
.gce_kubernetes.config +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" + +echo "KUBE_SUBNET_ADDR=$KUBE_SUBNET_ADDR" > etcd_vars +echo "KUBE_CONTROLLERS=$KUBE_CONTROLLERS" >> etcd_vars +sed "s|KUBE_VERSION|$KUBE_VER|g" kube-apiserver_setup.sh.template > kube-apiserver_setup.sh +KUBE_CONTROLLERS=$((KUBE_CONTROLLERS-1)) +for (( i=0; i<=$KUBE_CONTROLLERS; i++)); do + gcloud compute scp kube-apiserver_setup.sh etcd_vars controller-${i}: + gcloud compute ssh controller-${i} -- sudo ~/kube-apiserver_setup.sh +done + +echo "Next step is to install the Kubernetes Controller Manager daemon" diff --git a/scripts/5_install_configure_controller_manager.sh b/scripts/5_install_configure_controller_manager.sh new file mode 100755 index 0000000..6a1ca17 --- /dev/null +++ b/scripts/5_install_configure_controller_manager.sh @@ -0,0 +1,25 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +. .gce_kubernetes.config +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" + +KUBE_CONTROLLERS=$((KUBE_CONTROLLERS-1)) +echo "KUBE_POD_CIDR=$KUBE_POD_CIDR" > ctrl-mgr_vars +sed "s|KUBE_VERSION|$KUBE_VER|g" controller_manager_setup.sh.template > controller_manager_setup.sh +for (( i=0; i<=$KUBE_CONTROLLERS; i++)); do + gcloud compute scp ctrl-mgr_vars controller_manager_setup.sh controller-${i}: +done + +echo "Controller setup scripts copied across" +for (( i=0; i<=$KUBE_CONTROLLERS; i++)); do + gcloud compute ssh controller-${i} -- sudo ~/controller_manager_setup.sh +done + diff --git a/scripts/6_install_configure_scheduler.sh b/scripts/6_install_configure_scheduler.sh new file mode 100755 index 0000000..2accb24 --- /dev/null +++ b/scripts/6_install_configure_scheduler.sh @@ -0,0 +1,26 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +. .gce_kubernetes.config +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" + +sed "s|KUBE_VERSION|$KUBE_VER|g" scheduler_setup.sh.template > scheduler_setup.sh +chmod 755 scheduler_setup.sh + +KUBE_CONTROLLERS=$((KUBE_CONTROLLERS-1)) +for (( i=0; i<=$KUBE_CONTROLLERS; i++)); do + gcloud compute scp scheduler_setup.sh controller-${i}: +done + +echo "Controller setup scripts copied across" +for (( i=0; i<=$KUBE_CONTROLLERS; i++)); do + gcloud compute ssh controller-${i} -- sudo ~/scheduler_setup.sh +done + diff --git a/scripts/7_nginx_health_monitor_setup.sh b/scripts/7_nginx_health_monitor_setup.sh new file mode 100755 index 0000000..35bbb95 --- /dev/null +++ b/scripts/7_nginx_health_monitor_setup.sh @@ -0,0 +1,24 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +. 
.gce_kubernetes.config +echo "######################################" +echo "Set the gcloud compute region and zone" +echo "######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" +echo "" + +KUBE_CONTROLLERS=$((KUBE_CONTROLLERS-1)) +for ((i=0; i<=$KUBE_CONTROLLERS; i++)); do + gcloud compute scp controller-files/rbac_authorizations.yaml controller-files/kube-apiserver_rbac.yaml controller-files/kubernetes.default.svc.cluster.local.conf nginx_health_monitor_setup.sh controller-${i}: +done + +echo "Controller setup scripts copied across" +for ((i=0; i<=$KUBE_CONTROLLERS; i++)); do + gcloud compute ssh controller-${i} -- sudo ~/nginx_health_monitor_setup.sh +done + diff --git a/scripts/8_create_load_balancer.sh b/scripts/8_create_load_balancer.sh new file mode 100755 index 0000000..4e17068 --- /dev/null +++ b/scripts/8_create_load_balancer.sh @@ -0,0 +1,37 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +. .gce_kubernetes.config +echo "######################################" +echo "Set the gcloud compute region and zone" +echo "######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" +echo "" + +KUBE_CONTROLLERS=$((KUBE_CONTROLLERS-1)) +KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe $KUBE_NETWORK --region $GCE_REGION --format 'value(address)') + +for (( i=0; i<=$KUBE_CONTROLLERS; i++ )); do + if [ "$i" == "0" ]; then + printf 'controller-'${i} > instance + else + printf ',controller-'${i} >> instance + fi +done +INSTANCES=`cat instance` + +gcloud compute http-health-checks create $KUBE_NETWORK --description "Kubernetes Health Check" --host "kubernetes.default.svc.cluster.local" --request-path "/healthz" +gcloud compute firewall-rules create $KUBE_NETWORK-allow-health-check --network $KUBE_NETWORK --source-ranges 209.85.152.0/22,209.85.204.0/22,35.191.0.0/16 --allow tcp +gcloud compute target-pools create $KUBE_NETWORK-target-pool --http-health-check $KUBE_NETWORK +gcloud compute target-pools add-instances kubernetes-target-pool --instances $INSTANCES +gcloud compute forwarding-rules create $KUBE_NETWORK-forwarding-rule --address ${KUBERNETES_PUBLIC_ADDRESS} --ports 6443 --region $GCE_REGION --target-pool $KUBE_NETWORK-target-pool + +echo 'Verifying this has worked:' +cd certs-dir +curl --cacert ca.pem https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version + +echo 'The next step is to configure the worker nodes' diff --git a/scripts/9a_worker_kubelet_via_docker_setup.sh b/scripts/9a_worker_kubelet_via_docker_setup.sh new file mode 100755 index 0000000..568f7be --- /dev/null +++ b/scripts/9a_worker_kubelet_via_docker_setup.sh @@ -0,0 +1,23 @@ +#!/bin/bash +. 
.gce_kubernetes.config +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" + +sed "s|KUBE_VERSION|$KUBE_VER|g" worker_kubelet_setup.sh.template > worker_kubelet_setup.sh +chmod 755 worker_kubelet_setup.sh + +KUBE_WORKERS=$((KUBE_WORKERS-1)) +for ((i=0; i<=$KUBE_WORKERS; i++)); do + echo "KUBE_POD_ADDR=$KUBE_POD_ADDR.$i.0" > .worker_variables_$i + echo "KUBE_NODE_POD_PREFIX=$KUBE_NODE_POD_PREFIX" >> .worker_variables_$i + gcloud compute scp .worker_variables_$i worker-${i}:.worker_variables + gcloud compute scp worker_kubelet_setup.sh worker-${i}: + gcloud compute ssh worker-${i} -- sudo ~/worker_kubelet_setup.sh +done + +echo "The next step should be creating the kube-proxy instances" diff --git a/scripts/9b_worker_kubelet_via_containerd_setup.sh b/scripts/9b_worker_kubelet_via_containerd_setup.sh new file mode 100755 index 0000000..c26fd34 --- /dev/null +++ b/scripts/9b_worker_kubelet_via_containerd_setup.sh @@ -0,0 +1,22 @@ +#!/bin/bash +. .gce_kubernetes.config +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region $GCE_REGION +gcloud config set compute/zone $GCE_ZONE +gcloud config set project $GCE_PROJECT +echo "Compute region and zone set" + +sed "s|KUBE_VERSION|$KUBE_VER|g" worker_kubelet_containerd_setup.sh.template > worker_kubelet_containerd_setup.sh + +KUBE_WORKERS=$((KUBE_WORKERS-1)) +for ((i=0; i<=$KUBE_WORKERS; i++)); do + echo "KUBE_POD_ADDR=$KUBE_POD_ADDR.$i.0" > .worker_variables_$i + echo "KUBE_NODE_POD_PREFIX=$KUBE_NODE_POD_PREFIX" >> .worker_variables_$i + gcloud compute scp .worker_variables_$i worker-${i}:.worker_variables + gcloud compute scp worker_kubelet_containerd_setup.sh worker-${i}: + gcloud compute ssh worker-${i} -- sudo ~/worker_kubelet_containerd_setup.sh +done + +echo "The next step should be creating the kube-proxy instances" diff --git a/scripts/controller-files/kube-apiserver_rbac.yaml b/scripts/controller-files/kube-apiserver_rbac.yaml new file mode 100644 index 0000000..ac17cf6 --- /dev/null +++ b/scripts/controller-files/kube-apiserver_rbac.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: system:kube-apiserver + namespace: "" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-apiserver-to-kubelet +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: kubernetes diff --git a/scripts/controller-files/kubernetes.default.svc.cluster.local.conf b/scripts/controller-files/kubernetes.default.svc.cluster.local.conf new file mode 100644 index 0000000..8d8ffe8 --- /dev/null +++ b/scripts/controller-files/kubernetes.default.svc.cluster.local.conf @@ -0,0 +1,9 @@ +server { + listen 80; + server_name kubernetes.default.svc.cluster.local; + + location /healthz { + proxy_pass https://127.0.0.1:6443/healthz; + proxy_ssl_trusted_certificate /var/lib/kubernetes/ca.pem; + } +} diff --git a/scripts/controller-files/rbac_authorizations.yaml b/scripts/controller-files/rbac_authorizations.yaml new file mode 100644 index 0000000..92b3dbb --- /dev/null +++ b/scripts/controller-files/rbac_authorizations.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + annotations: + 
rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:kube-apiserver-to-kubelet +rules: + - apiGroups: + - "" + resources: + - nodes/proxy + - nodes/stats + - nodes/log + - nodes/spec + - nodes/metrics + verbs: + - "*" diff --git a/scripts/controller_manager_setup.sh b/scripts/controller_manager_setup.sh new file mode 100755 index 0000000..9dddb74 --- /dev/null +++ b/scripts/controller_manager_setup.sh @@ -0,0 +1,48 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +. ctrl-mgr_vars + +curl https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/linux/amd64/kube-controller-manager -o /usr/local/bin/kube-controller-manager +chmod 755 /usr/local/bin/kube-controller-manager + +mkdir -p /var/lib/kubernetes + +mv kube-controller-manager.kubeconfig /var/lib/kubernetes/ +cat > /etc/systemd/system/kube-controller-manager.service << 'EOM' +[Unit] +Description=Kubernetes Controller Manager +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/controller-manager +#User=kube +ExecStart=/usr/local/bin/kube-controller-manager \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_MASTER \ + $KUBE_CONTROLLER_MANAGER_ARGS +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target +EOM + +cat > /etc/kubernetes/controller-manager << 'EOM' +### +# The following values are used to configure the kubernetes controller-manager + +# defaults from config and apiserver should be adequate + +# Add your own! +KUBE_CONTROLLER_MANAGER_ARGS="--address=0.0.0.0 --cluster-cidr=$KUBE_POD_CIDR --cluster-name=kubernetes --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig --leader-elect=true --root-ca-file=/var/lib/kubernetes/ca.pem --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem --service-cluster-ip-range=10.32.0.0/24 --use-service-account-credentials=true --v=2" +EOM + +systemctl daemon-reload +systemctl start kube-controller-manager +systemctl enable kube-controller-manager + +echo "The next step is to create the Kubernetes Scheduler" diff --git a/scripts/controller_manager_setup.sh.template b/scripts/controller_manager_setup.sh.template new file mode 100755 index 0000000..300181c --- /dev/null +++ b/scripts/controller_manager_setup.sh.template @@ -0,0 +1,48 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +. 
ctrl-mgr_vars + +curl https://storage.googleapis.com/kubernetes-release/release/vKUBE_VERSION/bin/linux/amd64/kube-controller-manager -o /usr/local/bin/kube-controller-manager +chmod 755 /usr/local/bin/kube-controller-manager + +mkdir -p /var/lib/kubernetes + +mv kube-controller-manager.kubeconfig /var/lib/kubernetes/ +cat > /etc/systemd/system/kube-controller-manager.service << 'EOM' +[Unit] +Description=Kubernetes Controller Manager +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/controller-manager +#User=kube +ExecStart=/usr/local/bin/kube-controller-manager \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_MASTER \ + $KUBE_CONTROLLER_MANAGER_ARGS +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target +EOM + +cat > /etc/kubernetes/controller-manager << 'EOM' +### +# The following values are used to configure the kubernetes controller-manager + +# defaults from config and apiserver should be adequate + +# Add your own! +KUBE_CONTROLLER_MANAGER_ARGS="--address=0.0.0.0 --cluster-cidr=$KUBE_POD_CIDR --cluster-name=kubernetes --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig --leader-elect=true --root-ca-file=/var/lib/kubernetes/ca.pem --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem --service-cluster-ip-range=10.32.0.0/24 --use-service-account-credentials=true --v=2" +EOM + +systemctl daemon-reload +systemctl start kube-controller-manager +systemctl enable kube-controller-manager + +echo "The next step is to create the Kubernetes Scheduler" diff --git a/scripts/core-dns.yaml b/scripts/core-dns.yaml new file mode 100644 index 0000000..ddcc279 --- /dev/null +++ b/scripts/core-dns.yaml @@ -0,0 +1,165 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | + .:53 { + errors + health + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + proxy . 
/etc/resolv.conf + cache 30 + reload + loadbalance + } +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: coredns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/name: "CoreDNS" +spec: + replicas: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + spec: + serviceAccountName: coredns + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: "CriticalAddonsOnly" + operator: "Exists" + containers: + - name: coredns + image: coredns/coredns:1.2.0 + imagePullPolicy: IfNotPresent + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + readOnly: true + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + dnsPolicy: Default + volumes: + - name: config-volume + configMap: + name: coredns + items: + - key: Corefile + path: Corefile +--- +apiVersion: v1 +kind: Service +metadata: + name: core-dns + namespace: kube-system + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: 10.32.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP diff --git a/scripts/ctrl-mgr_vars b/scripts/ctrl-mgr_vars new file mode 100644 index 0000000..860ca83 --- /dev/null +++ b/scripts/ctrl-mgr_vars @@ -0,0 +1 @@ +KUBE_POD_CIDR=10.200.0.0/16 diff --git a/scripts/etcd_setup.sh b/scripts/etcd_setup.sh new file mode 100755 index 0000000..3d42501 --- /dev/null +++ b/scripts/etcd_setup.sh @@ -0,0 +1,165 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +. .variables +HOST_IP=`hostname -I | awk '{ print $1 }'` +echo "Setting up the Kubernetes repo:" +cat > /etc/yum.repos.d/kubernetes.repo << EOM +[kubernetes] +name=Kubernetes +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg + https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg +EOM + +echo "Installing docker, etcd, and kubectl" +yum install -y docker etcd kubectl + +echo "Starting and enabling the docker daemon" +systemctl start docker && sudo systemctl enable docker + +echo "Setting SELinux to permissive mode" +setenforce 0 +sed -i s/SELINUX=enforcing/SELINUX=permissive/g /etc/selinux/config + +echo "Making required directories and copying key material" +mkdir -p /etc/kubernetes +cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/ + +echo "Generating the ETCD_INITIAL_CLUSTER parameter ..." 
+ +for (( i=0; i<=$COUNTER; i++ )); do + if [ "$i" == "0" ]; then + printf 'controller-'${i}'=https://'$KUBE_SUBNET_ADDR'.1'${i}':2380' > etcd-initial + else + printf ',controller-'${i}'=https://'$KUBE_SUBNET_ADDR'.1'${i}':2380' >> etcd-initial + fi +done +ETCD_INITIAL_CLUSTER=`cat etcd-initial` + +echo "Applying the configuration for etcd" +cat > /etc/etcd/etcd.conf << EOM +#[Member] +#ETCD_CORS="" +ETCD_DATA_DIR="/var/lib/etcd/default.etcd" +#ETCD_WAL_DIR="" +ETCD_LISTEN_PEER_URLS="https://$HOST_IP:2380" +ETCD_LISTEN_CLIENT_URLS="https://$HOST_IP:2379,https://127.0.0.1:2379" +#ETCD_MAX_SNAPSHOTS="5" +#ETCD_MAX_WALS="5" +ETCD_NAME="$HOSTNAME" +#ETCD_SNAPSHOT_COUNT="100000" +#ETCD_HEARTBEAT_INTERVAL="100" +#ETCD_ELECTION_TIMEOUT="1000" +#ETCD_QUOTA_BACKEND_BYTES="0" +#ETCD_MAX_REQUEST_BYTES="1572864" +#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s" +#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s" +#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s" +# +#[Clustering] +ETCD_INITIAL_ADVERTISE_PEER_URLS="https://$HOST_IP:2380" +ETCD_ADVERTISE_CLIENT_URLS="https://$HOST_IP:2379" +#ETCD_DISCOVERY="" +#ETCD_DISCOVERY_FALLBACK="proxy" +#ETCD_DISCOVERY_PROXY="" +#ETCD_DISCOVERY_SRV="" +ETCD_INITIAL_CLUSTER="$ETCD_INITIAL_CLUSTER" +ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" +ETCD_INITIAL_CLUSTER_STATE="new" +#ETCD_STRICT_RECONFIG_CHECK="true" +#ETCD_ENABLE_V2="true" +# +#[Proxy] +#ETCD_PROXY="off" +#ETCD_PROXY_FAILURE_WAIT="5000" +#ETCD_PROXY_REFRESH_INTERVAL="30000" +#ETCD_PROXY_DIAL_TIMEOUT="1000" +#ETCD_PROXY_WRITE_TIMEOUT="5000" +#ETCD_PROXY_READ_TIMEOUT="0" +# +#[Security] +ETCD_CERT_FILE="/etc/etcd/kubernetes.pem" +ETCD_KEY_FILE="/etc/etcd/kubernetes-key.pem" +ETCD_CLIENT_CERT_AUTH="true" +ETCD_TRUSTED_CA_FILE="/etc/etcd/ca.pem" +#ETCD_AUTO_TLS="false" +ETCD_PEER_CERT_FILE="/etc/etcd/kubernetes.pem" +ETCD_PEER_KEY_FILE="/etc/etcd/kubernetes-key.pem" +ETCD_PEER_CLIENT_CERT_AUTH="true" +ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ca.pem" +#ETCD_PEER_AUTO_TLS="false" +# +#[Logging] +#ETCD_DEBUG="false" +#ETCD_LOG_PACKAGE_LEVELS="" +#ETCD_LOG_OUTPUT="default" +# +#[Unsafe] +#ETCD_FORCE_NEW_CLUSTER="false" +# +#[Version] +#ETCD_VERSION="false" +#ETCD_AUTO_COMPACTION_RETENTION="0" +# +#[Profiling] +#ETCD_ENABLE_PPROF="false" +#ETCD_METRICS="basic" +# +#[Auth] +#ETCD_AUTH_TOKEN="simple" +EOM + +cat > /usr/lib/systemd/system/etcd.service << EOM +[Unit] +Description=Etcd Server +After=network.target +After=network-online.target +Wants=network-online.target + +[Service] +Type=notify +WorkingDirectory=/var/lib/etcd/ +EnvironmentFile=-/etc/etcd/etcd.conf +User=etcd +# set GOMAXPROCS to number of processors +ExecStart=/bin/bash -c "GOMAXPROCS=\$(nproc) /usr/bin/etcd --name=\${ETCD_NAME} \\ + --data-dir=\${ETCD_DATA_DIR} \\ + --listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS} \\ + --listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \\ + --initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \\ + --advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \\ + --initial-cluster=\${ETCD_INITIAL_CLUSTER} \\ + --initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \\ + --initial-cluster-state=\${ETCD_INITIAL_CLUSTER_STATE} \\ + --cert-file=\${ETCD_CERT_FILE} \\ + --key-file=\${ETCD_KEY_FILE} \\ + --client-cert-auth=\${ETCD_CLIENT_CERT_AUTH} \\ + --trusted-ca-file=\${ETCD_TRUSTED_CA_FILE} \\ + --peer-cert-file=\${ETCD_PEER_CERT_FILE} \\ + --peer-key-file=\${ETCD_PEER_KEY_FILE} \\ + --peer-client-cert-auth=\${ETCD_PEER_CLIENT_CERT_AUTH} \\ + --peer-trusted-ca-file=\${ETCD_PEER_TRUSTED_CA_FILE}" +Restart=on-failure +LimitNOFILE=65536 + +[Install] 
+WantedBy=multi-user.target +EOM + +chown etcd:etcd /etc/etcd/*.pem +rm -rf /var/lib/etcd/default.etcd + +echo "Starting and enabling the etcd daemon" +systemctl daemon-reload +systemctl start etcd +systemctl enable etcd + +echo "Checking that etcd is running" +etcdctl --ca-file /etc/etcd/ca.pem --cert-file /etc/etcd/kubernetes.pem --key-file /etc/etcd/kubernetes-key.pem --endpoints https://127.0.0.1:2379 member list + +echo "The next step is to create the Kubernetes services" diff --git a/scripts/etcd_vars b/scripts/etcd_vars new file mode 100644 index 0000000..9a792d6 --- /dev/null +++ b/scripts/etcd_vars @@ -0,0 +1,2 @@ +KUBE_SUBNET_ADDR=10.240.0 +KUBE_CONTROLLERS=2 diff --git a/scripts/gvisor_tests.sh b/scripts/gvisor_tests.sh new file mode 100755 index 0000000..9d3ae71 --- /dev/null +++ b/scripts/gvisor_tests.sh @@ -0,0 +1,12 @@ +echo "Getting list of containers running under gVisor" +runsc --root /run/containerd/runsc/k8s.io list + +echo "Getting the ID of the untrusted pod" +POD_ID=$(crictl -r unix:///var/run/containerd/containerd.sock pods --name untrusted -q) + +echo "Getting the ID of the webserver container in the untrusted pod" +CONTAINER_ID=$(crictl -r unix:///var/run/containerd/containerd.sock ps -p ${POD_ID} -q) + +echo "Displaying the processes running inside the webserver container" +runsc --root /run/containerd/runsc/k8s.io ps ${CONTAINER_ID} + diff --git a/scripts/instance b/scripts/instance new file mode 100644 index 0000000..a5f85e0 --- /dev/null +++ b/scripts/instance @@ -0,0 +1 @@ +controller-0,controller-1 \ No newline at end of file diff --git a/scripts/kube-apiserver_setup.sh b/scripts/kube-apiserver_setup.sh new file mode 100755 index 0000000..8cf84a7 --- /dev/null +++ b/scripts/kube-apiserver_setup.sh @@ -0,0 +1,86 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## + +. etcd_vars + +KUBE_CONTROLLERS=$((KUBE_CONTROLLERS-1)) +for (( i=0; i<=$KUBE_CONTROLLERS; i++)); do + if [ "$i" == "$KUBE_CONTROLLERS" ]; then + ETCD_SERVER=https://$KUBE_SUBNET_ADDR.1${i}:2379 + else + ETCD_SERVER=https://$KUBE_SUBNET_ADDR.1${i}:2379, + fi + ETCD_SERVERS=`echo $ETCD_SERVERS$ETCD_SERVER` +done + +curl https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/linux/amd64/kube-apiserver -o /usr/local/bin/kube-apiserver +chmod 755 /usr/local/bin/kube-apiserver + +mkdir -p /var/lib/kubernetes +mv ca.pem ca-key.pem kubernetes.pem kubernetes-key.pem service-account.pem service-account-key.pem encryption-config.yaml /var/lib/kubernetes/ + +cat > /etc/systemd/system/kube-apiserver.service << 'EOM' +[Unit] +Description=Kubernetes API Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=network.target +After=etcd.service + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/apiserver +#User=kube +ExecStart=/usr/local/bin/kube-apiserver \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_ETCD_SERVERS \ + $KUBE_API_ADDRESS \ + $KUBE_API_PORT \ + $KUBELET_PORT \ + $KUBE_ALLOW_PRIV \ + $KUBE_SERVICE_ADDRESSES \ + $KUBE_ADMISSION_CONTROL \ + $KUBE_API_ARGS +Restart=on-failure +Type=notify +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target +EOM + +cat > /etc/kubernetes/apiserver << EOM +### +# kubernetes system config +# +# The following values are used to configure the kube-apiserver +# + +# The address on the local server to listen to. +#KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1" + +# The port on the local server to listen on. 
+#KUBE_API_PORT="--port=6443" + +# Port minions listen on +KUBELET_PORT="--kubelet-port=10250" + +# Comma separated list of nodes in the etcd cluster +KUBE_ETCD_SERVERS="--etcd-servers=$ETCD_SERVERS" + +# Address range to use for services +KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.32.0.0/16" + +# default admission control policies +KUBE_ADMISSION_CONTROL="--enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota" +# Add your own! +KUBE_API_ARGS="--advertise-address=`hostname -i` --allow-privileged=true --apiserver-count=2 --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/log/audit/audit.log --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --client-ca-file=/var/lib/kubernetes/ca.pem --enable-swagger-ui=true --etcd-cafile=/var/lib/kubernetes/ca.pem --etcd-certfile=/var/lib/kubernetes/kubernetes.pem --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem --event-ttl=1h --experimental-encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem --kubelet-https=true --runtime-config=api/all --service-account-key-file=/var/lib/kubernetes/service-account.pem --service-node-port-range=30000-32767 --tls-cert-file=/var/lib/kubernetes/kubernetes.pem --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem --v=2" +EOM + +systemctl daemon-reload +systemctl start kube-apiserver +systemctl enable kube-apiserver + +echo "The next step is to create the Kubernetes Controller Manager" diff --git a/scripts/kube-apiserver_setup.sh.template b/scripts/kube-apiserver_setup.sh.template new file mode 100755 index 0000000..53ef67e --- /dev/null +++ b/scripts/kube-apiserver_setup.sh.template @@ -0,0 +1,86 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## + +. 
etcd_vars + +KUBE_CONTROLLERS=$((KUBE_CONTROLLERS-1)) +for (( i=0; i<=$KUBE_CONTROLLERS; i++)); do + if [ "$i" == "$KUBE_CONTROLLERS" ]; then + ETCD_SERVER=https://$KUBE_SUBNET_ADDR.1${i}:2379 + else + ETCD_SERVER=https://$KUBE_SUBNET_ADDR.1${i}:2379, + fi + ETCD_SERVERS=`echo $ETCD_SERVERS$ETCD_SERVER` +done + +curl https://storage.googleapis.com/kubernetes-release/release/vKUBE_VERSION/bin/linux/amd64/kube-apiserver -o /usr/local/bin/kube-apiserver +chmod 755 /usr/local/bin/kube-apiserver + +mkdir -p /var/lib/kubernetes +mv ca.pem ca-key.pem kubernetes.pem kubernetes-key.pem service-account.pem service-account-key.pem encryption-config.yaml /var/lib/kubernetes/ + +cat > /etc/systemd/system/kube-apiserver.service << 'EOM' +[Unit] +Description=Kubernetes API Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=network.target +After=etcd.service + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/apiserver +#User=kube +ExecStart=/usr/local/bin/kube-apiserver \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_ETCD_SERVERS \ + $KUBE_API_ADDRESS \ + $KUBE_API_PORT \ + $KUBELET_PORT \ + $KUBE_ALLOW_PRIV \ + $KUBE_SERVICE_ADDRESSES \ + $KUBE_ADMISSION_CONTROL \ + $KUBE_API_ARGS +Restart=on-failure +Type=notify +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target +EOM + +cat > /etc/kubernetes/apiserver << EOM +### +# kubernetes system config +# +# The following values are used to configure the kube-apiserver +# + +# The address on the local server to listen to. +#KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1" + +# The port on the local server to listen on. +#KUBE_API_PORT="--port=6443" + +# Port minions listen on +KUBELET_PORT="--kubelet-port=10250" + +# Comma separated list of nodes in the etcd cluster +KUBE_ETCD_SERVERS="--etcd-servers=$ETCD_SERVERS" + +# Address range to use for services +KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.32.0.0/16" + +# default admission control policies +KUBE_ADMISSION_CONTROL="--enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota" +# Add your own! 
+KUBE_API_ARGS="--advertise-address=`hostname -i` --allow-privileged=true --apiserver-count=2 --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/log/audit/audit.log --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --client-ca-file=/var/lib/kubernetes/ca.pem --enable-swagger-ui=true --etcd-cafile=/var/lib/kubernetes/ca.pem --etcd-certfile=/var/lib/kubernetes/kubernetes.pem --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem --event-ttl=1h --experimental-encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem --kubelet-https=true --runtime-config=api/all --service-account-key-file=/var/lib/kubernetes/service-account.pem --service-node-port-range=30000-32767 --tls-cert-file=/var/lib/kubernetes/kubernetes.pem --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem --v=2" +EOM + +systemctl daemon-reload +systemctl start kube-apiserver +systemctl enable kube-apiserver + +echo "The next step is to create the Kubernetes Controller Manager" diff --git a/scripts/kube-dns.yaml b/scripts/kube-dns.yaml new file mode 100644 index 0000000..40ded69 --- /dev/null +++ b/scripts/kube-dns.yaml @@ -0,0 +1,206 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+ strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + volumes: + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + containers: + - name: kubedns + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.10 + resources: + # TODO: Set memory limits when we've profiled the container for large + # clusters, then set request = limit to keep this container in + # guaranteed class. Currently, this container falls into the + # "burstable" category so the kubelet doesn't backoff from restarting it. + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + livenessProbe: + httpGet: + path: /healthcheck/kubedns + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /readiness + port: 8081 + scheme: HTTP + # we poll on pod startup for the Kubernetes master service and + # only setup the /readiness HTTP server once that's available. + initialDelaySeconds: 3 + timeoutSeconds: 5 + args: + - --domain=cluster.local. + - --dns-port=10053 + - --config-dir=/kube-dns-config + - --v=2 + env: + - name: PROMETHEUS_PORT + value: "10055" + ports: + - containerPort: 10053 + name: dns-local + protocol: UDP + - containerPort: 10053 + name: dns-tcp-local + protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP + volumeMounts: + - name: kube-dns-config + mountPath: /kube-dns-config + - name: dnsmasq + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.10 + livenessProbe: + httpGet: + path: /healthcheck/dnsmasq + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k + - --cache-size=1000 + - --no-negcache + - --log-facility=- + - --server=/cluster.local/127.0.0.1#10053 + - --server=/in-addr.arpa/127.0.0.1#10053 + - --server=/ip6.arpa/127.0.0.1#10053 + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + # see: https://github.com/kubernetes/kubernetes/issues/29055 for details + resources: + requests: + cpu: 150m + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny + - name: sidecar + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.10 + livenessProbe: + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - --v=2 + - --logtostderr + - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV + - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV + ports: + - containerPort: 10054 + name: metrics + protocol: TCP + resources: + requests: + memory: 20Mi + cpu: 10m + dnsPolicy: Default # Don't use cluster DNS. + serviceAccountName: kube-dns diff --git a/scripts/kube-dns.yaml.1 b/scripts/kube-dns.yaml.1 new file mode 100644 index 0000000..477ba72 --- /dev/null +++ b/scripts/kube-dns.yaml.1 @@ -0,0 +1,206 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: 10.32.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-dns + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-dns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + # replicas: not specified here: + # 1. In order to make Addon Manager do not reconcile this replicas parameter. + # 2. Default is 1. + # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + volumes: + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + containers: + - name: kubedns + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 + resources: + # TODO: Set memory limits when we've profiled the container for large + # clusters, then set request = limit to keep this container in + # guaranteed class. Currently, this container falls into the + # "burstable" category so the kubelet doesn't backoff from restarting it. + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + livenessProbe: + httpGet: + path: /healthcheck/kubedns + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /readiness + port: 8081 + scheme: HTTP + # we poll on pod startup for the Kubernetes master service and + # only setup the /readiness HTTP server once that's available. + initialDelaySeconds: 3 + timeoutSeconds: 5 + args: + - --domain=cluster.local. 
+ - --dns-port=10053 + - --config-dir=/kube-dns-config + - --v=2 + env: + - name: PROMETHEUS_PORT + value: "10055" + ports: + - containerPort: 10053 + name: dns-local + protocol: UDP + - containerPort: 10053 + name: dns-tcp-local + protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP + volumeMounts: + - name: kube-dns-config + mountPath: /kube-dns-config + - name: dnsmasq + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + livenessProbe: + httpGet: + path: /healthcheck/dnsmasq + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k + - --cache-size=1000 + - --no-negcache + - --log-facility=- + - --server=/cluster.local/127.0.0.1#10053 + - --server=/in-addr.arpa/127.0.0.1#10053 + - --server=/ip6.arpa/127.0.0.1#10053 + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + # see: https://github.com/kubernetes/kubernetes/issues/29055 for details + resources: + requests: + cpu: 150m + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny + - name: sidecar + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 + livenessProbe: + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - --v=2 + - --logtostderr + - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV + - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV + ports: + - containerPort: 10054 + name: metrics + protocol: TCP + resources: + requests: + memory: 20Mi + cpu: 10m + dnsPolicy: Default # Don't use cluster DNS. + serviceAccountName: kube-dns diff --git a/scripts/kube-dns.yaml.2 b/scripts/kube-dns.yaml.2 new file mode 100644 index 0000000..477ba72 --- /dev/null +++ b/scripts/kube-dns.yaml.2 @@ -0,0 +1,206 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: 10.32.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-dns + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-dns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + # replicas: not specified here: + # 1. In order to make Addon Manager do not reconcile this replicas parameter. + # 2. Default is 1. + # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + volumes: + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + containers: + - name: kubedns + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 + resources: + # TODO: Set memory limits when we've profiled the container for large + # clusters, then set request = limit to keep this container in + # guaranteed class. Currently, this container falls into the + # "burstable" category so the kubelet doesn't backoff from restarting it. + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + livenessProbe: + httpGet: + path: /healthcheck/kubedns + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /readiness + port: 8081 + scheme: HTTP + # we poll on pod startup for the Kubernetes master service and + # only setup the /readiness HTTP server once that's available. + initialDelaySeconds: 3 + timeoutSeconds: 5 + args: + - --domain=cluster.local. 
+ - --dns-port=10053 + - --config-dir=/kube-dns-config + - --v=2 + env: + - name: PROMETHEUS_PORT + value: "10055" + ports: + - containerPort: 10053 + name: dns-local + protocol: UDP + - containerPort: 10053 + name: dns-tcp-local + protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP + volumeMounts: + - name: kube-dns-config + mountPath: /kube-dns-config + - name: dnsmasq + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + livenessProbe: + httpGet: + path: /healthcheck/dnsmasq + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k + - --cache-size=1000 + - --no-negcache + - --log-facility=- + - --server=/cluster.local/127.0.0.1#10053 + - --server=/in-addr.arpa/127.0.0.1#10053 + - --server=/ip6.arpa/127.0.0.1#10053 + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + # see: https://github.com/kubernetes/kubernetes/issues/29055 for details + resources: + requests: + cpu: 150m + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny + - name: sidecar + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 + livenessProbe: + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - --v=2 + - --logtostderr + - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV + - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV + ports: + - containerPort: 10054 + name: metrics + protocol: TCP + resources: + requests: + memory: 20Mi + cpu: 10m + dnsPolicy: Default # Don't use cluster DNS. + serviceAccountName: kube-dns diff --git a/scripts/kube-dns.yaml.3 b/scripts/kube-dns.yaml.3 new file mode 100644 index 0000000..477ba72 --- /dev/null +++ b/scripts/kube-dns.yaml.3 @@ -0,0 +1,206 @@ +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: 10.32.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-dns + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-dns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + # replicas: not specified here: + # 1. In order to make Addon Manager do not reconcile this replicas parameter. + # 2. Default is 1. + # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 0 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + volumes: + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + containers: + - name: kubedns + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7 + resources: + # TODO: Set memory limits when we've profiled the container for large + # clusters, then set request = limit to keep this container in + # guaranteed class. Currently, this container falls into the + # "burstable" category so the kubelet doesn't backoff from restarting it. + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + livenessProbe: + httpGet: + path: /healthcheck/kubedns + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /readiness + port: 8081 + scheme: HTTP + # we poll on pod startup for the Kubernetes master service and + # only setup the /readiness HTTP server once that's available. + initialDelaySeconds: 3 + timeoutSeconds: 5 + args: + - --domain=cluster.local. 
+ - --dns-port=10053 + - --config-dir=/kube-dns-config + - --v=2 + env: + - name: PROMETHEUS_PORT + value: "10055" + ports: + - containerPort: 10053 + name: dns-local + protocol: UDP + - containerPort: 10053 + name: dns-tcp-local + protocol: TCP + - containerPort: 10055 + name: metrics + protocol: TCP + volumeMounts: + - name: kube-dns-config + mountPath: /kube-dns-config + - name: dnsmasq + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7 + livenessProbe: + httpGet: + path: /healthcheck/dnsmasq + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k + - --cache-size=1000 + - --no-negcache + - --log-facility=- + - --server=/cluster.local/127.0.0.1#10053 + - --server=/in-addr.arpa/127.0.0.1#10053 + - --server=/ip6.arpa/127.0.0.1#10053 + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + # see: https://github.com/kubernetes/kubernetes/issues/29055 for details + resources: + requests: + cpu: 150m + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny + - name: sidecar + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7 + livenessProbe: + httpGet: + path: /metrics + port: 10054 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + args: + - --v=2 + - --logtostderr + - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV + - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV + ports: + - containerPort: 10054 + name: metrics + protocol: TCP + resources: + requests: + memory: 20Mi + cpu: 10m + dnsPolicy: Default # Don't use cluster DNS. 
+ serviceAccountName: kube-dns diff --git a/scripts/nginx_health_monitor_setup.sh b/scripts/nginx_health_monitor_setup.sh new file mode 100755 index 0000000..fa0680c --- /dev/null +++ b/scripts/nginx_health_monitor_setup.sh @@ -0,0 +1,18 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +yum install -y nginx + +mv kubernetes.default.svc.cluster.local.conf /etc/nginx/conf.d/kubernetes.default.svc.cluster.local.conf +systemctl start nginx && sudo systemctl enable nginx + +kubectl get componentstatuses --kubeconfig admin.kubeconfig + +curl -H "Host: kubernetes.default.svc.cluster.local" -i http://127.0.0.1/healthz + +echo "Also applying RBAC roles to admin user" +kubectl apply -f rbac_authorizations.yaml --kubeconfig admin.kubeconfig +kubectl apply -f kube-apiserver_rbac.yaml --kubeconfig admin.kubeconfig + +echo "The next step is to create the Load Balancer" diff --git a/scripts/scheduler_setup.sh b/scripts/scheduler_setup.sh new file mode 100755 index 0000000..edf08a0 --- /dev/null +++ b/scripts/scheduler_setup.sh @@ -0,0 +1,50 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## + +curl https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/linux/amd64/kube-scheduler -o /usr/local/bin/kube-scheduler +chmod 755 /usr/local/bin/kube-scheduler + +cat > /etc/kubernetes/kube-scheduler.yaml << 'EOM' +apiVersion: componentconfig/v1alpha1 +kind: KubeSchedulerConfiguration +clientConnection: + kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig" +leaderElection: + leaderElect: true +EOM + +mv kube-scheduler.kubeconfig /var/lib/kubernetes/ + +cat > /etc/systemd/system/kube-scheduler.service << 'EOM' +[Unit] +Description=Kubernetes Scheduler +Documentation=https://github.com/kubernetes/kubernetes + +[Service] +ExecStart=/usr/local/bin/kube-scheduler \ + --config=/etc/kubernetes/kube-scheduler.yaml \ + --v=2 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOM + +cat > /etc/kubernetes/scheduler << 'EOM' +### +# kubernetes scheduler config + +# default config should be adequate + +# Add your own! 
+KUBE_SCHEDULER_ARGS="--config=/etc/kubernetes/kube-scheduler.yaml --v=2" +EOM + +systemctl daemon-reload +systemctl start kube-scheduler +systemctl enable kube-scheduler + +echo "The next step is to create the nginx based health monitor" diff --git a/scripts/scheduler_setup.sh.template b/scripts/scheduler_setup.sh.template new file mode 100755 index 0000000..656a8bf --- /dev/null +++ b/scripts/scheduler_setup.sh.template @@ -0,0 +1,50 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## + +curl https://storage.googleapis.com/kubernetes-release/release/vKUBE_VERSION/bin/linux/amd64/kube-scheduler -o /usr/local/bin/kube-scheduler +chmod 755 /usr/local/bin/kube-scheduler + +cat > /etc/kubernetes/kube-scheduler.yaml << 'EOM' +apiVersion: componentconfig/v1alpha1 +kind: KubeSchedulerConfiguration +clientConnection: + kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig" +leaderElection: + leaderElect: true +EOM + +mv kube-scheduler.kubeconfig /var/lib/kubernetes/ + +cat > /etc/systemd/system/kube-scheduler.service << 'EOM' +[Unit] +Description=Kubernetes Scheduler +Documentation=https://github.com/kubernetes/kubernetes + +[Service] +ExecStart=/usr/local/bin/kube-scheduler \ + --config=/etc/kubernetes/kube-scheduler.yaml \ + --v=2 +Restart=on-failure +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOM + +cat > /etc/kubernetes/scheduler << 'EOM' +### +# kubernetes scheduler config + +# default config should be adequate + +# Add your own! +KUBE_SCHEDULER_ARGS="--config=/etc/kubernetes/kube-scheduler.yaml --v=2" +EOM + +systemctl daemon-reload +systemctl start kube-scheduler +systemctl enable kube-scheduler + +echo "The next step is to create the nginx based health monitor" diff --git a/scripts/untrusted.yaml b/scripts/untrusted.yaml new file mode 100644 index 0000000..aa733cb --- /dev/null +++ b/scripts/untrusted.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Pod +metadata: + name: untrusted + annotations: + io.kubernetes.cri.untrusted-workload: "true" +spec: + containers: + - name: webserver + image: gcr.io/hightowerlabs/helloworld:2.0.0 diff --git a/scripts/worker_kube-proxy_setup.sh b/scripts/worker_kube-proxy_setup.sh new file mode 100755 index 0000000..ccea1ff --- /dev/null +++ b/scripts/worker_kube-proxy_setup.sh @@ -0,0 +1,50 @@ +#!/bin/bash +## +## Script to provide commands to run on all worker nodes +## + +############################## +# Configuring the Kube Proxy # +############################## + +mkdir -p /var/lib/kube-proxy +mv kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig +cat > /etc/systemd/system/kube-proxy.service << 'EOM' +[Unit] +Description=Kubernetes Kube-Proxy Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=network.target + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/proxy +ExecStart=/usr/local/bin/kube-proxy \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_MASTER \ + $KUBE_PROXY_ARGS +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target +EOM + +echo 'KUBE_PROXY_ARGS="--config=/var/lib/kube-proxy/kube-proxy-config.yaml"' > /etc/kubernetes/proxy + +cat > /var/lib/kube-proxy/kube-proxy-config.yaml << EOM +kind: KubeProxyConfiguration +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +clientConnection: + kubeconfig: "/var/lib/kube-proxy/kubeconfig" +mode: "iptables" +clusterCIDR: "10.200.0.0/16" +EOM + +curl https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/linux/amd64/kube-proxy -o 
/usr/local/bin/kube-proxy +chmod 755 /usr/local/bin/kube-proxy + +systemctl daemon-reload +systemctl enable kube-proxy +systemctl start kube-proxy + diff --git a/scripts/worker_kube-proxy_setup.sh.template b/scripts/worker_kube-proxy_setup.sh.template new file mode 100755 index 0000000..1349866 --- /dev/null +++ b/scripts/worker_kube-proxy_setup.sh.template @@ -0,0 +1,50 @@ +#!/bin/bash +## +## Script to provide commands to run on all worker nodes +## + +############################## +# Configuring the Kube Proxy # +############################## + +mkdir -p /var/lib/kube-proxy +mv kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig +cat > /etc/systemd/system/kube-proxy.service << 'EOM' +[Unit] +Description=Kubernetes Kube-Proxy Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=network.target + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/proxy +ExecStart=/usr/local/bin/kube-proxy \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_MASTER \ + $KUBE_PROXY_ARGS +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target +EOM + +echo 'KUBE_PROXY_ARGS="--config=/var/lib/kube-proxy/kube-proxy-config.yaml"' > /etc/kubernetes/proxy + +cat > /var/lib/kube-proxy/kube-proxy-config.yaml << EOM +kind: KubeProxyConfiguration +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +clientConnection: + kubeconfig: "/var/lib/kube-proxy/kubeconfig" +mode: "iptables" +clusterCIDR: "KUBE_POD_CIDR" +EOM + +curl https://storage.googleapis.com/kubernetes-release/release/vKUBE_VERSION/bin/linux/amd64/kube-proxy -o /usr/local/bin/kube-proxy +chmod 755 /usr/local/bin/kube-proxy + +systemctl daemon-reload +systemctl enable kube-proxy +systemctl start kube-proxy + diff --git a/scripts/worker_kubelet_containerd_setup.sh.template b/scripts/worker_kubelet_containerd_setup.sh.template new file mode 100755 index 0000000..95db7d1 --- /dev/null +++ b/scripts/worker_kubelet_containerd_setup.sh.template @@ -0,0 +1,158 @@ +#!/bin/bash +## +## Script to provide commands to run on all worker nodes +## +. 
.worker_variables +echo "Setting up the Kubernetes repo:" +cat > /etc/yum.repos.d/kubernetes.repo << EOM +[kubernetes] +name=Kubernetes +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg + https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg +EOM + +yum install -y conntrack-tools socat containernetworking-plugins + +# Download the binaries (-o writes to the given path; -L follows the GitHub release redirects) +curl https://storage.googleapis.com/kubernetes-the-hard-way/runsc -o /usr/local/bin/runsc +curl https://storage.googleapis.com/kubernetes-release/release/vKUBE_VERSION/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl +curl -L https://github.com/opencontainers/runc/releases/download/v1.0.0-rc5/runc.amd64 -o /usr/local/bin/runc +curl -L https://github.com/containerd/containerd/releases/download/v1.2.0-beta.0/containerd-1.2.0-beta.0.linux-amd64.tar.gz -o /tmp/containerd.tar.gz +curl -L https://github.com/kubernetes-incubator/cri-tools/releases/download/v1.11.1-beta.0/crictl-v1.11.1-linux-amd64.tar.gz -o /tmp/crictl.tar.gz +chmod 755 /usr/local/bin/runsc /usr/local/bin/kubectl /usr/local/bin/runc + +tar zxvf /tmp/containerd.tar.gz -C / +tar zxvf /tmp/crictl.tar.gz -C /usr/local/bin + +sed -i s/SELINUX=enforcing/SELINUX=permissive/g /etc/selinux/config +sudo setenforce 0 + +mkdir -p /etc/cni/net.d +cat > /etc/cni/net.d/10-bridge.conf << EOM +{ + "cniVersion": "0.3.1", + "name": "bridge", + "type": "bridge", + "bridge": "cnio0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "ranges": [ + [{"subnet": "$KUBE_POD_ADDR/$KUBE_NODE_POD_PREFIX"}] + ], + "routes": [{"dst": "0.0.0.0/0"}] + } +} +EOM +cat > /etc/cni/net.d/99-loopback.conf << EOM +{ + "cniVersion": "0.3.1", + "type": "loopback" +} +EOM + +########################## +# Configuring containerd # +########################## +echo "Configuring containerd" +mkdir -p /etc/containerd/ +cat << EOF | sudo tee /etc/containerd/config.toml +[plugins] + [plugins.cri.containerd] + snapshotter = "overlayfs" + [plugins.cri.containerd.default_runtime] + runtime_type = "io.containerd.runtime.v1.linux" + runtime_engine = "/usr/local/bin/runc" + runtime_root = "" + [plugins.cri.containerd.untrusted_workload_runtime] + runtime_type = "io.containerd.runtime.v1.linux" + runtime_engine = "/usr/local/bin/runsc" + runtime_root = "/run/containerd/runsc" +EOF + +echo "Creating the containerd service" +cat > /etc/systemd/system/kubelet.service << EOM +[Unit] +Description=kubelet: The Kubernetes Node Agent +Documentation=http://kubernetes.io/docs/ + +[Service] +EnvironmentFile=-/etc/kubernetes/kubelet +ExecStart=/usr/local/bin/kubelet \$KUBELET_ARGS +Restart=always +StartLimitInterval=0 +RestartSec=10 + +[Install] +WantedBy=multi-user.target +EOM + +cat > /etc/kubernetes/kubelet << EOM +KUBELET_ARGS="--cni-bin-dir=/usr/libexec/cni --cgroup-driver=systemd --config=/var/lib/kubelet/kubelet-config.yaml --docker-endpoint=unix:///var/run/docker.sock --image-pull-progress-deadline=2m --kubeconfig=/var/lib/kubelet/kubeconfig --network-plugin=cni --v=2" +EOM + +cat > /var/lib/kubelet/kubelet-config.yaml << EOM +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +authentication: + anonymous: + enabled: false + webhook: + enabled: true + x509: + clientCAFile: "/var/lib/kubernetes/ca.pem" +authorization: + mode: Webhook +clusterDomain: "cluster.local" +clusterDNS: + - "10.32.0.10" +podCIDR: "$KUBE_POD_ADDR/$KUBE_NODE_POD_PREFIX" +runtimeRequestTimeout: "15m" +tlsCertFile: 
"/var/lib/kubelet/${HOSTNAME}.pem" +tlsPrivateKeyFile: "/var/lib/kubelet/${HOSTNAME}-key.pem" +EOM + +mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem /var/lib/kubelet/ +mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig +mv ca.pem /var/lib/kubernetes/ + +systemctl daemon-reload +systemctl enable kubelet +sudo systemctl start kubelet + diff --git a/scripts/worker_kubelet_setup.sh b/scripts/worker_kubelet_setup.sh new file mode 100755 index 0000000..3294b60 --- /dev/null +++ b/scripts/worker_kubelet_setup.sh @@ -0,0 +1,106 @@ +#!/bin/bash +## +## Script to provide commands to run on all worker nodes +## +. .worker_variables +echo "Setting up the Kubernetes repo:" +cat > /etc/yum.repos.d/kubernetes.repo << EOM +[kubernetes] +name=Kubernetes +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg + https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg +EOM + +yum install -y conntrack-tools kubelet docker socat containernetworking-plugins +systemctl start docker && sudo systemctl enable docker + +sed -i s/SELINUX=enforcing/SELINUX=permissive/g /etc/selinux/config +sudo setenforce 0 + +mkdir -p /etc/cni/net.d +cat > /etc/cni/net.d/10-bridge.conf << EOM +{ + "cniVersion": "0.3.1", + "name": "bridge", + "type": "bridge", + "bridge": "cnio0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "ranges": [ + [{"subnet": "$KUBE_POD_ADDR/$KUBE_NODE_POD_PREFIX"}] + ], + "routes": [{"dst": "0.0.0.0/0"}] + } +} +EOM +cat > /etc/cni/net.d/99-loopback.conf << EOM +{ + "cniVersion": "0.3.1", + "type": "loopback" +} +EOM + +########################### +# Configuring the Kubelet # +########################### +curl https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/linux/amd64/kubelet -o /usr/local/bin/kubelet +chmod 755 /usr/local/bin/kubelet + +mkdir -p /etc/kubernetes +mkdir -p /var/lib/kubelet +mkdir -p /var/lib/kubernetes +cat > /etc/systemd/system/kubelet.service << EOM +[Unit] +Description=kubelet: The Kubernetes Node Agent +Documentation=http://kubernetes.io/docs/ + +[Service] +EnvironmentFile=-/etc/kubernetes/kubelet +ExecStart=/usr/local/bin/kubelet \$KUBELET_ARGS +Restart=always +StartLimitInterval=0 +RestartSec=10 + +[Install] +WantedBy=multi-user.target +EOM + +cat > /etc/kubernetes/kubelet << EOM +KUBELET_ARGS="--cni-bin-dir=/usr/libexec/cni --cgroup-driver=systemd --config=/var/lib/kubelet/kubelet-config.yaml --docker-endpoint=unix:///var/run/docker.sock --image-pull-progress-deadline=2m --kubeconfig=/var/lib/kubelet/kubeconfig --network-plugin=cni --v=2" +EOM + +cat > /var/lib/kubelet/kubelet-config.yaml << EOM +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +authentication: + anonymous: + enabled: false + webhook: + enabled: true + x509: + clientCAFile: "/var/lib/kubernetes/ca.pem" +authorization: + mode: Webhook +clusterDomain: "cluster.local" +clusterDNS: + - "10.32.0.10" +podCIDR: "$KUBE_POD_ADDR/$KUBE_NODE_POD_PREFIX" +runtimeRequestTimeout: "15m" +tlsCertFile: "/var/lib/kubelet/${HOSTNAME}.pem" +tlsPrivateKeyFile: "/var/lib/kubelet/${HOSTNAME}-key.pem" +EOM + +mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem /var/lib/kubelet/ +mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig +mv ca.pem /var/lib/kubernetes/ + +systemctl daemon-reload +systemctl enable kubelet +sudo systemctl start kubelet + diff --git a/scripts/worker_kubelet_setup.sh.template b/scripts/worker_kubelet_setup.sh.template new file mode 100755 
index 0000000..a64be8c --- /dev/null +++ b/scripts/worker_kubelet_setup.sh.template @@ -0,0 +1,106 @@ +#!/bin/bash +## +## Script to provide commands to run on all worker nodes +## +. .worker_variables +echo "Setting up the Kubernetes repo:" +cat > /etc/yum.repos.d/kubernetes.repo << EOM +[kubernetes] +name=Kubernetes +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg + https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg +EOM + +yum install -y conntrack-tools kubelet docker socat containernetworking-plugins +systemctl start docker && sudo systemctl enable docker + +sed -i s/SELINUX=enforcing/SELINUX=permissive/g /etc/selinux/config +sudo setenforce 0 + +mkdir -p /etc/cni/net.d +cat > /etc/cni/net.d/10-bridge.conf << EOM +{ + "cniVersion": "0.3.1", + "name": "bridge", + "type": "bridge", + "bridge": "cnio0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "ranges": [ + [{"subnet": "$KUBE_POD_ADDR/$KUBE_NODE_POD_PREFIX"}] + ], + "routes": [{"dst": "0.0.0.0/0"}] + } +} +EOM +cat > /etc/cni/net.d/99-loopback.conf << EOM +{ + "cniVersion": "0.3.1", + "type": "loopback" +} +EOM + +########################### +# Configuring the Kubelet # +########################### +curl https://storage.googleapis.com/kubernetes-release/release/vKUBE_VERSION/bin/linux/amd64/kubelet -o /usr/local/bin/kubelet +chmod 755 /usr/local/bin/kubelet + +mkdir -p /etc/kubernetes +mkdir -p /var/lib/kubelet +mkdir -p /var/lib/kubernetes +cat > /etc/systemd/system/kubelet.service << EOM +[Unit] +Description=kubelet: The Kubernetes Node Agent +Documentation=http://kubernetes.io/docs/ + +[Service] +EnvironmentFile=-/etc/kubernetes/kubelet +ExecStart=/usr/local/bin/kubelet \$KUBELET_ARGS +Restart=always +StartLimitInterval=0 +RestartSec=10 + +[Install] +WantedBy=multi-user.target +EOM + +cat > /etc/kubernetes/kubelet << EOM +KUBELET_ARGS="--cni-bin-dir=/usr/libexec/cni --cgroup-driver=systemd --config=/var/lib/kubelet/kubelet-config.yaml --docker-endpoint=unix:///var/run/docker.sock --image-pull-progress-deadline=2m --kubeconfig=/var/lib/kubelet/kubeconfig --network-plugin=cni --v=2" +EOM + +cat > /var/lib/kubelet/kubelet-config.yaml << EOM +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +authentication: + anonymous: + enabled: false + webhook: + enabled: true + x509: + clientCAFile: "/var/lib/kubernetes/ca.pem" +authorization: + mode: Webhook +clusterDomain: "cluster.local" +clusterDNS: + - "10.32.0.10" +podCIDR: "$KUBE_POD_ADDR/$KUBE_NODE_POD_PREFIX" +runtimeRequestTimeout: "15m" +tlsCertFile: "/var/lib/kubelet/${HOSTNAME}.pem" +tlsPrivateKeyFile: "/var/lib/kubelet/${HOSTNAME}-key.pem" +EOM + +mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem /var/lib/kubelet/ +mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig +mv ca.pem /var/lib/kubernetes/ + +systemctl daemon-reload +systemctl enable kubelet +sudo systemctl start kubelet + diff --git a/test.sh b/test.sh new file mode 100755 index 0000000..75871ed --- /dev/null +++ b/test.sh @@ -0,0 +1,10 @@ +#!/bin/bash +. 
.gce_kubernetes.config + +for ((i=0; i<=$KUBE_WORKERS; i++)); do + if [ "$i" == "0" ]; then + printf 'https://'$KUBE_SUBNET_ADDR'.1'${i}':2380' > etcd-listen + else + printf ',https://'$KUBE_SUBNET_ADDR'.1'${i}':2380' >> etcd-listen + fi +done diff --git a/test2 b/test2 new file mode 100644 index 0000000..e69de29 diff --git a/u3_uninstall_kubernetes_controllers.sh b/u3_uninstall_kubernetes_controllers.sh new file mode 100755 index 0000000..4938675 --- /dev/null +++ b/u3_uninstall_kubernetes_controllers.sh @@ -0,0 +1,21 @@ +#!/bin/bash +## +## Script to automate the Kubernetes CentOS client side pieces +## +echo "###################################### +Set the gcloud compute region and zone +######################################" +gcloud config set compute/region us-west1 +gcloud config set compute/zone us-west1-c +gcloud config set project kubernetescentos-205702 +echo "Compute region and zone set" + +for i in 0 1; do + gcloud compute scp controller_uninstall.sh controller-${i}: +done + +echo "Controller setup scripts copied across" +for i in 0 1; do + gcloud compute ssh controller-${i} -- ~/controller_uninstall.sh +done + diff --git a/worker-0/10-bridge.conf b/worker-0/10-bridge.conf new file mode 100644 index 0000000..aa6fd03 --- /dev/null +++ b/worker-0/10-bridge.conf @@ -0,0 +1,15 @@ +{ + "cniVersion": "0.3.1", + "name": "bridge", + "type": "bridge", + "bridge": "cnio0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "ranges": [ + [{"subnet": "10.200.0.0/24"}] + ], + "routes": [{"dst": "0.0.0.0/0"}] + } +} diff --git a/worker-0/99-loopback.conf b/worker-0/99-loopback.conf new file mode 100644 index 0000000..f375c5d --- /dev/null +++ b/worker-0/99-loopback.conf @@ -0,0 +1,4 @@ +{ + "cniVersion": "0.3.0", + "type": "loopback" +} diff --git a/worker-0/config b/worker-0/config new file mode 100644 index 0000000..e57d165 --- /dev/null +++ b/worker-0/config @@ -0,0 +1,13 @@ + +# This file controls the state of SELinux on the system. +# SELINUX= can take one of these three values: +# enforcing - SELinux security policy is enforced. +# permissive - SELinux prints warnings instead of enforcing. +# disabled - No SELinux policy is loaded. +#SELINUX=enforcing +SELINUX=permissive +# SELINUXTYPE= can take one of three two values: +# targeted - Targeted processes are protected, +# minimum - Modification of targeted policy. Only selected processes are protected. +# mls - Multi Level Security protection. 
+SELINUXTYPE=targeted diff --git a/worker-0/kube-proxy-config.yaml b/worker-0/kube-proxy-config.yaml new file mode 100644 index 0000000..af03998 --- /dev/null +++ b/worker-0/kube-proxy-config.yaml @@ -0,0 +1,6 @@ +kind: KubeProxyConfiguration +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +clientConnection: + kubeconfig: "/var/lib/kube-proxy/kubeconfig" +mode: "iptables" +clusterCIDR: "10.200.0.0/16" diff --git a/worker-0/kube-proxy.service b/worker-0/kube-proxy.service new file mode 100644 index 0000000..adeb57e --- /dev/null +++ b/worker-0/kube-proxy.service @@ -0,0 +1,18 @@ +[Unit] +Description=Kubernetes Kube-Proxy Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=network.target + +[Service] +EnvironmentFile=-/etc/kubernetes/config +EnvironmentFile=-/etc/kubernetes/proxy +ExecStart=/usr/local/bin/kube-proxy \ + $KUBE_LOGTOSTDERR \ + $KUBE_LOG_LEVEL \ + $KUBE_MASTER \ + $KUBE_PROXY_ARGS +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target diff --git a/worker-0/kubelet b/worker-0/kubelet new file mode 100644 index 0000000..dec3595 --- /dev/null +++ b/worker-0/kubelet @@ -0,0 +1 @@ +KUBELET_ARGS="--cni-bin-dir=/usr/libexec/cni --cgroup-driver=systemd --config=/var/lib/kubelet/kubelet-config.yaml --docker-endpoint=unix:///var/run/docker.sock --image-pull-progress-deadline=2m --kubeconfig=/var/lib/kubelet/kubeconfig --network-plugin=cni --v=2" diff --git a/worker-0/kubelet-config.yaml b/worker-0/kubelet-config.yaml new file mode 100644 index 0000000..5dba01b --- /dev/null +++ b/worker-0/kubelet-config.yaml @@ -0,0 +1,18 @@ +kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1 +authentication: + anonymous: + enabled: false + webhook: + enabled: true + x509: + clientCAFile: "/var/lib/kubernetes/ca.pem" +authorization: + mode: Webhook +clusterDomain: "cluster.local" +clusterDNS: + - "10.32.0.10" +podCIDR: "10.200.0.0/24" +runtimeRequestTimeout: "15m" +tlsCertFile: "/var/lib/kubelet/worker-0.pem" +tlsPrivateKeyFile: "/var/lib/kubelet/worker-0-key.pem" diff --git a/worker-0/kubelet.service b/worker-0/kubelet.service new file mode 100644 index 0000000..732ff77 --- /dev/null +++ b/worker-0/kubelet.service @@ -0,0 +1,13 @@ +[Unit] +Description=kubelet: The Kubernetes Node Agent +Documentation=http://kubernetes.io/docs/ + +[Service] +EnvironmentFile=-/etc/kubernetes/kubelet +ExecStart=/usr/local/bin/kubelet $KUBELET_ARGS +Restart=always +StartLimitInterval=0 +RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/worker-0/proxy b/worker-0/proxy new file mode 100644 index 0000000..bff7a9e --- /dev/null +++ b/worker-0/proxy @@ -0,0 +1 @@ +KUBE_PROXY_ARGS="--config=/var/lib/kube-proxy/kube-proxy-config.yaml" diff --git a/worker-1/10-bridge.conf b/worker-1/10-bridge.conf new file mode 100644 index 0000000..da22b3a --- /dev/null +++ b/worker-1/10-bridge.conf @@ -0,0 +1,15 @@ +{ + "cniVersion": "0.3.1", + "name": "bridge", + "type": "bridge", + "bridge": "cnio0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "ranges": [ + [{"subnet": "10.200.1.0/24"}] + ], + "routes": [{"dst": "0.0.0.0/0"}] + } +} diff --git a/worker-1/99-loopback.conf b/worker-1/99-loopback.conf new file mode 100644 index 0000000..f375c5d --- /dev/null +++ b/worker-1/99-loopback.conf @@ -0,0 +1,4 @@ +{ + "cniVersion": "0.3.0", + "type": "loopback" +} diff --git a/worker-1/config b/worker-1/config new file mode 100644 index 0000000..e57d165 --- /dev/null +++ b/worker-1/config @@ -0,0 +1,13 @@ + +# This file controls the 
+# SELINUX= can take one of these three values:
+# enforcing - SELinux security policy is enforced.
+# permissive - SELinux prints warnings instead of enforcing.
+# disabled - No SELinux policy is loaded.
+#SELINUX=enforcing
+SELINUX=permissive
+# SELINUXTYPE= can take one of three two values:
+# targeted - Targeted processes are protected,
+# minimum - Modification of targeted policy. Only selected processes are protected.
+# mls - Multi Level Security protection.
+SELINUXTYPE=targeted
diff --git a/worker-1/kube-proxy-config.yaml b/worker-1/kube-proxy-config.yaml
new file mode 100644
index 0000000..af03998
--- /dev/null
+++ b/worker-1/kube-proxy-config.yaml
@@ -0,0 +1,6 @@
+kind: KubeProxyConfiguration
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+clientConnection:
+  kubeconfig: "/var/lib/kube-proxy/kubeconfig"
+mode: "iptables"
+clusterCIDR: "10.200.0.0/16"
diff --git a/worker-1/kube-proxy.service b/worker-1/kube-proxy.service
new file mode 100644
index 0000000..adeb57e
--- /dev/null
+++ b/worker-1/kube-proxy.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=Kubernetes Kube-Proxy Server
+Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+After=network.target
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/kubernetes/proxy
+ExecStart=/usr/local/bin/kube-proxy \
+    $KUBE_LOGTOSTDERR \
+    $KUBE_LOG_LEVEL \
+    $KUBE_MASTER \
+    $KUBE_PROXY_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
diff --git a/worker-1/kubelet b/worker-1/kubelet
new file mode 100644
index 0000000..dec3595
--- /dev/null
+++ b/worker-1/kubelet
@@ -0,0 +1 @@
+KUBELET_ARGS="--cni-bin-dir=/usr/libexec/cni --cgroup-driver=systemd --config=/var/lib/kubelet/kubelet-config.yaml --docker-endpoint=unix:///var/run/docker.sock --image-pull-progress-deadline=2m --kubeconfig=/var/lib/kubelet/kubeconfig --network-plugin=cni --v=2"
diff --git a/worker-1/kubelet-config.yaml b/worker-1/kubelet-config.yaml
new file mode 100644
index 0000000..720574f
--- /dev/null
+++ b/worker-1/kubelet-config.yaml
@@ -0,0 +1,18 @@
+kind: KubeletConfiguration
+apiVersion: kubelet.config.k8s.io/v1beta1
+authentication:
+  anonymous:
+    enabled: false
+  webhook:
+    enabled: true
+  x509:
+    clientCAFile: "/var/lib/kubernetes/ca.pem"
+authorization:
+  mode: Webhook
+clusterDomain: "cluster.local"
+clusterDNS:
+  - "10.32.0.10"
+podCIDR: "10.200.1.0/24"
+runtimeRequestTimeout: "15m"
+tlsCertFile: "/var/lib/kubelet/worker-1.pem"
+tlsPrivateKeyFile: "/var/lib/kubelet/worker-1-key.pem"
diff --git a/worker-1/kubelet.service b/worker-1/kubelet.service
new file mode 100644
index 0000000..732ff77
--- /dev/null
+++ b/worker-1/kubelet.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=kubelet: The Kubernetes Node Agent
+Documentation=http://kubernetes.io/docs/
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/kubelet
+ExecStart=/usr/local/bin/kubelet $KUBELET_ARGS
+Restart=always
+StartLimitInterval=0
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/worker-1/proxy b/worker-1/proxy
new file mode 100644
index 0000000..bff7a9e
--- /dev/null
+++ b/worker-1/proxy
@@ -0,0 +1 @@
+KUBE_PROXY_ARGS="--config=/var/lib/kube-proxy/kube-proxy-config.yaml"
diff --git a/worker_kube-proxy_setup.sh b/worker_kube-proxy_setup.sh
new file mode 100755
index 0000000..b7ec6e9
--- /dev/null
+++ b/worker_kube-proxy_setup.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+##
+## Script to provide commands to run on all worker nodes
+##
+
+##############################
+# Configuring the Kube Proxy #
+##############################
+
+mkdir -p /var/lib/kube-proxy
+mv kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig
+cat > /etc/systemd/system/kube-proxy.service << 'EOM'
+[Unit]
+Description=Kubernetes Kube-Proxy Server
+Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+After=network.target
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/kubernetes/proxy
+ExecStart=/usr/local/bin/kube-proxy \
+    $KUBE_LOGTOSTDERR \
+    $KUBE_LOG_LEVEL \
+    $KUBE_MASTER \
+    $KUBE_PROXY_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
+EOM
+
+echo 'KUBE_PROXY_ARGS="--config=/var/lib/kube-proxy/kube-proxy-config.yaml"' > /etc/kubernetes/proxy
+
+cat > /var/lib/kube-proxy/kube-proxy-config.yaml << EOM
+kind: KubeProxyConfiguration
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+clientConnection:
+  kubeconfig: "/var/lib/kube-proxy/kubeconfig"
+mode: "iptables"
+clusterCIDR: "10.200.0.0/16"
+EOM
+
+curl https://storage.googleapis.com/kubernetes-release/release/v1.10.3/bin/linux/amd64/kube-proxy -o /usr/local/bin/kube-proxy
+chmod 755 /usr/local/bin/kube-proxy
+
+systemctl daemon-reload
+systemctl enable kube-proxy
+systemctl start kube-proxy
+
diff --git a/worker_kube-proxy_setup.sh.template b/worker_kube-proxy_setup.sh.template
new file mode 100755
index 0000000..df6340a
--- /dev/null
+++ b/worker_kube-proxy_setup.sh.template
@@ -0,0 +1,50 @@
+#!/bin/bash
+##
+## Script to provide commands to run on all worker nodes
+##
+
+##############################
+# Configuring the Kube Proxy #
+##############################
+
+mkdir -p /var/lib/kube-proxy
+mv kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig
+cat > /etc/systemd/system/kube-proxy.service << 'EOM'
+[Unit]
+Description=Kubernetes Kube-Proxy Server
+Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+After=network.target
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/kubernetes/proxy
+ExecStart=/usr/local/bin/kube-proxy \
+    $KUBE_LOGTOSTDERR \
+    $KUBE_LOG_LEVEL \
+    $KUBE_MASTER \
+    $KUBE_PROXY_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
+EOM
+
+echo 'KUBE_PROXY_ARGS="--config=/var/lib/kube-proxy/kube-proxy-config.yaml"' > /etc/kubernetes/proxy
+
+cat > /var/lib/kube-proxy/kube-proxy-config.yaml << EOM
+kind: KubeProxyConfiguration
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+clientConnection:
+  kubeconfig: "/var/lib/kube-proxy/kubeconfig"
+mode: "iptables"
+clusterCIDR: "KUBE_POD_CIDR"
+EOM
+
+curl https://storage.googleapis.com/kubernetes-release/release/v1.10.3/bin/linux/amd64/kube-proxy -o /usr/local/bin/kube-proxy
+chmod 755 /usr/local/bin/kube-proxy
+
+systemctl daemon-reload
+systemctl enable kube-proxy
+systemctl start kube-proxy
+
diff --git a/worker_kubelet_setup.sh b/worker_kubelet_setup.sh
new file mode 100755
index 0000000..982f85b
--- /dev/null
+++ b/worker_kubelet_setup.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+##
+## Script to provide commands to run on all worker nodes
+##
+. .worker_variables
+echo "Setting up the Kubernetes repo:"
+cat > /etc/yum.repos.d/kubernetes.repo << EOM
+[kubernetes]
+name=Kubernetes
+baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+enabled=1
+gpgcheck=1
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+EOM
+
+yum install -y conntrack-tools kubelet docker socat containernetworking-plugins
+systemctl start docker && sudo systemctl enable docker
+
+sed -i s/SELINUX=enforcing/SELINUX=permissive/g /etc/selinux/config
+sudo setenforce 0
+
+mkdir -p /etc/cni/net.d
+cat > /etc/cni/net.d/10-bridge.conf << EOM
+{
+  "cniVersion": "0.3.1",
+  "name": "bridge",
+  "type": "bridge",
+  "bridge": "cnio0",
+  "isGateway": true,
+  "ipMasq": true,
+  "ipam": {
+    "type": "host-local",
+    "ranges": [
+      [{"subnet": "$KUBE_POD_ADDR/$KUBE_NODE_POD_PREFIX"}]
+    ],
+    "routes": [{"dst": "0.0.0.0/0"}]
+  }
+}
+EOM
+cat > /etc/cni/net.d/99-loopback.conf << EOM
+{
+  "cniVersion": "0.3.1",
+  "type": "loopback"
+}
+EOM
+
+###########################
+# Configuring the Kubelet #
+###########################
+curl https://storage.googleapis.com/kubernetes-release/release/v1.10.3/bin/linux/amd64/kubelet -o /usr/local/bin/kubelet
+chmod 755 /usr/local/bin/kubelet
+
+mkdir -p /etc/kubernetes
+mkdir -p /var/lib/kubelet
+mkdir -p /var/lib/kubernetes
+cat > /etc/systemd/system/kubelet.service << EOM
+[Unit]
+Description=kubelet: The Kubernetes Node Agent
+Documentation=http://kubernetes.io/docs/
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/kubelet
+ExecStart=/usr/local/bin/kubelet \$KUBELET_ARGS
+Restart=always
+StartLimitInterval=0
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
+EOM
+
+cat > /etc/kubernetes/kubelet << EOM
+KUBELET_ARGS="--cni-bin-dir=/usr/libexec/cni --cgroup-driver=systemd --config=/var/lib/kubelet/kubelet-config.yaml --docker-endpoint=unix:///var/run/docker.sock --image-pull-progress-deadline=2m --kubeconfig=/var/lib/kubelet/kubeconfig --network-plugin=cni --v=2"
+EOM
+
+cat > /var/lib/kubelet/kubelet-config.yaml << EOM
+kind: KubeletConfiguration
+apiVersion: kubelet.config.k8s.io/v1beta1
+authentication:
+  anonymous:
+    enabled: false
+  webhook:
+    enabled: true
+  x509:
+    clientCAFile: "/var/lib/kubernetes/ca.pem"
+authorization:
+  mode: Webhook
+clusterDomain: "cluster.local"
+clusterDNS:
+  - "10.32.0.10"
+podCIDR: "$KUBE_POD_ADDR/$KUBE_NODE_POD_PREFIX"
+runtimeRequestTimeout: "15m"
+tlsCertFile: "/var/lib/kubelet/${HOSTNAME}.pem"
+tlsPrivateKeyFile: "/var/lib/kubelet/${HOSTNAME}-key.pem"
+EOM
+
+mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem /var/lib/kubelet/
+mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig
+mv ca.pem /var/lib/kubernetes/
+
+systemctl daemon-reload
+systemctl enable kubelet
+sudo systemctl start kubelet
+
diff --git a/worker_setup.sh b/worker_setup.sh
new file mode 100755
index 0000000..5b93398
--- /dev/null
+++ b/worker_setup.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+##
+## Script to provide commands to run on all worker nodes
+##
+sudo yum install -y conntrack-tools kubelet docker socat containernetworking-plugins
+sudo systemctl start docker && sudo systemctl enable docker
+
+sudo mv config /etc/selinux/
+sudo setenforce 0
+
+sudo mkdir -p /etc/cni/net.d
+sudo mv 10-bridge.conf /etc/cni/net.d/
+sudo mv 99-loopback.conf /etc/cni/net.d/
+
+###########################
+# Configuring the Kubelet #
+###########################
+sudo curl https://storage.googleapis.com/kubernetes-release/release/v1.10.3/bin/linux/amd64/kubelet -o /usr/local/bin/kubelet
+sudo chmod 755 /usr/local/bin/kubelet
+
+sudo mkdir -p /etc/kubernetes
+sudo mkdir -p /var/lib/kubelet
+sudo mkdir -p /var/lib/kubernetes
+sudo mv kubelet.service /etc/systemd/system
+sudo mv kubelet /etc/kubernetes/
+sudo mv kubelet-config.yaml /var/lib/kubelet/
+sudo mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem /var/lib/kubelet/
+sudo mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig
+sudo mv ca.pem /var/lib/kubernetes/
+
+sudo mkdir -p /var/lib/kube-proxy
+sudo mv kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig
+sudo mv kube-proxy.service /etc/systemd/system/
+sudo mv proxy /etc/kubernetes/
+sudo mv kube-proxy-config.yaml /var/lib/kube-proxy/
+
+sudo curl https://storage.googleapis.com/kubernetes-release/release/v1.10.3/bin/linux/amd64/kube-proxy -o /usr/local/bin/kube-proxy
+sudo chmod 755 /usr/local/bin/kube-proxy
+
+sudo systemctl daemon-reload
+sudo systemctl enable kubelet kube-proxy
+sudo systemctl start kubelet kube-proxy
+
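A possible follow-up, not part of the change set above: a sketch of a post-install check that confirms kubelet and kube-proxy are running on each worker and that the nodes have registered. It reuses the .gce_kubernetes.config conventions and gcloud compute ssh access from the other scripts; the admin kubeconfig path on controller-0 is an assumption, so adjust it to wherever your admin kubeconfig was copied.

#!/bin/bash
##
## Sketch only - verify the worker node setup
## (assumes .gce_kubernetes.config and gcloud ssh access as in the scripts above)
##
. .gce_kubernetes.config

gcloud config set compute/region $GCE_REGION
gcloud config set compute/zone $GCE_ZONE
gcloud config set project $GCE_PROJECT

KUBE_WORKERS=$((KUBE_WORKERS-1))
for ((i=0; i<=$KUBE_WORKERS; i++)); do
  echo "--- worker-${i} ---"
  # Both services should report "active" once worker_setup.sh has run
  gcloud compute ssh worker-${i} -- 'sudo systemctl is-active kubelet kube-proxy'
done

# Node registration is checked from a controller; the kubeconfig path below is
# an assumption - point it at wherever the admin kubeconfig lives on that host.
gcloud compute ssh controller-0 -- 'kubectl get nodes --kubeconfig admin.kubeconfig'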