Post-installation steps for Linux https://docs.docker.com/engine/install/linux-postinstall/ https://docs.docker.com/go/guides/ https://hub.docker.com/ https://hub.docker.com/settings/security https://kubernetes.io/docs/reference/kubectl/cheatsheet/ https://kubernetes.io/docs/user-guide/jsonpath https://kubernetes.io/docs/api https://kubernetes.io/docs/reference/using-api/deprecation-guide/ https://github.com/kubernetes/kubernetes/blob/master/pkg/controller https://github.com/luksa/kubernetes-in-action https://forums.manning.com/forums/kubernetes-in-action https://app.slack.com/client/T09NY5SBT/C09NXKJKA https://www.mongodb.com/community/forums/top/weekly https://cloud.google.com/compute/docs/disks/add-persistent-disk#formatting https://cloud.google.com/compute/docs/disks/add-persistent-disk#gcloud Google Kubernetes Engine Google Compute Engine Nov 15, 9.2.1 gcloud> cd v1 v1> docker login v1> docker build -t marcsf/kubia . v1> docker push marcsf/kubia gcloud> k create -f kubia-rc-and-service-v1.yaml replicationcontroller/kubia-v1 created Error from server (AlreadyExists): error when creating "kubia-rc-and-service-v1.yaml": services "kubia" already exists gcloud> k delete svc kubia service "kubia" deleted gcloud> k create -f kubia-rc-and-service-v1.yaml service/kubia created Error from server (AlreadyExists): error when creating "kubia-rc-and-service-v1.yaml": replicationcontrollers "kubia-v1" already exists gcloud> k get svc kubia NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubia LoadBalancer 10.116.6.48 34.140.65.129 80:32270/TCP 89s gcloud> k get rc kubia NAME DESIRED CURRENT READY AGE kubia 3 3 0 14d gcloud> k get rc kubia-v1 NAME DESIRED CURRENT READY AGE kubia-v1 3 3 0 2m34s gcloud> k get po | grep kubia-v1 kubia-v1-gd7tg 0/1 ImagePullBackOff 0 4m40s kubia-v1-lbclg 0/1 ImagePullBackOff 0 4m41s kubia-v1-zd8lw 0/1 ImagePullBackOff 0 4m41s v1> docker tag marcsf/kubia marcsf/kubia:v1 v1> docker push marcsf/kubia:v1 v1> docker pull marcsf/kubia:v1 v1: Pulling from marcsf/kubia Digest: sha256:53f9ea3c8f425c6e7d958205115f6fc991552b9de2da6ce6a60e01360fcab406 Status: Image is up to date for marcsf/kubia:v1 docker.io/marcsf/kubia:v1 gcloud> k logs --since=60s --timestamps=true kubia-v1-gd7tg Error from server (BadRequest): container "nodejs" in pod "kubia-v1-gd7tg" is waiting to start: trying and failing to pull image gcloud> k delete svc kubia service "kubia" deleted gcloud> k delete rc kubia-v1 replicationcontroller "kubia-v1" deleted gcloud> k create -f kubia-rc-and-service-v1.yaml replicationcontroller/kubia-v1 created service/kubia created gcloud> k get po | grep kubia-v1 kubia-v1-cj2rp 1/1 Running 0 25s kubia-v1-hdrjf 1/1 Running 0 25s kubia-v1-hlf5m 1/1 Running 0 25s gcloud> k get svc kubia NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubia LoadBalancer 10.116.12.105 34.140.4.154 80:31017/TCP 39s gcloud> k get rc kubia-v1 NAME DESIRED CURRENT READY AGE kubia-v1 3 3 3 45s gcloud> k logs --since=60s --timestamps=true kubia-v1-cj2rp 2021-11-15T14:12:38.818979630Z Kubia server starting... 
2021-11-15T14:12:41.976706827Z Received request from ::ffff:130.211.0.238 2021-11-15T14:12:42.449275606Z Received request from ::ffff:130.211.0.240 2021-11-15T14:12:43.032301087Z Received request from ::ffff:130.211.0.250 2021-11-15T14:12:56.975951121Z Received request from ::ffff:130.211.0.238 2021-11-15T14:12:57.448622011Z Received request from ::ffff:130.211.0.240 2021-11-15T14:12:58.034295026Z Received request from ::ffff:130.211.0.250 2021-11-15T14:13:11.974402019Z Received request from ::ffff:130.211.0.238 2021-11-15T14:13:12.451738578Z Received request from ::ffff:130.211.0.240 v1> while true; do curl http://34.140.4.154; done This is v1 running in pod kubia-v1-hdrjf This is v1 running in pod kubia-v1-hlf5m This is v1 running in pod kubia-v1-hdrjf This is v1 running in pod kubia-v1-hlf5m This is v1 running in pod kubia-v1-hdrjf This is v1 running in pod kubia-v1-hdrjf This is v1 running in pod kubia-v1-hdrjf This is v1 running in pod kubia-v1-hlf5m This is v1 running in pod kubia-v1-hlf5m This is v1 running in pod kubia-v1-hlf5m This is v1 running in pod kubia-v1-cj2rp ... v1> cd ../v2 v2> docker build -t marcsf/kubia . v2> docker tag marcsf/kubia marcsf/kubia:v2 v2> docker push marcsf/kubia:v2 gcloud> egrep '\bimagePullPolicy' *.yaml kubia-liveness-probe.yaml: f:imagePullPolicy: {} kubia-liveness-probe.yaml: imagePullPolicy: Always kubia.yaml: f:imagePullPolicy: {} kubia.yaml: imagePullPolicy: Always nginx1.yaml: f:imagePullPolicy: {} nginx1.yaml: imagePullPolicy: Always nginx2.yaml: f:imagePullPolicy: {} nginx2.yaml: imagePullPolicy: Always gcloud> k rolling-update kubia-v1 kubia-v2 --image=marcsf/kubia:v2 Error: unknown command "rolling-update" for "kubectl" Run 'kubectl --help' for usage. gcloud> k --help | grep -i update taint Update the taints on one or more nodes patch Update field(s) of a resource label Update the labels on a resource annotate Update the annotations on a resource gcloud> k get rc | grep kubia kubia 3 3 0 14d kubia-v1 3 3 3 23m gcloud> k create -f kubia-deployment-v1.yaml deployment.apps/kubia created gcloud> k get rs NAME DESIRED CURRENT READY AGE kubia-6849468f7f 3 3 3 100s gcloud> k delete rc --all replicationcontroller "kubia" deleted replicationcontroller "kubia-v1" deleted gcloud> k delete deployment kubia deployment.apps "kubia" deleted gcloud> k get rs No resources found in default namespace. gcloud> k create -f kubia-deployment-v1.yaml --record deployment.apps/kubia created gcloud> k get rs NAME DESIRED CURRENT READY AGE kubia-6849468f7f 3 3 0 3s gcloud> k get rc No resources found in default namespace. gcloud> k rollout status deployment kubia deployment "kubia" successfully rolled out gcloud> k get po | grep kubia- kubia-6849468f7f-cqkft 1/1 Running 0 102s kubia-6849468f7f-kgz9q 1/1 Running 0 102s kubia-6849468f7f-phcbq 1/1 Running 0 102s gcloud> k patch deployment kubia -p '{"spec": {"minReadySeconds": 10}}' deployment.apps/kubia patched gcloud> k set image deployment kubia nodejs=marcsf/kubia:v2 deployment.apps/kubia image updated ... This is v2 running in pod kubia-c79c89cc4-9tvmt This is v1 running in pod kubia-6849468f7f-phcbq This is v2 running in pod kubia-c79c89cc4-q5snq This is v2 running in pod kubia-c79c89cc4-9tvmt This is v1 running in pod kubia-6849468f7f-phcbq ... gcloud> k get rs NAME DESIRED CURRENT READY AGE kubia-6849468f7f 0 0 0 19m kubia-c79c89cc4 3 3 3 5m50s v2> cd ../v3 v3> docker build -t marcsf/kubia . 
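Backing up a step: kubectl rolling-update is gone, and the Deployment created from kubia-deployment-v1.yaml above is what does the rolling update declaratively now. A minimal sketch of what that file presumably contains -- apps/v1 requires the explicit selector, and everything except the image tag and the container name (both visible in the transcript) is assumption:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubia                 # owns the ReplicaSets named kubia-<hash> seen above
spec:
  replicas: 3
  selector:
    matchLabels:
      app: kubia              # must match the pod template labels below
  template:
    metadata:
      labels:
        app: kubia
    spec:
      containers:
      - name: nodejs          # the name used by `k set image deployment kubia nodejs=...`
        image: marcsf/kubia:v1

`k set image` then only swaps that image field, and the Deployment rolls out a new ReplicaSet -- which is what the mixed v1/v2 curl output above shows.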
v3> docker tag marcsf/kubia marcsf/kubia:v3 v3> docker push marcsf/kubia:v3 gcloud> k set image deployment kubia nodejs=marcsf/kubia:v3 deployment.apps/kubia image updated gcloud> k rollout status deployment kubia Waiting for deployment "kubia" rollout to finish: 1 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 1 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 1 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 1 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 2 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 2 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 2 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 2 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 1 old replicas are pending termination... Waiting for deployment "kubia" rollout to finish: 1 old replicas are pending termination... Waiting for deployment "kubia" rollout to finish: 1 old replicas are pending termination... deployment "kubia" successfully rolled out Some internal error has occurred! This is pod kubia-7c96bf698f-ddqtf This is v2 running in pod kubia-c79c89cc4-q5snq This is v2 running in pod kubia-c79c89cc4-q5snq This is v2 running in pod kubia-c79c89cc4-9tvmt This is v2 running in pod kubia-c79c89cc4-9tvmt This is v2 running in pod kubia-c79c89cc4-9tvmt Some internal error has occurred! This is pod kubia-7c96bf698f-ddqtf This is v2 running in pod kubia-c79c89cc4-9tvmt gcloud> k rollout undo deployment kubia deployment.apps/kubia rolled back gcloud> k rollout status deployment kubia Waiting for deployment "kubia" rollout to finish: 2 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 2 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 2 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 2 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 1 old replicas are pending termination... Waiting for deployment "kubia" rollout to finish: 1 old replicas are pending termination... Waiting for deployment "kubia" rollout to finish: 1 old replicas are pending termination... deployment "kubia" successfully rolled out ... This is v2 running in pod kubia-c79c89cc4-w75l4 This is v2 running in pod kubia-c79c89cc4-w75l4 This is v2 running in pod kubia-c79c89cc4-w75l4 ... gcloud> k rollout history deployment kubia deployment.apps/kubia REVISION CHANGE-CAUSE 1 kubectl create --filename=kubia-deployment-v1.yaml --record=true 3 kubectl create --filename=kubia-deployment-v1.yaml --record=true 4 kubectl create --filename=kubia-deployment-v1.yaml --record=true gcloud> k get rs NAME DESIRED CURRENT READY AGE kubia-6849468f7f 0 0 0 40m kubia-7c96bf698f 0 0 0 11m kubia-c79c89cc4 3 3 3 27m gcloud> k get po | grep kubia- kubia-c79c89cc4-jd8g9 1/1 Running 0 10m kubia-c79c89cc4-kh9q6 1/1 Running 0 10m kubia-c79c89cc4-w75l4 1/1 Running 0 9m52s gcloud> k rollout undo deployment kubia --to-revision=1 deployment.apps/kubia rolled back gcloud> k rollout status deployment kubia Waiting for deployment "kubia" rollout to finish: 1 out of 3 new replicas have been updated... 
Waiting for deployment "kubia" rollout to finish: 1 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 1 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 2 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 2 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 2 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 2 out of 3 new replicas have been updated... Waiting for deployment "kubia" rollout to finish: 1 old replicas are pending termination... Waiting for deployment "kubia" rollout to finish: 1 old replicas are pending termination... Waiting for deployment "kubia" rollout to finish: 1 old replicas are pending termination... deployment "kubia" successfully rolled out gcloud> k get rs NAME DESIRED CURRENT READY AGE kubia-6849468f7f 3 3 3 44m kubia-7c96bf698f 0 0 0 14m kubia-c79c89cc4 0 0 0 30m gcloud> k rollout history deployment/kubia deployment.apps/kubia REVISION CHANGE-CAUSE 3 kubectl create --filename=kubia-deployment-v1.yaml --record=true 4 kubectl create --filename=kubia-deployment-v1.yaml --record=true 5 kubectl create --filename=kubia-deployment-v1.yaml --record=true v3> cd ../v4 v4> docker build -t marcsf/kubia . v4> docker tag marcsf/kubia marcsf/kubia:v4 v4> docker push marcsf/kubia:v4 gcloud> k set image deployment kubia nodejs=marcsf/kubia:v4 deployment.apps/kubia image updated gcloud> k rollout pause deployment/kubia deployment.apps/kubia paused gcloud> k get rs NAME DESIRED CURRENT READY AGE kubia-6849468f7f 3 3 3 56m kubia-7c96bf698f 0 0 0 26m kubia-7cc57d57fb 1 1 1 21s kubia-c79c89cc4 0 0 0 42m ... This is v1 running in pod kubia-6849468f7f-28kt4 This is v4 running in pod kubia-7cc57d57fb-cw5s7 This is v1 running in pod kubia-6849468f7f-kxz2p This is v1 running in pod kubia-6849468f7f-28kt4 ... gcloud> k rollout resume deployment/kubia deployment.apps/kubia resumed gcloud> k apply -f kubia-deployment-v3-with-readinesscheck.yaml Warning: resource deployments/kubia is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. deployment.apps/kubia configured gcloud> k get deployment kubia -o yaml | grep -A1 last-applied-configuration kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"name":"kubia","namespace":"default"},"spec":{"minReadySeconds":10,"replicas":3,"selector":{"matchLabels":{"app":"kubia"}},"strategy":{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0},"type":"RollingUpdate"},"template":{"metadata":{"labels":{"app":"kubia"},"name":"kubia"},"spec":{"containers":[{"image":"marcsf/kubia:v3","name":"nodejs","readinessProbe":{"httpGet":{"path":"/","port":8080},"periodSeconds":1}}]}}}} -- f:kubectl.kubernetes.io/last-applied-configuration: {} f:spec: gcloud> k rollout status deployment kubia Waiting for deployment "kubia" rollout to finish: 1 out of 3 new replicas have been updated... ... This is v2 running in pod kubia-c79c89cc4-kcdr4 This is v2 running in pod kubia-c79c89cc4-jsshr This is v2 running in pod kubia-c79c89cc4-jsshr This is v2 running in pod kubia-c79c89cc4-jsshr ...  
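The last-applied-configuration annotation above is effectively kubia-deployment-v3-with-readinesscheck.yaml in JSON form; re-rendered as YAML for readability (a straight transliteration of that dump, nothing added):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubia
spec:
  minReadySeconds: 10
  replicas: 3
  selector:
    matchLabels:
      app: kubia
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0          # no old pod is removed until a new one has been Ready for 10s
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      containers:
      - name: nodejs
        image: marcsf/kubia:v3   # v3 starts answering "Some internal error has occurred!"
        readinessProbe:
          periodSeconds: 1
          httpGet:
            path: /
            port: 8080

With maxUnavailable 0 and a readiness probe the new pod never passes, the rollout stalls at one kubia-5ccd77c779 pod stuck at 0/1, which is what the next block shows.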
[1]+ Stopped kubectl rollout status deployment kubia gcloud> k get po | grep kubia- kubia-5ccd77c779-h9tvt 0/1 Running 0 6m25s kubia-c79c89cc4-5db9x 1/1 Running 0 12m kubia-c79c89cc4-jsshr 1/1 Running 0 12m kubia-c79c89cc4-kcdr4 1/1 Running 0 11m gcloud> fg kubectl rollout status deployment kubia C-c C-z [1]+ Stopped kubectl rollout status deployment kubia gcloud> bg [1]+ kubectl rollout status deployment kubia & gcloud> error: deployment "kubia" exceeded its progress deadline k describe deployment kubia ... Conditions: Type Status Reason ---- ------ ------ Available True MinimumReplicasAvailable Progressing False ProgressDeadlineExceeded OldReplicaSets: kubia-c79c89cc4 (3/3 replicas created) NewReplicaSet: kubia-5ccd77c779 (1/1 replicas created) Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal ScalingReplicaSet 51m deployment-controller Scaled up replica set kubia-7c96bf698f to 1 Normal ScalingReplicaSet 51m deployment-controller Scaled up replica set kubia-7c96bf698f to 2 Normal ScalingReplicaSet 50m deployment-controller Scaled up replica set kubia-7c96bf698f to 3 Normal ScalingReplicaSet 50m deployment-controller Scaled down replica set kubia-c79c89cc4 to 1 Normal ScalingReplicaSet 50m deployment-controller Scaled down replica set kubia-c79c89cc4 to 0 Normal ScalingReplicaSet 49m deployment-controller Scaled down replica set kubia-7c96bf698f to 2 Normal ScalingReplicaSet 38m (x2 over 51m) deployment-controller Scaled down replica set kubia-c79c89cc4 to 2 Normal ScalingReplicaSet 24m deployment-controller Scaled up replica set kubia-7cc57d57fb to 1 Normal ScalingReplicaSet 22m (x2 over 67m) deployment-controller Scaled down replica set kubia-6849468f7f to 2 Normal ScalingReplicaSet 22m deployment-controller Scaled up replica set kubia-7cc57d57fb to 2 Normal ScalingReplicaSet 22m (x2 over 66m) deployment-controller Scaled down replica set kubia-6849468f7f to 1 Normal ScalingReplicaSet 22m deployment-controller Scaled up replica set kubia-7cc57d57fb to 3 Normal ScalingReplicaSet 22m (x2 over 66m) deployment-controller Scaled down replica set kubia-6849468f7f to 0 Normal ScalingReplicaSet 16m (x3 over 67m) deployment-controller Scaled up replica set kubia-c79c89cc4 to 1 Normal ScalingReplicaSet 15m (x3 over 67m) deployment-controller Scaled up replica set kubia-c79c89cc4 to 2 Normal ScalingReplicaSet 15m deployment-controller Scaled down replica set kubia-7cc57d57fb to 2 Normal ScalingReplicaSet 10m (x12 over 49m) deployment-controller (combined from similar events): Scaled up replica set kubia-5ccd77c779 to 1 [1]+ Exit 1 kubectl rollout status deployment kubia gcloud> k rollout undo deployment kubia deployment.apps/kubia rolled back -------------------------------- Start on boot: sudo systemctl enable docker.service sudo systemctl enable containerd.service Not done. But: ~> sudo systemctl list-unit-files | grep -i skype snap-skype-186.mount enabled snap-skype-187.mount enabled ~> sudo systemctl disable snap-skype-186.mount ~> sudo systemctl disable snap-skype-187.mount ~> sudo systemctl list-unit-files | grep -i skype snap-skype-186.mount disabled snap-skype-187.mount disabled ~> sudo systemctl -a | grep -i skype run-snapd-ns-skype.mnt.mount loaded active mounted /run/snapd/ns/skype.mnt snap-skype-186.mount loaded active mounted Mount unit for skype, revision 186 snap-skype-187.mount loaded active mounted Mount unit for skype, revision 187 Probably loaded, but not starting at boot anymore? 
~> sudo systemctl -a | grep -i docker
sys-devices-virtual-net-docker0.device loaded active plugged /sys/devices/virtual/net/docker0
sys-subsystem-net-devices-docker0.device loaded active plugged /sys/subsystem/net/devices/docker0
docker.service loaded active running Docker Application Container Engine
docker.socket loaded active running Docker Socket for the API
~> sudo systemctl list-unit-files | grep -i docker
docker.service enabled
docker.socket enabled
Found:
~> find /etc -type f -name docker\* 2>/dev/null
/etc/default/docker
/etc/init.d/docker
/etc/init/docker.conf
/etc/apt/sources.list.d/docker.list
~> docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
hello-world latest feb5d9fea6a5 3 weeks ago 13.3kB
busybox latest 16ea53ea7c65 5 weeks ago 1.24MB
~> docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
~> docker login
~> docker run -p 8080:8080 -d marcsf/kubia
~> docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
marcsf/kubia latest b46ecd7b639a 41 hours ago 660MB
hello-world latest feb5d9fea6a5 3 weeks ago 13.3kB
busybox latest 16ea53ea7c65 5 weeks ago 1.24MB
~> docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1f663bab2e4c marcsf/kubia "node app.js" 21 seconds ago Up 19 seconds 0.0.0.0:8080->8080/tcp, :::8080->8080/tcp relaxed_shamir
~> curl localhost:8080
You've hit 1f663bab2e4c
~> docker exec -it relaxed_shamir bash
root@1f663bab2e4c:/# ps
PID TTY TIME CMD
12 pts/0 00:00:00 bash
18 pts/0 00:00:00 ps
root@1f663bab2e4c:/# ps aux
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
root 1 0.1 0.1 614432 26476 ? Ssl 11:05 0:00 node app.js
root 21 0.7 0.0 20244 3036 pts/0 Ss 11:10 0:00 bash
root 27 0.0 0.0 17500 2072 pts/0 R+ 11:10 0:00 ps aux
root@1f663bab2e4c:/# exit
~> ps aux | grep app.js
root 5226 0.1 0.1 614432 26476 ? Ssl 12:05 0:00 node app.js
marc 5387 0.0 0.0 18112 1056 pts/1 S+ 12:10 0:00 grep app.js
~> docker stop relaxed_shamir
relaxed_shamir
~> docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
~> docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
marcsf/kubia latest b46ecd7b639a 41 hours ago 660MB
hello-world latest feb5d9fea6a5 3 weeks ago 13.3kB
busybox latest 16ea53ea7c65 5 weeks ago 1.24MB
~> docker rm relaxed_shamir
relaxed_shamir
~> docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
marcsf/kubia latest b46ecd7b639a 41 hours ago 660MB
hello-world latest feb5d9fea6a5 3 weeks ago 13.3kB
busybox latest 16ea53ea7c65 5 weeks ago 1.24MB
--------------------------------
2.2.2 GKE -- not from SF
Installed minikube on both ubuntu and mac -- but different kubectl?
My First project (accepting default...)
Configure standard cluster
Downloaded gcloud sdk (361.0.0) and extracted under ~/tmp/gcloud
gcloud> gcloud components install kubectl
gcloud> gcloud auth login
gcloud> gcloud config set project dev-copilot-329712
gcloud> gcloud container clusters create kubia --num-nodes 3 --machine-type f1-micro --zone europe-west2
WARNING: Starting in January 2021, clusters will use the Regular release channel by default when `--cluster-version`, `--release-channel`, `--no-enable-autoupgrade`, and `--no-enable-autorepair` flags are not specified.
WARNING: Currently VPC-native is the default mode during cluster creation for versions greater than 1.21.0-gke.1500. To create advanced routes based clusters, please pass the `--no-enable-ip-alias` flag
WARNING: Starting with version 1.18, clusters will have shielded GKE nodes by default.
WARNING: Your Pod address range (`--cluster-ipv4-cidr`) can accommodate at most 1008 node(s).
WARNING: Starting with version 1.19, newly created clusters and node-pools will have COS_CONTAINERD as the default node image when no image type is specified. ERROR: (gcloud.container.clusters.create) ResponseError: code=400, message=Node pools of f1-micro machines are not supported due to insufficient memory. gcloud> gcloud container clusters create kubia --num-nodes 3 --machine-type e2-medium --zone europe-west2 WARNING: Starting in January 2021, clusters will use the Regular release channel by default when `--cluster-version`, `--release-channel`, `--no-enable-autoupgrade`, and `--no-enable-autorepair` flags are not specified. WARNING: Currently VPC-native is the default mode during cluster creation for versions greater than 1.21.0-gke.1500. To create advanced routes based clusters, please pass the `--no-enable-ip-alias` flag WARNING: Starting with version 1.18, clusters will have shielded GKE nodes by default. WARNING: Your Pod address range (`--cluster-ipv4-cidr`) can accommodate at most 1008 node(s). WARNING: Starting with version 1.19, newly created clusters and node-pools will have COS_CONTAINERD as the default node image when no image type is specified. ERROR: (gcloud.container.clusters.create) ResponseError: code=403, message=Insufficient regional quota to satisfy request: resource "IN_USE_ADDRESSES": request requires '9.0' and is short '1.0'. project has a quota of '8.0' with '8.0' available. View and manage quotas at https://console.cloud.google.com/iam-admin/quotas?usage=USED&project=dev-copilot-329712. Added options from the command found from the GUI (https://console.cloud.google.com/home/dashboard?project=dev-copilot-329712), but finally, copied the zone from the one in the book: gcloud> gcloud container clusters create kubia --num-nodes 3 --machine-type e2-medium --zone europe-west1-d --max-pods-per-node 110 --enable-ip-alias --no-enable-basic-auth --release-channel regular --disk-type pd-standard --disk-size 100 --metadata disable-legacy-endpoints=true --no-enable-master-authorized-networks --no-enable-intra-node-visibility ... Creating cluster kubia in europe-west1-d...done. Created [https://container.googleapis.com/v1/projects/dev-copilot-329712/zones/europe-west1-d/clusters/kubia]. To inspect the contents of your cluster, go to: https://console.cloud.google.com/kubernetes/workload_/gcloud/europe-west1-d/kubia?project=dev-copilot-329712 kubeconfig entry generated for kubia. NAME LOCATION MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS kubia europe-west1-d 1.20.10-gke.301 34.78.120.160 e2-medium 1.20.10-gke.301 3 RUNNING gcloud> type kubectl kubectl is /home/marc/tmp/gcloud/google-cloud-sdk/bin/kubectl gcloud> kubectl get nodes NAME STATUS ROLES AGE VERSION gke-kubia-default-pool-df743581-2r9p Ready 29m v1.20.10-gke.301 gke-kubia-default-pool-df743581-fwfr Ready 29m v1.20.10-gke.301 gke-kubia-default-pool-df743581-z83d Ready 29m v1.20.10-gke.301 gcloud> gcloud compute ssh gke-kubia-default-pool-df743581-2r9p WARNING: The private SSH key file for gcloud does not exist. WARNING: The public SSH key file for gcloud does not exist. WARNING: You do not have an SSH key for gcloud. WARNING: SSH keygen will be executed to generate a key. Generating public/private rsa key pair. Enter passphrase (empty for no passphrase): ... Welcome to Kubernetes v1.20.10-gke.301! ... 
marc@gke-kubia-default-pool-df743581-2r9p ~ $ hostname gke-kubia-default-pool-df743581-2r9p marc@gke-kubia-default-pool-df743581-2r9p ~ $ ls marc@gke-kubia-default-pool-df743581-2r9p ~ $ ps PID TTY TIME CMD 12711 pts/0 00:00:00 bash 13092 pts/0 00:00:00 ps marc@gke-kubia-default-pool-df743581-2r9p ~ $ uname -a Linux gke-kubia-default-pool-df743581-2r9p 5.4.120+ #1 SMP Fri Jul 23 10:06:55 PDT 2021 x86_64 Intel(R) Xeon(R) CPU @ 2.20GHz GenuineIntel GNU/Linux marc@gke-kubia-default-pool-df743581-2r9p ~ $ logout Connection to 34.78.92.62 closed. gcloud> kubectl describe node gke-kubia-default-pool-df743581-2r9p Name: gke-kubia-default-pool-df743581-2r9p Roles: Labels: beta.kubernetes.io/arch=amd64 beta.kubernetes.io/instance-type=e2-medium ... gcloud> alias k=kubectl gcloud> k run kubia --image=marcsf/kubia --port=8080 --generator=run/v1 Flag --generator has been deprecated, has no effect and will be removed in the future. pod/kubia created gcloud> k get pods NAME READY STATUS RESTARTS AGE kubia 1/1 Running 0 5m9s gcloud> k expose rc kubia --type=LoadBalancer --name kubia-http Error from server (NotFound): replicationcontrollers "kubia" not found gcloud> k get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.116.0.1 443/TCP 71m gcloud> k expose pod/kubia --type=LoadBalancer --name kubia-http service/kubia-http exposed gcloud> k get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.116.0.1 443/TCP 74m kubia-http LoadBalancer 10.116.13.34 8080:31169/TCP 19s gcloud> k get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.116.0.1 443/TCP 75m kubia-http LoadBalancer 10.116.13.34 35.187.165.42 8080:31169/TCP 60s gcloud> k get replicationcontrollers No resources found in default namespace. gcloud> kubectl describe node gke-kubia-default-pool-df743581-2r9p | grep IP InternalIP: 10.132.0.4 ExternalIP: 34.78.92.62 gcloud> k get ns NAME STATUS AGE default Active 25h kube-node-lease Active 25h kube-public Active 25h kube-system Active 25h No resources found in kube-node-lease namespace. gcloud> k get --namespace kube-public rc No resources found in kube-public namespace. gcloud> k get --namespace kube-system rc No resources found in kube-system namespace. gcloud> k get -h | grep namespace Prints a table of the most important information about the specified resources. You can filter the list using a label selector and the --selector flag. If the desired resource type is namespaced you will only see results in your current namespace unless you pass --all-namespaces. -A, --all-namespaces=false: If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. gcloud> k get --all-namespaces rc No resources found gcloud> k describe rc/kubia Error from server (NotFound): replicationcontrollers "kubia" not found gcloud> k describe pod/kubia ... gcloud> k describe svc/kubia-http | egrep '^(Sel|Typ)' Selector: run=kubia Type: LoadBalancer gcloud> k get rs No resources found in default namespace. 
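The expose of the bare pod works because `kubectl run` labelled it run=kubia (there is no ReplicationController, hence the earlier NotFound). Roughly the Service that `k expose pod/kubia --type=LoadBalancer --name kubia-http` generates -- selector and ports taken from the kubectl output above, the rest assumed:

apiVersion: v1
kind: Service
metadata:
  name: kubia-http
spec:
  type: LoadBalancer          # GKE provisions the external IP 35.187.165.42 seen above
  selector:
    run: kubia                # the label kubectl run put on the pod
  ports:
  - port: 8080                # listed as 8080:31169/TCP by `k get svc`
    targetPort: 8080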
gcloud> k get -A rs NAMESPACE NAME DESIRED CURRENT READY AGE kube-system event-exporter-gke-67986489c8 1 1 1 25h kube-system konnectivity-agent-54d48c955 3 3 3 25h kube-system konnectivity-agent-autoscaler-6cb774c9cc 1 1 1 25h kube-system kube-dns-autoscaler-844c9d9448 1 1 1 25h kube-system kube-dns-b4f5c58c7 2 2 2 25h kube-system l7-default-backend-56cb9644f6 1 1 1 25h kube-system metrics-server-v0.3.6-57bc866888 0 0 0 25h kube-system metrics-server-v0.3.6-886d66856 0 0 0 25h kube-system metrics-server-v0.3.6-9c5bbf784 1 1 1 25h Created a 'deployment' using app nginx-1 (from the GUI) https://console.cloud.google.com/kubernetes/deployment/europe-west1-d/kubia/default/nginx-1/overview?project=dev-copilot-329712 gcloud> k get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES kubia 1/1 Running 0 25h 10.112.2.4 gke-kubia-default-pool-df743581-2r9p nginx-1-754ddbcd6c-k8fsr 1/1 Running 0 92s 10.112.2.5 gke-kubia-default-pool-df743581-2r9p nginx-1-754ddbcd6c-qd62x 1/1 Running 0 92s 10.112.0.4 gke-kubia-default-pool-df743581-fwfr nginx-1-754ddbcd6c-rpn85 1/1 Running 0 92s 10.112.0.5 gke-kubia-default-pool-df743581-fwfr gcloud> k get pods NAME READY STATUS RESTARTS AGE kubia 1/1 Running 0 25h nginx-1-754ddbcd6c-k8fsr 1/1 Running 0 2m nginx-1-754ddbcd6c-qd62x 1/1 Running 0 2m nginx-1-754ddbcd6c-rpn85 1/1 Running 0 2m gcloud> k get nodes NAME STATUS ROLES AGE VERSION gke-kubia-default-pool-df743581-2r9p Ready 26h v1.20.10-gke.301 gke-kubia-default-pool-df743581-fwfr Ready 26h v1.20.10-gke.301 gke-kubia-default-pool-df743581-z83d Ready 26h v1.20.10-gke.301 gcloud> k get deployments NAME READY UP-TO-DATE AVAILABLE AGE nginx-1 3/3 3 3 3m8s gcloud> gcloud compute ssh gke-kubia-default-pool-df743581-2r9p ... marc@gke-kubia-default-pool-df743581-2r9p ~ $ last marc pts/0 86.42.12.211 Fri Oct 22 16:32 still logged in marc ssh 86.42.12.211 Fri Oct 22 16:32 still logged in marc pts/0 86.42.12.211 Thu Oct 21 14:26 - 14:28 (00:02) marc ssh 86.42.12.211 Thu Oct 21 14:26 - 14:28 (00:02) marc ssh 86.42.12.211 Thu Oct 21 14:26 - 14:26 (00:00) gke-bf62 ssh 34.78.120.160 Thu Oct 21 13:56 - 14:26 (00:30) reboot system boot 5.4.120+ Thu Oct 21 13:54 still running wtmp begins Tue Aug 24 05:50:54 2021 marc@gke-kubia-default-pool-df743581-2r9p ~ $ logout Connection to 34.78.92.62 closed. gcloud> gcloud compute ssh gke-kubia-default-pool-df743581-fwfr ... marc@gke-kubia-default-pool-df743581-fwfr ~ $ last marc pts/0 86.42.12.211 Fri Oct 22 16:33 still logged in marc ssh 86.42.12.211 Fri Oct 22 16:33 still logged in gke-bf62 ssh 34.78.120.160 Thu Oct 21 13:56 - 16:33 (1+02:37) reboot system boot 5.4.120+ Thu Oct 21 13:54 still running wtmp begins Tue Aug 24 05:50:54 2021 marc@gke-kubia-default-pool-df743581-fwfr ~ $ logout Connection to 34.76.93.123 closed. gcloud> k scale --current-replicas=3 --replicas=2 deployment nginx-1 deployment.apps/nginx-1 scaled gcloud> k get pods NAME READY STATUS RESTARTS AGE kubia 1/1 Running 0 26h nginx-1-754ddbcd6c-k8fsr 1/1 Running 0 29m nginx-1-754ddbcd6c-qd62x 1/1 Running 0 29m gcloud> k scale --current-replicas=2 --replicas=3 deployment nginx-1 deployment.apps/nginx-1 scaled gcloud> k get pods NAME READY STATUS RESTARTS AGE kubia 1/1 Running 0 26h nginx-1-754ddbcd6c-k8fsr 1/1 Running 0 30m nginx-1-754ddbcd6c-p96zs 1/1 Running 0 3s nginx-1-754ddbcd6c-qd62x 1/1 Running 0 30m And there is an EXPOSE button in the dashboad... for the nginx-1 workload: Exposing a deployment creates a Kubernetes Service. 
A service lets your deployment receive traffic and defines how your deployment is exposed. I accept the defaults: Port: 80 Service type: Load balancer Service name: nginx-1-service gcloud> k get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.116.0.1 443/TCP 27h kubia-http LoadBalancer 10.116.13.34 35.187.165.42 8080:31169/TCP 25h nginx-1-service LoadBalancer 10.116.2.138 34.140.201.240 80:31495/TCP 3m8s and the page at http://34.140.201.240/ (it is html, so... ugly with curl) Welcome to nginx! If you see this page, the nginx web server is successfully installed and working. Further configuration is required. For online documentation and support please refer to nginx.org. Commercial support is available at nginx.com. Thank you for using nginx. gcloud> k get po kubia -o yaml > kubia.yaml gcloud> wc -l kubia.yaml 156 kubia.yaml gcloud> k get po nginx-1-754ddbcd6c-k8fsr -o yaml > nginx1.yaml gcloud> k get po nginx-1-754ddbcd6c-p96zs -o yaml > nginx2.yaml gcloud> wc -l nginx*.yaml 168 nginx1.yaml 168 nginx2.yaml 336 total gcloud> diff nginx*.yaml | egrep -i 'ip|version' < k:{"ip":"10.112.2.5"}: > k:{"ip":"10.112.0.6"}: < resourceVersion: "476710" > resourceVersion: "485680" < hostIP: 10.132.0.4 > hostIP: 10.132.0.2 < podIP: 10.112.2.5 > podIP: 10.112.0.6 < - ip: 10.112.2.5 > - ip: 10.112.0.6 gcloud> k create -f kubia-manual.yaml pod/kubia-manual created gcloud> k logs kubia-manual Kubia server starting... gcloud> netstat -an | grep 8888 gcloud> k port-forward kubia-manual 8888:8080 Forwarding from 127.0.0.1:8888 -> 8080 Forwarding from [::1]:8888 -> 8080 ~> curl localhost:8888 You've hit kubia-manual ~> netstat -an | grep 8888 tcp 0 0 127.0.0.1:8888 0.0.0.0:* LISTEN tcp 0 0 127.0.0.1:43046 127.0.0.1:8888 TIME_WAIT tcp6 0 0 ::1:8888 :::* LISTEN ... Handling connection for 8888 C-c C-cgcloud> gcloud> k get po --show-labels NAME READY STATUS RESTARTS AGE LABELS kubia 1/1 Running 0 3d21h run=kubia kubia-manual 1/1 Running 0 17m nginx-1-754ddbcd6c-k8fsr 1/1 Running 0 2d19h app=nginx-1,pod-template-hash=754ddbcd6c nginx-1-754ddbcd6c-p96zs 1/1 Running 0 2d19h app=nginx-1,pod-template-hash=754ddbcd6c nginx-1-754ddbcd6c-qd62x 1/1 Running 0 2d19h app=nginx-1,pod-template-hash=754ddbcd6c gcloud> k label po kubia-manual creation_method=manual pod/kubia-manual labeled gcloud> k get po -L app,run NAME READY STATUS RESTARTS AGE APP RUN kubia 1/1 Running 0 3d23h kubia kubia-manual 1/1 Running 0 163m nginx-1-754ddbcd6c-k8fsr 1/1 Running 0 2d22h nginx-1 nginx-1-754ddbcd6c-p96zs 1/1 Running 0 2d21h nginx-1 nginx-1-754ddbcd6c-qd62x 1/1 Running 0 2d22h nginx-1 gcloud> k get po -l run NAME READY STATUS RESTARTS AGE kubia 1/1 Running 0 3d23h gcloud> k get po -l '!app' NAME READY STATUS RESTARTS AGE kubia 1/1 Running 0 3d23h kubia-manual 1/1 Running 0 164m gcloud> k get ns NAME STATUS AGE default Active 4d kube-node-lease Active 4d kube-public Active 4d kube-system Active 4d gcloud> k get po --namespace kube-system | head -5 NAME READY STATUS RESTARTS AGE event-exporter-gke-67986489c8-dp7dm 2/2 Running 0 4d fluentbit-gke-2mjv7 2/2 Running 0 4d fluentbit-gke-5xmcd 2/2 Running 0 4d fluentbit-gke-xh49j 2/2 Running 0 4d gcloud> k get po -n kube-node-lease No resources found in kube-node-lease namespace. 
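kubia-manual.yaml is presumably close to the book's minimal pod manifest, with the image swapped for the one pushed earlier; a sketch under that assumption:

apiVersion: v1
kind: Pod
metadata:
  name: kubia-manual
  # no labels here -- creation_method=manual is only added below with `k label po`
spec:
  containers:
  - name: kubia
    image: marcsf/kubia       # assumption: same image as the kubectl-run pod
    ports:
    - containerPort: 8080     # the port `k port-forward kubia-manual 8888:8080` targets
      protocol: TCP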
gcloud> k get po NAME READY STATUS RESTARTS AGE kubia 1/1 Running 0 4d kubia-manual 1/1 Running 0 3h13m nginx-1-754ddbcd6c-k8fsr 1/1 Running 0 2d22h nginx-1-754ddbcd6c-p96zs 1/1 Running 0 2d22h nginx-1-754ddbcd6c-qd62x 1/1 Running 0 2d22h gcloud> k delete po --all pod "kubia" deleted pod "kubia-manual" deleted pod "nginx-1-754ddbcd6c-k8fsr" deleted pod "nginx-1-754ddbcd6c-p96zs" deleted pod "nginx-1-754ddbcd6c-qd62x" deleted gcloud> k get po NAME READY STATUS RESTARTS AGE nginx-1-754ddbcd6c-7s56c 1/1 Running 0 42s nginx-1-754ddbcd6c-l54z2 1/1 Running 0 43s nginx-1-754ddbcd6c-xwtbq 1/1 Running 0 42s gcloud> k delete all --all pod "nginx-1-754ddbcd6c-7s56c" deleted pod "nginx-1-754ddbcd6c-l54z2" deleted pod "nginx-1-754ddbcd6c-xwtbq" deleted service "kubernetes" deleted service "kubia-http" deleted service "nginx-1-service" deleted deployment.apps "nginx-1" deleted replicaset.apps "nginx-1-754ddbcd6c" deleted horizontalpodautoscaler.autoscaling "nginx-1-hpa-azsj" deleted gcloud> k get po No resources found in default namespace. gcloud> docker images REPOSITORY TAG IMAGE ID CREATED SIZE marcsf/kubia latest b46ecd7b639a 6 days ago 660MB hello-world latest feb5d9fea6a5 4 weeks ago 13.3kB busybox latest 16ea53ea7c65 6 weeks ago 1.24MB gcloud> docker login gcloud> docker run -p 8080:8080 -d luksa/kubia-unhealthy Unable to find image 'luksa/kubia-unhealthy:latest' locally latest: Pulling from luksa/kubia-unhealthy ... Status: Downloaded newer image for luksa/kubia-unhealthy:latest 4bd16cd7c07d340008293067171b14985d4e2c6550dcffe271aac8a05a5e3617 gcloud> docker images | grep luksa luksa/kubia-unhealthy latest 2b208508abf7 4 years ago 666MB gcloud> docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 4bd16cd7c07d luksa/kubia-unhealthy "node app.js" 2 minutes ago Up 2 minutes 0.0.0.0:8080->8080/tcp, :::8080->8080/tcp quirky_jackson gcloud> k run kubia-liveness --image=luksa/kubia-unhealthy --port=8080 pod/kubia-liveness created gcloud> k get po kubia-liveness NAME READY STATUS RESTARTS AGE kubia-liveness 1/1 Running 0 25s gcloud> k get po kubia-liveness -o yaml > kubia-liveness-probe.yaml But the liveness probe doesn't seem to work... gcloud> k get po kubia-liveness -o yaml | grep livenessProbe gcloud> k logs kubia-liveness Kubia server starting... 
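The probe is missing simply because `kubectl run` only sets the image and port; a liveness probe has to be declared in the pod spec. A sketch along the lines of the book's kubia-liveness-probe.yaml (the initialDelaySeconds value is an assumption):

apiVersion: v1
kind: Pod
metadata:
  name: kubia-liveness
spec:
  containers:
  - name: kubia
    image: luksa/kubia-unhealthy   # app.js starts returning 500 after a few requests
    livenessProbe:
      httpGet:                     # kubelet GETs this and restarts the container on repeated failure
        path: /
        port: 8080
      initialDelaySeconds: 15      # assumed: give the node process time to start
    ports:
    - containerPort: 8080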
gcloud> k get po kubia-liveness NAME READY STATUS RESTARTS AGE kubia-liveness 1/1 Running 0 7m23s gcloud> k get po kubia-liveness -o yaml | grep -i probe f:lastProbeTime: {} f:lastProbeTime: {} f:lastProbeTime: {} - lastProbeTime: null - lastProbeTime: null - lastProbeTime: null - lastProbeTime: null gcloud> k delete po --all pod "kubia-liveness" deleted gcloud> docker stop quirky_jackson quirky_jackson https://github.com/luksa/kubernetes-in-action/blob/master/Chapter04/kubia-unhealthy/app.js https://github.com/luksa/kubernetes-in-action/blob/master/Chapter04/kubia-rc.yaml s/luksa/marcsf/ gcloud> k create -f kubia-rc.yaml replicationcontroller/kubia created gcloud> k get rc NAME DESIRED CURRENT READY AGE kubia 3 3 2 24s gcloud> k get po NAME READY STATUS RESTARTS AGE kubia-5tz9j 1/1 Running 0 31s kubia-k2cv9 1/1 Running 0 30s kubia-qnbc6 1/1 Running 0 30s gcloud> k delete po kubia-qnbc6 pod "kubia-qnbc6" deleted But before this completed, I got: ~> k get po NAME READY STATUS RESTARTS AGE kubia-5tz9j 1/1 Running 0 2m43s kubia-82ltw 1/1 Running 0 32s kubia-k2cv9 1/1 Running 0 2m42s kubia-qnbc6 0/1 Terminating 0 2m42s gcloud> gcloud compute ssh gke-kubia-default-pool-df743581-2r9p marc@gke-kubia-default-pool-df743581-2r9p ~ $ sudo ifconfig eth0 down and from the side: ~> k get nodes NAME STATUS ROLES AGE VERSION gke-kubia-default-pool-df743581-2r9p NotReady 5d21h v1.20.10-gke.301 gke-kubia-default-pool-df743581-fwfr Ready 5d21h v1.20.10-gke.301 gke-kubia-default-pool-df743581-z83d Ready 5d21h v1.20.10-gke.301 ~> k get nodes NAME READY STATUS RESTARTS AGE kubia-5tz9j 1/1 Terminating 0 14m kubia-82ltw 1/1 Running 0 12m kubia-k2cv9 1/1 Running 0 14m kubia-w7x2d 1/1 Running 0 29s ~> k describe po kubia-82ltw | grep Controlled Controlled By: ReplicationController/kubia ~> k get nodes NAME STATUS ROLES AGE VERSION gke-kubia-default-pool-df743581-fwfr Ready 5d21h v1.20.10-gke.301 gke-kubia-default-pool-df743581-z83d Ready 5d21h v1.20.10-gke.301 gcloud> gcloud compute instances reset gke-kubia-default-pool-df743581-2r9p No zone specified. Using zone [europe-west1-d] for instance: [gke-kubia-default-pool-df743581-2r9p]. Updated [https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/zones/europe-west1-d/instances/gke-kubia-default-pool-df743581-2r9p]. Updates are available for some Cloud SDK components. To install them, please run: $ gcloud components update gcloud> gcloud components update ERROR: gcloud crashed (ValueError): invalid width 0 (must be > 0) If you would like to report this issue, please run the following command: gcloud feedback To check gcloud for common problems, please run the following command: gcloud info --run-diagnostics gcloud> gcloud info --run-diagnostics Network diagnostic detects and fixes local network connection issues. Reachability Check passed. Network diagnostic passed (1/1 checks passed). Property diagnostic detects issues that may be caused by properties. Hidden Property Check passed. Property diagnostic passed (1/1 checks passed). ~> k get nodes NAME STATUS ROLES AGE VERSION gke-kubia-default-pool-df743581-2r9p Ready 4m10s v1.20.10-gke.301 gke-kubia-default-pool-df743581-fwfr Ready 5d21h v1.20.10-gke.301 gke-kubia-default-pool-df743581-z83d Ready 5d21h v1.20.10-gke.301 ~> gcloud components update To help improve the quality of this product, we collect anonymized usage data and anonymized stacktraces when crashes are encountered; additional information is available at . This data is handled in accordance with our privacy policy . 
You may choose to opt in this collection now (by choosing 'Y' at the below prompt), or at any time in the future by running the following command: gcloud config set disable_usage_reporting false Do you want to opt-in (y/N)? y ... Update done! To revert your SDK to the previously installed version, you may run: $ gcloud components update --version 361.0.0 gcloud> k delete rc kubia --cascade=false warning: --cascade=false is deprecated (boolean value) and can be replaced with --cascade=orphan. replicationcontroller "kubia" deleted gcloud> k get pod NAME READY STATUS RESTARTS AGE kubia-82ltw 1/1 Running 0 175m kubia-k2cv9 1/1 Running 0 177m kubia-w7x2d 1/1 Running 0 163m gcloud> k get rs No resources found in default namespace. gcloud> k create -f kubia-replicaset.yaml error: unable to recognize "kubia-replicaset.yaml": no matches for kind "ReplicaSet" in version "apps/v1beta2" Used apps/v1 instead of v1beta2: gcloud> grep apps/v kubia-replicaset.yaml apiVersion: apps/v1 gcloud> k create -f kubia-replicaset.yaml replicaset.apps/kubia created gcloud> k get rs NAME DESIRED CURRENT READY AGE kubia 3 3 3 92s gcloud> k describe po kubia-82ltw | grep Controlled Controlled By: ReplicaSet/kubia gcloud> k edit rs kubia Waiting for Emacs... error: replicasets.apps "kubia" is invalid ... # replicasets.apps "kubia" was not valid: # * : Invalid value: "The edited file failed validation": [yaml: line 15: mapping values are not allowed in this context, invalid character 'a' looking for beginning of value] https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ # replicasets.apps "kubia" was not valid: # * spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string(nil), MatchExpressions:[]v1.LabelSelectorRequirement{v1.LabelSelectorRequirement{Key:"app", Operator:"In", Values:[]string{"kubia"}}}}: field is immutable Tried (but failed): selector: matchExpressions: - {key: app, operator: In, values: [kubia]} gcloud> k delete rs kubia replicaset.apps "kubia" deleted gcloud> k create -f ssd-monitor-daemonset.yaml daemonset.apps/ssd-monitor created gcloud> k get no NAME STATUS ROLES AGE VERSION gke-kubia-default-pool-df743581-2r9p Ready 3h51m v1.20.10-gke.301 gke-kubia-default-pool-df743581-fwfr Ready 6d1h v1.20.10-gke.301 gke-kubia-default-pool-df743581-z83d Ready 6d1h v1.20.10-gke.301 gcloud> k label no gke-kubia-default-pool-df743581-z83d disk=ssd node/gke-kubia-default-pool-df743581-z83d labeled gcloud> k get po NAME READY STATUS RESTARTS AGE ssd-monitor-rdgbl 1/1 Running 0 12s gcloud> k label no gke-kubia-default-pool-df743581-z83d disk=hdd --overwrite node/gke-kubia-default-pool-df743581-z83d labeled gcloud> k get po NAME READY STATUS RESTARTS AGE ssd-monitor-rdgbl 1/1 Terminating 0 116s gcloud> k get ds NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE ssd-monitor 0 0 0 0 0 disk=ssd 3m38s gcloud> k delete ds ssd-monitor daemonset.apps "ssd-monitor" deleted gcloud> k create -f kubia-svc.yaml service/kubia created gcloud> k get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.116.0.1 443/TCP 3d1h kubia ClusterIP 10.116.14.25 80/TCP 20m gcloud> k get po No resources found in default namespace. 
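For reference, kubia-svc.yaml sketched back from the `k get svc` / `k describe svc kubia` / `k get ep` output that follows (selector app=kubia, port 80 to container port 8080); the selector is also why the first `k exec kubia -- curl` below fails until the pod is given the app=kubia label:

apiVersion: v1
kind: Service
metadata:
  name: kubia
spec:
  selector:
    app: kubia         # no running pod carried this label at first, so the service had no endpoints
  ports:
  - port: 80           # the ClusterIP 10.116.14.25:80 used in the curl tests
    targetPort: 8080   # the endpoint later shows up as 10.112.2.4:8080 in `k get ep`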
gcloud> k create -f kubia.yaml pod/kubia created gcloud> k get po NAME READY STATUS RESTARTS AGE kubia 0/1 ContainerCreating 0 5s gcloud> k exec kubia -- curl -s http://10.116.14.25 command terminated with exit code 7 gcloud> k get no NAME STATUS ROLES AGE VERSION gke-kubia-default-pool-df743581-2r9p Ready 29h v1.20.10-gke.301 gke-kubia-default-pool-df743581-fwfr Ready 7d2h v1.20.10-gke.301 gke-kubia-default-pool-df743581-z83d Ready 7d2h v1.20.10-gke.301 gcloud> k exec gke-kubia-default-pool-df743581-2r9p -- curl -s http://10.116.14.25 Error from server (NotFound): pods "gke-kubia-default-pool-df743581-2r9p" not found gcloud> gcloud container clusters create kubia --num-nodes 3 --machine-type e2-medium --zone europe-west1-d WARNING: Starting in January 2021, clusters will use the Regular release channel by default when `--cluster-version`, `--release-channel`, `--no-enable-autoupgrade`, and `--no-enable-autorepair` flags are not specified. WARNING: Currently VPC-native is the default mode during cluster creation for versions greater than 1.21.0-gke.1500. To create advanced routes based clusters, please pass the `--no-enable-ip-alias` flag WARNING: Starting with version 1.18, clusters will have shielded GKE nodes by default. WARNING: Your Pod address range (`--cluster-ipv4-cidr`) can accommodate at most 1008 node(s). WARNING: Starting with version 1.19, newly created clusters and node-pools will have COS_CONTAINERD as the default node image when no image type is specified. ERROR: (gcloud.container.clusters.create) ResponseError: code=409, message=Already exists: projects/dev-copilot-329712/zones/europe-west1-d/clusters/kubia. ~> gcloud container clusters list NAME LOCATION MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS kubia europe-west1-d 1.20.10-gke.301 34.78.120.160 e2-medium 1.20.10-gke.301 3 RUNNING ~> gcloud container clusters describe --zone europe-west1-d kubia | grep node nodeConfig: nodePoolDefaults: nodeConfigDefaults: {} nodePools: selfLink: https://container.googleapis.com/v1/projects/dev-copilot-329712/zones/europe-west1-d/clusters/kubia/nodePools/default-pool gcloud> k label no gke-kubia-default-pool-df743581-2r9p app=kubia node/gke-kubia-default-pool-df743581-2r9p labeled gcloud> k get po --show-labels NAME READY STATUS RESTARTS AGE LABELS kubia 1/1 Running 0 37m run=kubia gcloud> k label po kubia app=kubia pod/kubia labeled gcloud> k get po --show-labels NAME READY STATUS RESTARTS AGE LABELS kubia 1/1 Running 0 43m app=kubia,run=kubia gcloud> k exec kubia -- curl -s http://10.116.14.25 You've hit kubia gcloud> k describe svc kubia | egrep ^Sel Selector: app=kubia gcloud> k get po -L app,run NAME READY STATUS RESTARTS AGE APP RUN kubia 1/1 Running 0 48m kubia kubia gcloud> k get no -L app,run NAME STATUS ROLES AGE VERSION APP RUN gke-kubia-default-pool-df743581-2r9p Ready 30h v1.20.10-gke.301 kubia gke-kubia-default-pool-df743581-fwfr Ready 7d3h v1.20.10-gke.301 gke-kubia-default-pool-df743581-z83d Ready 7d3h v1.20.10-gke.301 gcloud> k label no gke-kubia-default-pool-df743581-fwfr app=kubia node/gke-kubia-default-pool-df743581-fwfr labeled gcloud> k label no gke-kubia-default-pool-df743581-z83d app=kubia node/gke-kubia-default-pool-df743581-z83d labeled gcloud> k get no -L app,run NAME STATUS ROLES AGE VERSION APP RUN gke-kubia-default-pool-df743581-2r9p Ready 30h v1.20.10-gke.301 kubia gke-kubia-default-pool-df743581-fwfr Ready 7d3h v1.20.10-gke.301 kubia gke-kubia-default-pool-df743581-z83d Ready 7d3h v1.20.10-gke.301 kubia gcloud> k get no -L 
beta.kubernetes.io/arch NAME STATUS ROLES AGE VERSION ARCH gke-kubia-default-pool-df743581-2r9p Ready 30h v1.20.10-gke.301 amd64 gke-kubia-default-pool-df743581-fwfr Ready 7d3h v1.20.10-gke.301 amd64 gke-kubia-default-pool-df743581-z83d Ready 7d3h v1.20.10-gke.301 amd64 gcloud> k exec kubia env kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead. PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin HOSTNAME=kubia NPM_CONFIG_LOGLEVEL=info NODE_VERSION=7.10.1 YARN_VERSION=0.24.4 KUBIA_PORT=tcp://10.116.14.25:80 KUBIA_PORT_80_TCP=tcp://10.116.14.25:80 KUBIA_PORT_80_TCP_ADDR=10.116.14.25 KUBERNETES_PORT_443_TCP_PROTO=tcp KUBIA_PORT_80_TCP_PROTO=tcp KUBIA_PORT_80_TCP_PORT=80 KUBERNETES_PORT_443_TCP_PORT=443 KUBERNETES_PORT_443_TCP_ADDR=10.116.0.1 KUBIA_SERVICE_HOST=10.116.14.25 KUBIA_SERVICE_PORT=80 KUBERNETES_SERVICE_HOST=10.116.0.1 KUBERNETES_SERVICE_PORT=443 KUBERNETES_PORT_443_TCP=tcp://10.116.0.1:443 KUBERNETES_SERVICE_PORT_HTTPS=443 KUBERNETES_PORT=tcp://10.116.0.1:443 HOME=/root gcloud> k exec kubia -- env | egrep ^KUBIA_SERVICE KUBIA_SERVICE_HOST=10.116.14.25 KUBIA_SERVICE_PORT=80 gcloud> k exec -it kubia -- bash root@kubia:/# curl http://kubia.default.svc.cluster.local curl http://kubia.default.svc.cluster.local You've hit kubia root@kubia:/# stty -echo stty -echo root@kubia:/# curl http://kubia.default.svc.cluster.local You've hit kubia root@kubia:/# curl http://kubia.default You've hit kubia root@kubia:/# curl http://kubia curl: (7) Failed to connect to kubia port 80: Connection refused root@kubia:/# cat /etc/resolv.conf search default.svc.cluster.local svc.cluster.local cluster.local europe-west1-d.c.dev-copilot-329712.internal c.dev-copilot-329712.internal google.internal nameserver 10.116.0.10 options ndots:5 root@kubia:/# ping kubia PING kubia (10.112.2.4): 56 data bytes 64 bytes from 10.112.2.4: icmp_seq=0 ttl=64 time=0.077 ms 64 bytes from 10.112.2.4: icmp_seq=1 ttl=64 time=0.050 ms 64 bytes from 10.112.2.4: icmp_seq=2 ttl=64 time=0.071 ms 64 bytes from 10.112.2.4: icmp_seq=3 ttl=64 time=0.047 ms 64 bytes from 10.112.2.4: icmp_seq=4 ttl=64 time=0.048 ms C-c C-c64 bytes from 10.112.2.4: icmp_seq=5 ttl=64 time=0.060 ms --- kubia ping statistics --- 6 packets transmitted, 6 packets received, 0% packet loss round-trip min/avg/max/stddev = 0.047/0.059/0.077/0.000 ms gcloud> k get ep NAME ENDPOINTS AGE kubernetes 34.78.120.160:443 3d23h kubia 10.112.2.4:8080 22h gcloud> k create -f external-service.yaml service/external-service created gcloud> k get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE external-service ClusterIP 10.116.2.100 80/TCP 19s kubernetes ClusterIP 10.116.0.1 443/TCP 3d23h kubia ClusterIP 10.116.14.25 80/TCP 22h gcloud> k create -f external-service-endpoints.yaml endpoints/external-service created gcloud> k get ep NAME ENDPOINTS AGE external-service 11.11.11.11:80,22.22.22.22:80 65s kubernetes 34.78.120.160:443 3d23h kubia 10.112.2.4:8080 22h gcloud> k create -f external-service-externalname.yaml Error from server (AlreadyExists): error when creating "external-service-externalname.yaml": services "external-service" already exists gcloud> k create -f kubia-svc-nodeport.yaml service/kubia-nodeport created gcloud> k get svc kubia-nodeport NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubia-nodeport NodePort 10.116.9.43 80:30123/TCP 57s p 136 expected instead of gcloud> gcloud compute firewall-rules create kubia-svc-rule --allow=tcp:30123 Creating firewall...⠹Created 
[https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/global/firewalls/kubia-svc-rule]. Creating firewall...done. NAME NETWORK DIRECTION PRIORITY ALLOW DENY DISABLED kubia-svc-rule default INGRESS 1000 tcp:30123 False gcloud> k get no -o jsonpath='{.items[*].status.addresses[?(@.type=="ExternalIP")].address}'; echo 34.140.243.120 34.76.93.123 34.77.252.162 gcloud> curl http://34.140.243.120:30123 You've hit kubia gcloud> k create -f kubia-svc-loadbalancer.yaml service/kubia-loadbalancer created gcloud> k get svc kubia-loadbalancer NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubia-loadbalancer LoadBalancer 10.116.9.215 35.205.211.203 80:30827/TCP 70s gcloud> curl http://35.205.211.203 You've hit kubia Both work also from the Mac in Salesforce VPN. gcloud> k describe svc kubia-loadbalancer | grep -i affinity Session Affinity: None gcloud> k logs --since=3600s --timestamps=true kubia 2021-10-29T17:01:32.579308630Z Received request from ::ffff:10.112.2.1 2021-10-29T17:06:04.645556516Z Received request from ::ffff:10.112.2.1 2021-10-29T17:11:42.831280743Z Received request from ::ffff:10.132.0.3 2021-10-29T17:12:29.337716021Z Received request from ::ffff:10.112.2.1 2021-10-29T17:13:51.471301169Z Received request from ::ffff:10.132.0.3 2021-10-29T17:13:51.710613617Z Received request from ::ffff:10.132.0.3 gcloud> k describe svc kubia-loadbalancer | grep Port Port: 80/TCP TargetPort: 8080/TCP NodePort: 30827/TCP gcloud> k create -f kubia-ingress.yaml Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress ingress.extensions/kubia created gcloud> k get po --all-namespaces | grep ingress gcloud> k get ingresses NAME CLASS HOSTS ADDRESS PORTS AGE kubia kubia.example.com 34.107.136.15 80 2m11s gcloud> grep kubia /etc/hosts 34.107.136.15 kubia.example.com gcloud> curl http://kubia.example.com You've hit kubia gcloud> openssl genrsa -out tls.key 2048 Generating RSA private key, 2048 bit long modulus (2 primes) ..........................................+++++ .........................+++++ e is 65537 (0x010001) gcloud> openssl req -new -x509 -key tls.key -out tls.cert -days 360 -subj /CN=kubia.example.com Can't load /home/marc/.rnd into RNG 140249527476672:error:2406F079:random number generator:RAND_load_file:Cannot open file:../crypto/rand/randfile.c:88:Filename=/home/marc/.rnd gcloud> grep RANDFILE /etc/ssl/openssl.cnf RANDFILE = $ENV::HOME/.rnd RANDFILE = $dir/private/.rand # private random number file Commented away... https://stackoverflow.com/questions/63893662/cant-load-root-rnd-into-rng gcloud> grep RANDFILE /etc/ssl/openssl.cnf #RANDFILE = $ENV::HOME/.rnd RANDFILE = $dir/private/.rand # private random number file gcloud> openssl req -new -x509 -key tls.key -out tls.cert -days 360 -subj /CN=kubia.example.com gcloud> ll ~/.rnd -rw------- 1 marc marc 1024 Oct 30 09:39 /home/marc/.rnd Restored... gcloud> grep RANDFILE /etc/ssl/openssl.cnf RANDFILE = $ENV::HOME/.rnd RANDFILE = $dir/private/.rand # private random number file gcloud> k create secret tls tls-secret --cert=tls.cert --key=tls.key secret/tls-secret created gcloud> k apply -f kubia-ingress-tls.yaml Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress Warning: resource ingresses/kubia is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. 
kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. ingress.extensions/kubia configured gcloud> k get ingresses NAME CLASS HOSTS ADDRESS PORTS AGE kubia kubia.example.com 34.107.136.15 80, 443 28m gcloud> curl https://kubia.example.com curl: (35) OpenSSL SSL_connect: SSL_ERROR_SYSCALL in connection to kubia.example.com:443 gcloud> k edit rc kubia Error from server (NotFound): replicationcontrollers "kubia" not found gcloud> k create -f kubia-rc.yaml replicationcontroller/kubia created gcloud> k edit rc kubia Waiting for Emacs... error: replicationcontrollers "kubia" is invalid # replicationcontrollers "kubia" was not valid: # * : Invalid value: "The edited file failed validation": [yaml: line 25: found character that cannot start any token, invalid character 'a' looking for beginning of value] I added: readynessProbe: exec: command: - ls - /var/ready Without the added comments with the error, line 25 is the second 'spec:', but indeed, in the book, 'containers:' is followed with '-name: kubia', and not in the image!? I force the book layout ('name' before 'image', followed with 'readynessProbe'), but I get now: # * : Invalid value: "The edited file failed validation": [yaml: line 26: found character that cannot start any token, invalid character 'a' looking for beginning of value] https://github.com/moraes/config/issues/1 OK, this was probably a TAB... Now: # * : Invalid value: "The edited file failed validation": ValidationError(ReplicationController.spec.template.spec.containers[0]): unknown field "readynessProbe" in io.k8s.api.core.v1.Container Typo! readinessProbe ... replicationcontroller/kubia edited gcloud> k get po NAME READY STATUS RESTARTS AGE kubia 1/1 Running 0 2d23h kubia-6h9pd 1/1 Running 0 16m kubia-dxx42 1/1 Running 0 16m gcloud> k delete po kubia pod "kubia" deleted gcloud> k get po NAME READY STATUS RESTARTS AGE kubia-6h9pd 1/1 Running 0 18m kubia-dxx42 1/1 Running 0 18m kubia-phx8p 0/1 Running 0 43s gcloud> k delete po kubia-6h9pd pod "kubia-6h9pd" deleted gcloud> k delete po kubia-dxx42 pod "kubia-dxx42" deleted gcloud> k get po NAME READY STATUS RESTARTS AGE kubia-5gg7x 0/1 Running 0 71s kubia-phx8p 0/1 Running 0 2m18s kubia-xxjxf 0/1 Running 0 32s gcloud> k exec kubia-5gg7x -- touch /var/ready gcloud> k get po NAME READY STATUS RESTARTS AGE kubia-5gg7x 1/1 Running 0 2m55s kubia-phx8p 0/1 Running 0 4m2s kubia-xxjxf 0/1 Running 0 2m16s gcloud> k describe po kubia-5gg7x | grep -i readiness Readiness: exec [ls /var/ready] delay=0s timeout=1s period=10s #success=1 #failure=3 Readiness Gates: Normal LoadBalancerNegNotReady 5m15s neg-readiness-reflector Waiting for pod to become healthy in at least one of the NEG(s): [k8s1-105246cb-default-kubia-nodeport-80-b56c78cc] Normal LoadBalancerNegReady 5m8s neg-readiness-reflector Pod has become Healthy in NEG "Key{\"k8s1-105246cb-default-kubia-nodeport-80-b56c78cc\", zone: \"europe-west1-d\"}" attached to BackendService "Key{\"k8s1-105246cb-default-kubia-nodeport-80-b56c78cc\"}". Marking condition "cloud.google.com/load-balancer-neg-ready" to True. 
Warning Unhealthy 2m51s (x15 over 5m11s) kubelet Readiness probe failed: ls: cannot access /var/ready: No such file or directory gcloud> k describe po kubia-phx8p | grep -i readiness Readiness: exec [ls /var/ready] delay=0s timeout=1s period=10s #success=1 #failure=3 Readiness Gates: Normal LoadBalancerNegNotReady 7m7s (x2 over 7m7s) neg-readiness-reflector Waiting for pod to become healthy in at least one of the NEG(s): [k8s1-105246cb-default-kubia-nodeport-80-b56c78cc] Normal LoadBalancerNegReady 7m neg-readiness-reflector Pod has become Healthy in NEG "Key{\"k8s1-105246cb-default-kubia-nodeport-80-b56c78cc\", zone: \"europe-west1-d\"}" attached to BackendService "Key{\"k8s1-105246cb-default-kubia-nodeport-80-b56c78cc\"}". Marking condition "cloud.google.com/load-balancer-neg-ready" to True. Warning Unhealthy 119s (x31 over 6m59s) kubelet Readiness probe failed: ls: cannot access /var/ready: No such file or directory gcloud> k exec kubia-phx8p -- touch /var/ready gcloud> k describe po kubia-phx8p | grep -i readiness Readiness: exec [ls /var/ready] delay=0s timeout=1s period=10s #success=1 #failure=3 Readiness Gates: Normal LoadBalancerNegNotReady 8m22s (x2 over 8m22s) neg-readiness-reflector Waiting for pod to become healthy in at least one of the NEG(s): [k8s1-105246cb-default-kubia-nodeport-80-b56c78cc] Normal LoadBalancerNegReady 8m15s neg-readiness-reflector Pod has become Healthy in NEG "Key{\"k8s1-105246cb-default-kubia-nodeport-80-b56c78cc\", zone: \"europe-west1-d\"}" attached to BackendService "Key{\"k8s1-105246cb-default-kubia-nodeport-80-b56c78cc\"}". Marking condition "cloud.google.com/load-balancer-neg-ready" to True. Warning Unhealthy 3m14s (x31 over 8m14s) kubelet Readiness probe failed: ls: cannot access /var/ready: No such file or directory gcloud> k get po NAME READY STATUS RESTARTS AGE kubia-5gg7x 1/1 Running 0 7m17s kubia-phx8p 0/1 Running 0 8m24s kubia-xxjxf 0/1 Running 0 6m38s gcloud> k get endpoints kubia-loadbalancer NAME ENDPOINTS AGE kubia-loadbalancer 10.112.0.13:8080,10.112.2.6:8080 47h gcloud> k get po NAME READY STATUS RESTARTS AGE kubia-5gg7x 1/1 Running 0 11m kubia-phx8p 1/1 Running 0 12m kubia-xxjxf 0/1 Running 0 11m gcloud> k describe po kubia-phx8p | grep -i readiness Readiness: exec [ls /var/ready] delay=0s timeout=1s period=10s #success=1 #failure=3 Readiness Gates: Normal LoadBalancerNegNotReady 14m (x2 over 14m) neg-readiness-reflector Waiting for pod to become healthy in at least one of the NEG(s): [k8s1-105246cb-default-kubia-nodeport-80-b56c78cc] Normal LoadBalancerNegReady 13m neg-readiness-reflector Pod has become Healthy in NEG "Key{\"k8s1-105246cb-default-kubia-nodeport-80-b56c78cc\", zone: \"europe-west1-d\"}" attached to BackendService "Key{\"k8s1-105246cb-default-kubia-nodeport-80-b56c78cc\"}". Marking condition "cloud.google.com/load-balancer-neg-ready" to True. Warning Unhealthy 8m52s (x31 over 13m) kubelet Readiness probe failed: ls: cannot access /var/ready: No such file or directory gcloud> curl http://kubia.example.com You've hit kubia-xxjxf gcloud> curl http://10.112.0.13:8080 curl: (7) Failed to connect to 10.112.0.13 port 8080: Connection timed out gcloud> curl http://10.112.2.6:8080 curl: (7) Failed to connect to 10.112.2.6 port 8080: Connection timed out ??? 
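As for the trailing ???: the 10.112.x.x addresses are pod IPs on the cluster's pod network, so a curl from the local gcloud shell presumably can't reach them; only the in-cluster exec, the NodePort/LoadBalancer, and the Ingress paths do. For the record, the fragment that finally went into the RC template via `k edit rc kubia` (correct spelling, spaces not tabs; container name and image as in kubia-rc.yaml after the s/luksa/marcsf/):

      containers:
      - name: kubia
        image: marcsf/kubia
        readinessProbe:          # "readinessProbe", not "readynessProbe"
          exec:
            command:             # the pod reports Ready only once /var/ready exists
            - ls
            - /var/ready
        ports:
        - containerPort: 8080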
gcloud> k create -f kubia-svc-headless.yaml service/kubia-headless created gcloud> k get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE external-service ClusterIP 10.116.2.100 80/TCP 2d2h kubernetes ClusterIP 10.116.0.1 443/TCP 6d1h kubia ClusterIP 10.116.14.25 80/TCP 3d kubia-headless ClusterIP None 80/TCP 60s kubia-loadbalancer LoadBalancer 10.116.9.215 35.205.211.203 80:30827/TCP 47h kubia-nodeport NodePort 10.116.9.43 80:30123/TCP 2d gcloud> k get svc kubia-headless NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubia-headless ClusterIP None 80/TCP 9m18s gcloud> k run dnsutils --image=tutum/dnsutils --generator=run-pod/v1 --command -- sleep infinity Flag --generator has been deprecated, has no effect and will be removed in the future. pod/dnsutils created gcloud> k exec dnsutils -- nslookup kubia-headless Server: 10.116.0.10 Address: 10.116.0.10#53 Name: kubia-headless.default.svc.cluster.local Address: 10.112.2.6 Name: kubia-headless.default.svc.cluster.local Address: 10.112.0.13 gcloud> k get po -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES dnsutils 1/1 Running 0 3m56s 10.112.0.14 gke-kubia-default-pool-df743581-fwfr kubia-5gg7x 1/1 Running 0 35m 10.112.2.6 gke-kubia-default-pool-df743581-2r9p 1/1 kubia-phx8p 1/1 Running 0 37m 10.112.0.13 gke-kubia-default-pool-df743581-fwfr 1/1 kubia-xxjxf 0/1 Running 0 35m 10.112.2.7 gke-kubia-default-pool-df743581-2r9p 1/1 gcloud> k exec dnsutils -- nslookup kubia Server: 10.116.0.10 Address: 10.116.0.10#53 Name: kubia.default.svc.cluster.local Address: 10.116.14.25 gcloud> cd ~/tmp/docker/fortune fortune> docker build -t marcfs/fortune . fortune> docker push marcfs/fortune Using default tag: latest The push refers to repository [docker.io/marcfs/fortune] a5b2854347d7: Preparing f58de555d8e7: Preparing 9f54eef41275: Preparing denied: requested access to the resource is denied From the web ui, generated an access token, and used it with: fortune> docker login -u marcsf Password: fortune> docker push marcsf/fortune Using default tag: latest The push refers to repository [docker.io/marcsf/fortune] An image does not exist locally with the tag: marcsf/fortune fortune> docker build -t marcfs/fortune . 
Sending build context to Docker daemon 4.096kB Step 1/4 : FROM ubuntu:latest ---> ba6acccedd29 Step 2/4 : RUN apt-get update ; apt-get -y install fortune ---> Using cache ---> d19e455e6fd7 Step 3/4 : ADD fortuneloop.sh /bin/fortuneloop.sh ---> Using cache ---> a6390192b2ea Step 4/4 : ENTRYPOINT /bin/fortuneloop.sh ---> Using cache ---> c30b02bd45f3 Successfully built c30b02bd45f3 Successfully tagged marcfs/fortune:latest fortune> docker push marcsf/fortune Using default tag: latest The push refers to repository [docker.io/marcsf/fortune] An image does not exist locally with the tag: marcsf/fortune fortune> docker pull marcsf/fortune Using default tag: latest Error response from daemon: manifest for marcsf/fortune:latest not found: manifest unknown: manifest unknown f https://forums.docker.com/t/can-not-push-image-to-dockerhub/92507/7 Created from the mac, and pulled with: fortune> docker pull marcsf/fortune:latest gcloud> k create -f fortune-pod.yaml pod/fortune created gcloud> k get po fortune NAME READY STATUS RESTARTS AGE fortune 1/2 CrashLoopBackOff 3 100s gcloud> k port-forward fortune 8080:80 Forwarding from 127.0.0.1:8080 -> 80 Forwarding from [::1]:8080 -> 80 Handling connection for 8080 Handling connection for 8080 Killed it and tried: gcloud> k create -f fortune-svc.yaml service/fortune created kubia-nodeport NodePort 10.116.9.43 80:30123/TCP 4d gcloud> k get svc fortune NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE fortune ClusterIP 10.116.0.131 80/TCP 41s fortune> curl http://localhost:8080 curl: (7) Failed to connect to localhost port 8080: Connection refused Trying myself: gcloud> k get no -L run,app NAME STATUS ROLES AGE VERSION RUN APP gke-kubia-default-pool-df743581-2r9p Ready 6d6h v1.20.10-gke.301 kubia gke-kubia-default-pool-df743581-fwfr Ready 12d v1.20.10-gke.301 kubia gke-kubia-default-pool-df743581-z83d Ready 12d v1.20.10-gke.301 kubia gcloud> k label po fortune run=fortune pod/fortune labeled gcloud> k label no gke-kubia-default-pool-df743581-2r9p run=fortune node/gke-kubia-default-pool-df743581-2r9p labeled gcloud> k get po --show-labels fortune NAME READY STATUS RESTARTS AGE LABELS fortune 1/2 CrashLoopBackOff 9 24m run=fortune gcloud> k get no -L run,app NAME STATUS ROLES AGE VERSION RUN APP gke-kubia-default-pool-df743581-2r9p Ready 6d6h v1.20.10-gke.301 fortune kubia gke-kubia-default-pool-df743581-fwfr Ready 12d v1.20.10-gke.301 kubia gke-kubia-default-pool-df743581-z83d Ready 12d v1.20.10-gke.301 kubia No result. 
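For reference, fortune-pod.yaml here is roughly the following (a sketch from memory of the book's listing, with my image instead of luksa/fortune): a two-container pod sharing an emptyDir volume, where html-generator writes /var/htdocs/index.html and an nginx web-server serves the same volume read-only. The 1/2 READY above means one of the two containers keeps crashing (html-generator, as the logs on the next lines show).
apiVersion: v1
kind: Pod
metadata:
  name: fortune
spec:
  containers:
  - image: marcsf/fortune          # the image built above
    name: html-generator
    volumeMounts:
    - name: html
      mountPath: /var/htdocs
  - image: nginx:alpine
    name: web-server
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
      readOnly: true
    ports:
    - containerPort: 80
      protocol: TCP
  volumes:
  - name: html
    emptyDir: {}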
gcloud> k logs --since=3600s --timestamps=true fortune html-generator 2021-11-03T10:03:02.390228681Z /bin/sh: 1: /bin/fortuneloop.sh: Permission denied Added to Dockerfile: RUN chmod +x /bin/fortuneloop.sh Rebuilt, re-pushed (from the mac) gcloud> k logs --since=60s --timestamps=true fortune html-generator 2021-11-03T10:15:28.750054587Z Wed Nov 3 10:15:28 UTC 2021 Writing fortune to /var/htdocs/index.html 2021-11-03T10:15:38.757167897Z Wed Nov 3 10:15:38 UTC 2021 Writing fortune to /var/htdocs/index.html 2021-11-03T10:15:48.764758104Z Wed Nov 3 10:15:48 UTC 2021 Writing fortune to /var/htdocs/index.html 2021-11-03T10:15:58.776995011Z Wed Nov 3 10:15:58 UTC 2021 Writing fortune to /var/htdocs/index.html 2021-11-03T10:16:08.783098509Z Wed Nov 3 10:16:08 UTC 2021 Writing fortune to /var/htdocs/index.html 2021-11-03T10:16:18.790840131Z Wed Nov 3 10:16:18 UTC 2021 Writing fortune to /var/htdocs/index.html gcloud> k get po --show-labels fortune NAME READY STATUS RESTARTS AGE LABELS fortune 2/2 Running 205 17h run=fortune gcloud> k port-forward fortune 8080:80 Forwarding from 127.0.0.1:8080 -> 80 Forwarding from [::1]:8080 -> 80 Handling connection for 8080 fortune> curl http://localhost:8080 Q: How many Marxists does it take to screw in a light bulb? A: None: The light bulb contains the seeds of its own revolution. gcloud> k create -f gitrepo-volume-pod.yaml pod/gitrepo-volume-pod created gcloud> k get po gitrepo-volume-pod NAME READY STATUS RESTARTS AGE gitrepo-volume-pod 1/1 Running 0 20s gcloud> k port-forward gitrepo-volume-pod 8080:80 Forwarding from 127.0.0.1:8080 -> 80 Forwarding from [::1]:8080 -> 80 Handling connection for 8080 fortune> curl http://localhost:8080 Hello there. gcloud> k delete po gitrepo-volume-pod pod "gitrepo-volume-pod" deleted gcloud> k create -f gitrepo-volume-pod.yaml pod/gitrepo-volume-pod created gcloud> k port-forward gitrepo-volume-pod 8080:80 Forwarding from 127.0.0.1:8080 -> 80 Forwarding from [::1]:8080 -> 80 Handling connection for 8080 fortune> curl http://localhost:8080 Life is hard and then you die. Added a second container: - image: marcsf/pullgitrepo name: pullgitrepo volumeMounts: - name: html mountPath: . gcloud> k delete po gitrepo-volume-pod pod "gitrepo-volume-pod" deleted gcloud> k create -f gitrepo-volume-pod.yaml pod/gitrepo-volume-pod created gcloud> k logs --since=60s --timestamps=true gitrepo-volume-pod pullgitrepo gcloud> k get po gitrepo-volume-pod NAME READY STATUS RESTARTS AGE gitrepo-volume-pod 1/2 CrashLoopBackOff 6 9m2s OK. I didn't read the hints about sidecar containers... 
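The book's hint about sidecar containers: the extra container should be a long-running "git sync" process that periodically pulls into the shared volume, not a one-shot container, and its mountPath has to be an absolute path inside the container (not '.'). A rough sketch of such an extra entry under the pod's containers: list, assuming the bitnami/git image pulled just below and the same 'html' volume (the loop, interval and paths are my own guesses, not the book's listing; ownership/permissions inside the image may need tweaking):
  - name: git-sync
    image: bitnami/git
    command:
    - sh
    - -c
    - "while true; do git -C /repo pull; sleep 60; done"   # keep the clone in the shared volume up to date
    volumeMounts:
    - name: html
      mountPath: /repo                                     # absolute path, unlike the '.' above
The gitRepo volume type itself is deprecated in newer Kubernetes versions in favor of exactly this kind of init/sidecar approach.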
gcloud> docker pull bitnami/git Using default tag: latest latest: Pulling from bitnami/git Digest: sha256:639baf2acaa6388a9239b76c1a6acd32be47f957708fb2a8b8eea4b99f2f8775 Status: Downloaded newer image for bitnami/git:latest docker.io/bitnami/git:latest gcloud> k delete po gitrepo-volume-pod pod "gitrepo-volume-pod" deleted 6.3.2, p 170 gcloud> k get po --namespace kube-system | wc -l 23 gcloud> k get po --namespace kube-system | grep fluent fluentbit-gke-2mjv7 2/2 Running 0 13d fluentbit-gke-vsq4r 2/2 Running 2 7d22h fluentbit-gke-xh49j 2/2 Running 0 13d gcloud> k describe po fluentbit-gke-2mjv7 --namespace kube-system | perl -nle 'next unless $ok or /^Vol/; if(/^Vol/){$ok=1; next};if($ok){if(/^ /){print}else{close ARGV}}' varrun: Type: HostPath (bare host directory volume) Path: /var/run/google-fluentbit/pos-files HostPathType: varlog: Type: HostPath (bare host directory volume) Path: /var/log HostPathType: varlibkubeletpods: Type: HostPath (bare host directory volume) Path: /var/lib/kubelet/pods HostPathType: varlibdockercontainers: Type: HostPath (bare host directory volume) Path: /var/lib/docker/containers HostPathType: config-volume: Type: ConfigMap (a volume populated by a ConfigMap) Name: fluentbit-gke-config-v1.0.6 Optional: false fluentbit-gke-token-lr67w: Type: Secret (a volume populated by a Secret) SecretName: fluentbit-gke-token-lr67w Optional: false gcloud> sudo ls -l /var/lib/docker/containers total 24 drwx--x--- 4 root root 4096 Nov 2 16:30 2b2ee55616da75fe3fb8b8ccd86fb0ca588bf5a6be1ae0475d8882b5b2ddd8d2 drwx--x--- 4 root root 4096 Nov 2 16:30 415b20cd37732e7320edd458ec1d5bbe2d82d78e3b49f72b870d0187026b6390 drwx--x--- 4 root root 4096 Nov 2 16:30 4bd16cd7c07d340008293067171b14985d4e2c6550dcffe271aac8a05a5e3617 drwx--x--- 4 root root 4096 Nov 2 16:30 8a76391447808018c04dc345b99c71a36fa3f590ee90fa425bfb21a4a1bdd367 drwx--x--- 4 root root 4096 Nov 2 16:30 c6eabb3f778106a35fb517d30e7af2bf1c52235f092ce252a3e9574869a26f99 drwx--x--- 4 root root 4096 Nov 2 16:30 cf4e21d06e9c40130cdb83ec76e73fbdacc2fbfe7aedc9ac2c29ecadde8d1db7 gcloud> gcloud container clusters list NAME LOCATION MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS kubia europe-west1-d 1.20.10-gke.301 34.78.120.160 e2-medium 1.20.10-gke.301 3 RUNNING gcloud> gcloud compute disks create --size=10GiB --zone=europe-west1-d mongodb WARNING: You have selected a disk size of under [200GB]. This may result in poor I/O performance. For more information, see: https://developers.google.com/compute/docs/disks#performance. Created [https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/zones/europe-west1-d/disks/mongodb]. NAME ZONE SIZE_GB TYPE STATUS mongodb europe-west1-d 10 pd-standard READY New disks are unformatted. You must format and mount a disk before it can be used. You can find instructions on how to do this at: https://cloud.google.com/compute/docs/disks/add-persistent-disk#formatting fortune> gcloud compute ssh gke-kubia-default-pool-df743581-2r9p marc@gke-kubia-default-pool-df743581-2r9p ~ $ sudo lsblk NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT sda 8:0 0 100G 0 disk |-sda1 8:1 0 95.9G 0 part /mnt/stateful_partition |-sda2 8:2 0 16M 0 part |-sda3 8:3 0 2G 0 part | `-vroot 253:0 0 2G 1 dm / |-sda4 8:4 0 16M 0 part |-sda5 8:5 0 2G 0 part |-sda6 8:6 0 512B 0 part |-sda7 8:7 0 512B 0 part |-sda8 8:8 0 16M 0 part /usr/share/oem |-sda9 8:9 0 512B 0 part |-sda10 8:10 0 512B 0 part |-sda11 8:11 0 8M 0 part `-sda12 8:12 0 32M 0 part Not there... 
But I can see it from: https://console.cloud.google.com/compute/disks?project=dev-copilot-329712 gcloud> gcloud container clusters list NAME LOCATION MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS kubia europe-west1-d 1.20.10-gke.301 34.78.120.160 e2-medium 1.20.10-gke.301 3 RUNNING gcloud> gcloud compute instances list NAME ZONE MACHINE_TYPE PREEMPTIBLE INTERNAL_IP EXTERNAL_IP STATUS gke-kubia-default-pool-df743581-2r9p europe-west1-d e2-medium 10.132.0.5 34.140.243.120 RUNNING gke-kubia-default-pool-df743581-fwfr europe-west1-d e2-medium 10.132.0.2 34.76.93.123 RUNNING gke-kubia-default-pool-df743581-z83d europe-west1-d e2-medium 10.132.0.3 34.77.252.162 RUNNING gcloud> gcloud compute instances attach-disk gke-kubia-default-pool-df743581-2r9p --disk mongodb No zone specified. Using zone [europe-west1-d] for instance: [gke-kubia-default-pool-df743581-2r9p]. Updated [https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/zones/europe-west1-d/instances/gke-kubia-default-pool-df743581-2r9p]. and now indeed: marc@gke-kubia-default-pool-df743581-2r9p ~ $ sudo lsblk | egrep ^sdb sdb 8:16 0 10G 0 disk marc@gke-kubia-default-pool-df743581-2r9p ~ $ sudo mkfs.ext4 -m 0 -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/sdb mke2fs 1.46.2 (28-Feb-2021) Discarding device blocks: done Creating filesystem with 2621440 4k blocks and 655360 inodes Filesystem UUID: 36d82c6a-d456-409f-9526-629c889b9a6e Superblock backups stored on blocks: 32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632 Allocating group tables: done Writing inode tables: done Creating journal (16384 blocks): done Writing superblocks and filesystem accounting information: done $ sudo mkdir -p /mnt/disks/mongodb $ sudo mount -o discard,defaults /dev/sdb /mnt/disks/mongodb $ sudo chmod a+w /mnt/disks/mongodb $ sudo mkdir -p /data/db mkdir: cannot create directory ‘/data’: Read-only file system Asked in mongodb chat: I am reading Kubernetes in Action (v1), by Marco Luksa. In 6.4.1, he creates a mongodb volume in a GCE context. But I cannot create the /data/db mount point -- can I use /mnt/disks/mongodb ? Replaced /data/db with /mnt/disks/mongodb as mountPoint in the yaml. gcloud> k create -f mongodb-pod-gcepd.yaml pod/mongodb created gcloud> k exec -it mongodb -- mongo error: unable to upgrade connection: container not found ("mongodb") gcloud> k get po mongodb NAME READY STATUS RESTARTS AGE mongodb 0/1 ContainerCreating 0 5m22s gcloud> k logs --since=60s --timestamps=true mongodb Error from server (BadRequest): container "mongodb" in pod "mongodb" is waiting to start: ContainerCreating Found that somebody uses a command such as: sudo docker run -it -v /mnt/mongo:/data/db --name mongodb e6fa3383f923 Tried to append :/data/db to the mount point... 
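For comparison, mongodb-pod-gcepd.yaml is roughly the following (a sketch of the book's listing). The key point I was missing: mountPath is a path inside the container, so /data/db does not have to exist on the node at all, and manually attaching/mounting the disk on a node is unnecessary; kubelet attaches and mounts the GCE PD itself when the pod is scheduled. (The manual attach to node -2r9p is probably what keeps the pod stuck in ContainerCreating below, since it got scheduled on -fwfr.)
apiVersion: v1
kind: Pod
metadata:
  name: mongodb
spec:
  volumes:
  - name: mongodb-data
    gcePersistentDisk:
      pdName: mongodb          # the GCE disk created above
      fsType: ext4
  containers:
  - image: mongo
    name: mongodb
    volumeMounts:
    - name: mongodb-data
      mountPath: /data/db      # inside the container, not on the node
    ports:
    - containerPort: 27017
      protocol: TCP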
gcloud> k delete po mongodb pod "mongodb" deleted gcloud> k create -f mongodb-pod-gcepd.yaml pod/mongodb created gcloud> k logs --since=60s --timestamps=true mongodb Error from server (BadRequest): container "mongodb" in pod "mongodb" is waiting to start: ContainerCreating gcloud> k get po mongodb NAME READY STATUS RESTARTS AGE mongodb 0/1 ContainerCreating 0 23s $ mount | grep mongo /dev/sdb on /mnt/disks/mongodb type ext4 (rw,relatime,discard) $ tail -3 /var/log/fluentbit.log [2021/11/04 18:04:23] [ info] [output:http:http.0] 127.0.0.1:2021, HTTP status=200 [2021/11/04 18:04:23] [ info] [output:http:http.0] 127.0.0.1:2021, HTTP status=200 [2021/11/04 18:04:23] [ info] [output:http:http.0] 127.0.0.1:2021, HTTP status=200 gcloud> k get po mongodb -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES mongodb 0/1 ContainerCreating 0 5m53s gke-kubia-default-pool-df743581-fwfr fortune> gcloud compute ssh gke-kubia-default-pool-df743581-fwfr gcloud> gcloud compute instances attach-disk gke-kubia-default-pool-df743581-fwfr --disk mongodb No zone specified. Using zone [europe-west1-d] for instance: [gke-kubia-default-pool-df743581-fwfr]. ERROR: (gcloud.compute.instances.attach-disk) Could not fetch resource: - The disk resource 'projects/dev-copilot-329712/zones/europe-west1-d/disks/mongodb' is already being used by 'projects/dev-copilot-329712/zones/europe-west1-d/instances/gke-kubia-default-pool-df743581-2r9p' gcloud> gcloud compute instances detach-disk gke-kubia-default-pool-df743581-2r9p --disk mongodb No zone specified. Using zone [europe-west1-d] for instance: [gke-kubia-default-pool-df743581-2r9p]. Updated [https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/zones/europe-west1-d/instances/gke-kubia-default-pool-df743581-2r9p]. gcloud> gcloud compute instances attach-disk gke-kubia-default-pool-df743581-fwfr --disk mongodb No zone specified. Using zone [europe-west1-d] for instance: [gke-kubia-default-pool-df743581-fwfr]. Updated [https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/zones/europe-west1-d/instances/gke-kubia-default-pool-df743581-fwfr]. $ sudo lsblk | egrep ^sdb sdb 8:16 0 10G 0 disk $ sudo mkdir -p /data/db mkdir: cannot create directory ‘/data’: Read-only file system $ sudo mkdir -p /mnt/disks/mongodb $ sudo mount -o discard,defaults /dev/sdb /mnt/disks/mongodb $ sudo chmod a+w /mnt/disks/mongodb $ alias a=alias $ a ll='ls -latr' $ ll /mnt/disks/mongodb total 20 drwx------ 2 root root 16384 Nov 4 10:41 lost+found drwxrwxrwx 3 root root 4096 Nov 4 10:41 . drwxr-xr-x 3 root root 60 Nov 4 18:14 .. 
gcloud> k get po mongodb -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES mongodb 0/1 ContainerCreating 0 14m gke-kubia-default-pool-df743581-fwfr gcloud> k logs --since=60s --timestamps=true mongodb Error from server (BadRequest): container "mongodb" in pod "mongodb" is waiting to start: ContainerCreating gcloud> k exec -it mongodb -- mongo error: unable to upgrade connection: container not found ("mongodb") $ tail -3 /var/log/fluentbit.log [2021/11/04 18:17:25] [ info] [output:http:http.0] 127.0.0.1:2021, HTTP status=200 [2021/11/04 18:17:35] [ info] [output:http:http.0] 127.0.0.1:2021, HTTP status=200 [2021/11/04 18:17:40] [ info] [output:http:http.0] 127.0.0.1:2021, HTTP status=200 https://www.mongodb.com/community/forums/t/reading-kubernetes-in-action-by-marco-luksa-mount-point-woes/130766 6.5.2 gcloud> k get po mongodb NAME READY STATUS RESTARTS AGE mongodb 0/1 ContainerCreating 0 19h gcloud> k get pv No resources found gcloud> k create -f mongodb-pv-gcepd.yaml persistentvolume/mongodb-pv created gcloud> k get pv NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE mongodb-pv 1Gi RWO,ROX Retain Available 64s gcloud> k delete po mongodb pod "mongodb" deleted gcloud> k create -f mongodb-pvc.yaml persistentvolumeclaim/mongodb-pvc created gcloud> k get pvc NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE mongodb-pvc Bound mongodb-pv 1Gi RWO,ROX 42s gcloud> k get pv NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE mongodb-pv 1Gi RWO,ROX Retain Bound default/mongodb-pvc 10m gcloud> k create -f mongodb-pod-pvc.yaml pod/mongodb created gcloud> k get po mongodb NAME READY STATUS RESTARTS AGE mongodb 0/1 ContainerCreating 0 60s gcloud> k logs --since=60s --timestamps=true mongodb Error from server (BadRequest): container "mongodb" in pod "mongodb" is waiting to start: ContainerCreating gcloud> k delete po mongodb pod "mongodb" deleted gcloud> k delete pvc mongodb-pvc persistentvolumeclaim "mongodb-pvc" deleted gcloud> k get pvc No resources found in default namespace. 
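For reference, the PV/PVC pair used above is roughly the following (a sketch of mongodb-pv-gcepd.yaml and mongodb-pvc.yaml, reconstructed from the output): a 1Gi PersistentVolume backed by the same GCE disk with a Retain reclaim policy, and a claim requesting 1Gi with an empty storageClassName so that it binds to the pre-provisioned PV instead of triggering dynamic provisioning.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mongodb-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
  gcePersistentDisk:
    pdName: mongodb
    fsType: ext4
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mongodb-pvc
spec:
  resources:
    requests:
      storage: 1Gi
  accessModes:
  - ReadWriteOnce
  storageClassName: ""         # bind to a pre-provisioned PV, do not provision dynamically
Note that after deleting the claim, the PV shows up as Released rather than Available (next command): with the Retain policy it cannot be bound by a new claim until it is deleted and recreated (or its claimRef is cleared).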
gcloud> k get pv NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE mongodb-pv 1Gi RWO,ROX Retain Released default/mongodb-pvc 15m gcloud> k get sc NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE premium-rwo pd.csi.storage.gke.io Delete WaitForFirstConsumer true 15d standard (default) kubernetes.io/gce-pd Delete Immediate true 15d standard-rwo pd.csi.storage.gke.io Delete WaitForFirstConsumer true 15d gcloud> k create -f storageclass-fast-gcepd.yaml storageclass.storage.k8s.io/fast created gcloud> k get sc NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE fast kubernetes.io/gce-pd Delete Immediate false 7s premium-rwo pd.csi.storage.gke.io Delete WaitForFirstConsumer true 15d standard (default) kubernetes.io/gce-pd Delete Immediate true 15d standard-rwo pd.csi.storage.gke.io Delete WaitForFirstConsumer true 15d gcloud> k create -f mongodb-pvc-dp.yaml persistentvolumeclaim/mongodb-pvc created gcloud> k get pvc mongodb-pvc NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE mongodb-pvc Bound pvc-908fbd0e-c0b6-46c9-85d3-b0a78f21f250 1Gi RWO fast 92s gcloud> k get pv NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE mongodb-pv 1Gi RWO,ROX Retain Released default/mongodb-pvc 51m pvc-908fbd0e-c0b6-46c9-85d3-b0a78f21f250 1Gi RWO Delete Bound default/mongodb-pvc fast 2m10s gcloud> gcloud compute disks list NAME LOCATION LOCATION_SCOPE SIZE_GB TYPE STATUS gke-kubia-bf626f1a-dyn-pvc-908fbd0e-c0b6-46c9-85d3-b0a78f21f250 europe-west1-d zone 1 pd-ssd READY gke-kubia-default-pool-df743581-2r9p europe-west1-d zone 100 pd-standard READY gke-kubia-default-pool-df743581-fwfr europe-west1-d zone 100 pd-standard READY gke-kubia-default-pool-df743581-z83d europe-west1-d zone 100 pd-standard READY mongodb europe-west1-d zone 10 pd-standard READY gcloud> k create -f mongodb-pvc-dp-nostorageclass.yaml persistentvolumeclaim/mongodb-pvc2 created gcloud> k get pvc NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE mongodb-pvc Bound pvc-908fbd0e-c0b6-46c9-85d3-b0a78f21f250 1Gi RWO fast 17m mongodb-pvc2 Bound pvc-9913cc69-3634-4d95-84d6-5f0924c3d8cf 1Gi RWO standard 30s gcloud> k get pv pvc-9913cc69-3634-4d95-84d6-5f0924c3d8cf NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE pvc-9913cc69-3634-4d95-84d6-5f0924c3d8cf 1Gi RWO Delete Bound default/mongodb-pvc2 standard 103s gcloud> gcloud compute disks list | grep pvc gke-kubia-bf626f1a-dyn-pvc-908fbd0e-c0b6-46c9-85d3-b0a78f21f250 europe-west1-d zone 1 pd-ssd READY gke-kubia-bf626f1a-dyn-pvc-9913cc69-3634-4d95-84d6-5f0924c3d8cf europe-west1-d zone 1 pd-standard READY gcloud> gcloud compute instances detach-disk gke-kubia-default-pool-df743581-fwfr --disk mongodb No zone specified. Using zone [europe-west1-d] for instance: [gke-kubia-default-pool-df743581-fwfr]. Updated [https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/zones/europe-west1-d/instances/gke-kubia-default-pool-df743581-fwfr]. gcloud> gcloud compute disks delete mongodb --zone=europe-west1-d The following disks will be deleted: - [mongodb] in [europe-west1-d] Do you want to continue (Y/n)? Deleted [https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/zones/europe-west1-d/disks/mongodb]. 7.2.1, p 194 -- Nov 8 fortune> docker build -t docker.io/marcsf/fortune:args . 
Sending build context to Docker daemon 5.12kB Step 1/6 : FROM ubuntu:latest ---> ba6acccedd29 Step 2/6 : RUN apt-get update ; apt-get -y install fortune ---> Using cache ---> d19e455e6fd7 Step 3/6 : ADD fortuneloop.sh /bin/fortuneloop.sh ---> 66177d83497f Step 4/6 : RUN chmod +x /bin/fortuneloop.sh ---> Running in 0596e6394054 Removing intermediate container 0596e6394054 ---> db3746bb4a12 Step 5/6 : ENTRYPOINT ["/bin/fortuneloop.sh"] ---> Running in ce29dfa9b294 Removing intermediate container ce29dfa9b294 ---> c1959c331c03 Step 6/6 : CMD ["10"] ---> Running in 6386dce92ee3 Removing intermediate container 6386dce92ee3 ---> 944e9bc91139 Successfully built 944e9bc91139 Successfully tagged marcsf/fortune:args fortune> docker push docker.io/marcsf/fortune:args The push refers to repository [docker.io/marcsf/fortune] 78b75b5028f7: Pushed f58de555d8e7: Layer already exists 9f54eef41275: Layer already exists args: digest: sha256:b9f2193e1972f13f9533d9e36c59d79097a121df9d416be50b895ed97e4314fc size: 1155 fortune> docker run -it docker.io/marcsf/fortune:args Configured to generate new fortune every 10 seconds Mon Nov 8 10:51:07 UTC 2021 Writing fortune to /var/htdocs/index.html Mon Nov 8 10:51:17 UTC 2021 Writing fortune to /var/htdocs/index.html fortune> docker container ls CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES bc74b2a730e3 marcsf/fortune:args "/bin/fortuneloop.sh…" 54 seconds ago Up 53 seconds pedantic_sutherland fortune> docker exec bc74b2a730e3 ps x PID TTY STAT TIME COMMAND 1 pts/0 Ss+ 0:00 /bin/bash /bin/fortuneloop.sh 10 40 pts/0 S+ 0:00 sleep 10 41 ? Rs 0:00 ps x fortune> docker exec bc74b2a730e3 cat /var/htdocs/index.html As to the Adjective: when in doubt, strike it out. -- Mark Twain, "Pudd'nhead Wilson's Calendar" gcloud> k create -f fortune-pod-args.yaml pod/fortune2s created gcloud> k get po fortune2s NAME READY STATUS RESTARTS AGE fortune2s 2/2 Running 0 40s gcloud> k port-forward fortune2s 8080:80 Forwarding from 127.0.0.1:8080 -> 80 Forwarding from [::1]:8080 -> 80 Handling connection for 8080 fortune> curl http://localhost:8080 Extreme fear can neither fight nor fly. 
-- William Shakespeare, "The Rape of Lucrece" gcloud> k create -f fortune-pod-env.yaml pod/fortune3s created gcloud> k get po fortune3s NAME READY STATUS RESTARTS AGE fortune3s 2/2 Running 0 12s gcloud> k logs --since=60s --timestamps=true fortune3s html-generator 2021-11-08T12:03:07.728801939Z Mon Nov 8 12:03:07 UTC 2021 Writing fortune to /var/htdocs/index.html 2021-11-08T12:03:37.736133640Z Mon Nov 8 12:03:37 UTC 2021 Writing fortune to /var/htdocs/index.html gcloud> k delete po fortune2s pod "fortune2s" deleted gcloud> k create configmap fortune-config --from-literal=sleep-interval=25 configmap/fortuner-config created gcloud> k get cm NAME DATA AGE fortune-config 1 15s kube-root-ca.crt 1 18d gcloud> k describe cm fortune-config Name: fortune-config Namespace: default Labels: Annotations: Data ==== sleep-interval: ---- 25 Events: gcloud> k get cm fortune-config -o yaml apiVersion: v1 data: sleep-interval: "25" kind: ConfigMap metadata: creationTimestamp: "2021-11-08T14:14:00Z" managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: .: {} f:sleep-interval: {} manager: kubectl-create operation: Update time: "2021-11-08T14:14:00Z" name: fortune-config namespace: default resourceVersion: "7766229" uid: 225ced15-3d7a-4128-871d-6ae62e226d03 gcloud> k create -f fortune-pod-env-configmap.yaml pod/fortune-env-from-configmap created gcloud> k logs --since=60s --timestamps=true fortune-env-from-configmap html-generator 2021-11-08T14:34:54.479426675Z Mon Nov 8 14:34:54 UTC 2021 Writing fortune to /var/htdocs/index.html 2021-11-08T14:35:19.486813144Z Mon Nov 8 14:35:19 UTC 2021 Writing fortune to /var/htdocs/index.html gcloud> k delete cm fortune-config configmap "fortune-config" deleted gcloud> mkdir configmap-files gcloud> echo 25 > configmap-files/sleep-interval gcloud> ll configmap-files/ total 16 drwxrwxr-x 2 marc marc 4096 Nov 8 14:51 . drwxrwxr-x 4 marc marc 4096 Nov 8 14:50 .. -rw-rw-r-- 1 marc marc 242 Nov 8 14:47 my-nginx-config.conf -rw-rw-r-- 1 marc marc 3 Nov 8 14:51 sleep-interval gcloud> k create configmap fortune-config --from-file=configmap-files configmap/fortune-config created gcloud> k create -f fortune-pod-configmap-volume.yaml pod/fortune-configmap-volume created fortune> curl -H "Accept-Encoding: gzip" -I localhost:8080 HTTP/1.1 200 OK Server: nginx/1.21.3 Date: Mon, 08 Nov 2021 15:02:33 GMT Content-Type: text/html Last-Modified: Mon, 08 Nov 2021 15:02:12 GMT Connection: keep-alive ETag: W/"61893bf4-34" Content-Encoding: gzip fortune> k exec fortune-configmap-volume -c web-server -- ls /etc/nginx/conf.d my-nginx-config.conf sleep-interval gcloud> k edit cm fortune-config Waiting for Emacs... 
configmap/fortune-config edited fortune> k exec fortune-configmap-volume -c web-server -- cat /etc/nginx/conf.d/my-nginx-config.conf server { listen 80; server_name www.kubia-example.com; gzip off; gzip_types text/plain application/xml; location / { root /usr/share/nginx/html; index index.html index.htm; } } gcloud> k exec fortune-configmap-volume -c web-server -- nginx -s reload 2021/11/08 16:55:56 [notice] 52#52: signal process started gcloud> k port-forward fortune-configmap-volume 8080:80 Forwarding from 127.0.0.1:8080 -> 80 Forwarding from [::1]:8080 -> 80 Handling connection for 8080 fortune>curl -H "Accept-Encoding: gzip" -I localhost:8080f HTTP/1.1 200 OK Server: nginx/1.21.3 Date: Mon, 08 Nov 2021 16:56:20 GMT Content-Type: text/html Content-Length: 57 Last-Modified: Mon, 08 Nov 2021 16:56:00 GMT Connection: keep-alive ETag: "618956a0-39" Accept-Ranges: bytes 7.5 p 214, Nov 9 gcloud> k get secrets NAME TYPE DATA AGE default-token-28fph kubernetes.io/service-account-token 3 18d tls-secret kubernetes.io/tls 2 10d gcloud> openssl genrsa -out https.key 2048 Generating RSA private key, 2048 bit long modulus (2 primes) ............+++++ ................................................................................................+++++ e is 65537 (0x010001) gcloud> openssl req -new -x509 -key https.key -out https.cert -days 3650 -subj /CN=www.kubia-example.com gcloud> ll https.* -rw-rw-r-- 1 marc marc 1147 Nov 9 11:06 https.cert -rw------- 1 marc marc 1675 Nov 9 11:05 https.key gcloud> echo bar > foo gcloud> k create secret generic fortune-https --from-file=https.key --from-file=https.cert --from-file=foo secret/fortune-https created gcloud> k get secrets fortune-https NAME TYPE DATA AGE fortune-https Opaque 3 61s gcloud> k edit cm fortune-config Waiting for Emacs... configmap/fortune-config edited gcloud> k create -f fortune-pod-https.yaml pod/fortune-https created gcloud> k port-forward fortune-https 8443:443 & [1] 27462 gcloud> Forwarding from 127.0.0.1:8443 -> 443 Forwarding from [::1]:8443 -> 443 gcloud> curl https://localhost:8443 -k Handling connection for 8443 He jests at scars who never felt a wound. -- Shakespeare, "Romeo and Juliet, II. 2" gcloud> curl https://localhost:8443 -k -v ... * Connected to localhost (127.0.0.1) port 8443 (#0) ... * SSL connection using TLSv1.2 / ECDHE-RSA-AES256-GCM-SHA384 * ALPN, server accepted to use http/1.1 * Server certificate: * subject: CN=www.kubia-example.com * start date: Nov 9 11:06:43 2021 GMT * expire date: Nov 7 11:06:43 2031 GMT * issuer: CN=www.kubia-example.com * SSL certificate verify result: self signed certificate (18), continuing anyway. ... gcloud> k exec fortune-https -c web-server -- mount | grep certs tmpfs on /etc/nginx/certs type tmpfs (ro,relatime) 8.1.2 p 227, Nov 10 gcloud> k create -f downward-api-env.yaml pod/downward created gcloud> k exec downward -- env PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin HOSTNAME=downward POD_NAME=downward POD_NAMESPACE=default POD_IP=10.112.3.10 NODE_NAME=gke-kubia-default-pool-df743581-mmpz SERVICE_ACCOUNT=default CONTAINER_CPU_REQUEST_MILLICORES=15 CONTAINER_MEMORY_LIMIT_KIBIBYTES=4096 KUBIA_SERVICE_PORT=80 FORTUNE_PORT_80_TCP_ADDR=10.116.0.131 KUBIA_PORT_80_TCP=tcp://10.116.14.25:80 ... 
[53 lines total] KUBERNETES_SERVICE_HOST=10.116.0.1 EXTERNAL_SERVICE_PORT_80_TCP_PORT=80 EXTERNAL_SERVICE_PORT_80_TCP_ADDR=10.116.2.100 HOME=/root gcloud> k delete pod downward pod "downward" deleted gcloud> k create -f downward-api-volume.yaml pod/downward created gcloud> k exec downward -- ls -l /etc/downward total 0 lrwxrwxrwx 1 root root 18 Nov 10 08:46 annotations -> ..data/annotations lrwxrwxrwx 1 root root 36 Nov 10 08:46 containerCpuRequestMilliCores -> ..data/containerCpuRequestMilliCores lrwxrwxrwx 1 root root 32 Nov 10 08:46 containerMemoryLimitBytes -> ..data/containerMemoryLimitBytes lrwxrwxrwx 1 root root 13 Nov 10 08:46 labels -> ..data/labels lrwxrwxrwx 1 root root 14 Nov 10 08:46 podName -> ..data/podName lrwxrwxrwx 1 root root 19 Nov 10 08:46 podNamespace -> ..data/podNamespace gcloud> k exec downward -- cat /etc/downward/labels; echo foo="bar" gcloud> k exec downward -- cat /etc/downward/annotations; echo key1="value1" key2="multi\nline\nvalue\n" kubernetes.io/config.seen="2021-11-10T08:46:17.787804245Z" kubernetes.io/config.source="api" gcloud> k cluster-info Kubernetes control plane is running at https://34.78.120.160 GLBCDefaultBackend is running at https://34.78.120.160/api/v1/namespaces/kube-system/services/default-http-backend:http/proxy KubeDNS is running at https://34.78.120.160/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy Metrics-server is running at https://34.78.120.160/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. gcloud> curl https://34.78.120.160 -k { "kind": "Status", "apiVersion": "v1", "metadata": { }, "status": "Failure", "message": "forbidden: User \"system:anonymous\" cannot get path \"/\"", "reason": "Forbidden", "details": { }, "code": 403 } gcloud> k cluster-info dump > clinfo.dump gcloud> wc clinfo.dump 289381 1225449 13540565 clinfo.dump gcloud> k proxy Starting to serve on 127.0.0.1:8001 [ does not return until killed ] gcloud> curl localhost:8001 | wc -l % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 6515 0 6515 0 0 181k 0 --:--:-- --:--:-- --:--:-- 181k 158 gcloud> curl localhost:8001 | head % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 10{ 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 "paths": [ "/.well-known/openid-configuration", "/api", 6 "/api/v1", 5 "/apis", 1 "/apis/", 5 "/apis/admissionregistration.k8s.io", "/apis/admissionregistration.k8s.io/v1", "/apis/admissionregistration.k8s.io/v1beta1", 0 6515 0 0 198k 0 --:--:-- --:--:-- --:--:-- 198k (23) Failed writing body gcloud> curl localhost:8001 | grep /apis/batch/ % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 6515 "/apis/batch/v1",0 0 0 --:--:-- --:--:-- --:--:-- 0 "/apis/batch/v1beta1", 0 6515 0 0 192k 0 --:--:-- --:--:-- --:--:-- 192k gcloud> curl localhost:8001 2>/dev/null | grep /apis/batch "/apis/batch", "/apis/batch/v1", "/apis/batch/v1beta1", gcloud> curl localhost:8001 2>/dev/null | head { "paths": [ "/.well-known/openid-configuration", "/api", "/api/v1", "/apis", "/apis/", "/apis/admissionregistration.k8s.io", "/apis/admissionregistration.k8s.io/v1", "/apis/admissionregistration.k8s.io/v1beta1", gcloud> curl localhost:8001/apis/batch { "kind": "APIGroup", "apiVersion": "v1", "name": "batch", "versions": [ { "groupVersion": "batch/v1", "version": "v1" }, { "groupVersion": "batch/v1beta1", "version": 
"v1beta1" } ], "preferredVersion": { "groupVersion": "batch/v1", "version": "v1" } } gcloud> curl localhost:8001/apis/batch/v1 { "kind": "APIResourceList", "apiVersion": "v1", "groupVersion": "batch/v1", "resources": [ { "name": "jobs", "singularName": "", "namespaced": true, "kind": "Job", "verbs": [ "create", "delete", "deletecollection", "get", "list", "patch", "update", "watch" ], "categories": [ "all" ], "storageVersionHash": "mudhfqk/qZY=" }, { "name": "jobs/status", "singularName": "", "namespaced": true, "kind": "Job", "verbs": [ "get", "patch", "update" ] } ] } gcloud> curl localhost:8001/apis/batch/v1/jobs { "kind": "JobList", "apiVersion": "batch/v1", "metadata": { "resourceVersion": "8542960" }, "items": [] } gcloud> k create -f my-job.yaml job.batch/my-job created gcloud> curl localhost:8001/apis/batch/v1/jobs { "kind": "JobList", "apiVersion": "batch/v1", "metadata": { "resourceVersion": "8543913" }, "items": [ { "metadata": { "name": "my-job", "namespace": "default", "uid": "cf43cead-3117-4fb9-9335-07e091b9ef3a", ... gcloud> curl localhost:8001/apis/batch/v1/jobs 2>/dev/null | wc -l 94 gcloud> curl localhost:8001/apis/batch/v1/namespaces/default/jobs/my-job 2>/dev/null | wc -l 87 gcloud> curl localhost:8001/apis/batch/v1/jobs 2>/dev/null | tail "lastTransitionTime": "2021-11-10T09:30:27Z" } ], "startTime": "2021-11-10T09:28:21Z", "completionTime": "2021-11-10T09:30:27Z", "succeeded": 1 } } ] }gcloud> curl localhost:8001/apis/batch/v1/namespaces/default/jobs/my-job 2>/dev/null | tail -5 "startTime": "2021-11-10T09:28:21Z", "completionTime": "2021-11-10T09:30:27Z", "succeeded": 1 } } gcloud> curl localhost:8001/apis/batch/v1/namespaces/default/jobs/my-job 2>/dev/null | head -5 { "kind": "Job", "apiVersion": "batch/v1", "metadata": { "name": "my-job", gcloud> k get job my-job -o json | wc -l 138 gcloud> k get job my-job -o json | head -5 { "apiVersion": "batch/v1", "kind": "Job", "metadata": { "creationTimestamp": "2021-11-10T09:28:21Z", gcloud> k get job my-job -o json | tail -5 ], "startTime": "2021-11-10T09:28:21Z", "succeeded": 1 } } gcloud> docker pull tutum/curl Using default tag: latest Error response from daemon: pull access denied for tutum/curl, repository does not exist or may require 'docker login': denied: requested access to the resource is denied gcloud> docker pull curlimages/curl Using default tag: latest latest: Pulling from curlimages/curl Digest: sha256:d588ff348c251f8e4d1b2053125c34d719a98ff3ef20895c49684b3743995073 Status: Downloaded newer image for curlimages/curl:latest docker.io/curlimages/curl:latest gcloud> k get po curl NAME READY STATUS RESTARTS AGE curl 1/1 Running 0 104s gcloud> k logs --since=60s --timestamps=true curl main gcloud> k exec -it curl -- bash error: Internal error occurred: error executing command in container: failed to exec in container: failed to start exec "277a53cb651696c7d9fc4c465a9986d03a4b254cf227a8b945484dcbd33c1db6": OCI runtime exec failed: exec failed: container_linux.go:380: starting container process caused: exec: "bash": executable file not found in $PATH: unknown gcloud> k delete po curl pod "curl" deleted gcloud> mkdir ~/tmp/docker/curl gcloud> cd ~/tmp/docker/curl/ curl> cp /bin/bash . curl> cp /usr/bin/curl . curl> docker build -t docker.io/marcsf/curl . 
Sending build context to Docker daemon 1.34MB Step 1/4 : FROM ubuntu:latest ---> ba6acccedd29 Step 2/4 : ADD curl /bin/curl ---> 34f61754b7a6 Step 3/4 : ADD bash /bin/bash ---> 015c11306e10 Step 4/4 : ENTRYPOINT /bin/bash ---> Running in caef44a3bb59 Removing intermediate container caef44a3bb59 ---> b04583de25fd Successfully built b04583de25fd Successfully tagged marcsf/curl:latest curl> docker push docker.io/marcsf/curl Using default tag: latest The push refers to repository [docker.io/marcsf/curl] latest: digest: sha256:b22bd396ff5754552873aad6de0fc61e63bc250a03995d1b2ea2b6acf3a1f0e6 size: 949 curl> rm bash curl curl> cd - /home/marc/tmp/gcloud gcloud> k create -f curl.yaml pod/curl created gcloud> k exec -it curl -- bash error: unable to upgrade connection: container not found ("main") gcloud> k get po curl NAME READY STATUS RESTARTS AGE curl 0/1 ErrImagePull 0 115s gcloud> k logs --since=60s --timestamps=true curl main Error from server (BadRequest): container "main" in pod "curl" is waiting to start: trying and failing to pull image gcloud> ldd /usr/bin/curl | wc -l 43 gcloud> ldd /bin/bash | wc -l 5 gcloud> docker rmi marcsf/curl Untagged: marcsf/curl:latest Untagged: marcsf/curl@sha256:b22bd396ff5754552873aad6de0fc61e63bc250a03995d1b2ea2b6acf3a1f0e6 Deleted: sha256:b04583de25fda3e19c1a2b0f1fcd14ab4bd496e992fc9382140134581e5946b5 Deleted: sha256:015c11306e1048c63d696030d7fd4eb3b37fa857a80a67688241335e444ad35b Deleted: sha256:7687941dfca0009006852dec7c7a93bf56c6313db2764295b9b5bf0fe2fb52df Deleted: sha256:34f61754b7a64f529cbf0f4bf5c0dcfccbfed3ce8f60b8ddf15c5bb783afd353 Deleted: sha256:d6a9efac320cd2f114c1d623b5732513ae2dc2d69cd3d7f96c988c7b1d056ffe gcloud> docker rmi curlimages/curl Cannot find an image with bash and curl!? I keep getting: gcloud> k exec -it curl -- bash error: Internal error occurred: error executing command in container: failed to exec in container: failed to start exec "aa405a7ab5144fdac76dfb3493fc891089d6ab398787f35175841398816a3b53": OCI runtime exec failed: exec failed: container_linux.go:380: starting container process caused: exec: "bash": executable file not found in $PATH: unknown curl> cat Dockerfile FROM ubuntu:latest RUN apt-get update ; apt-get -y install curl RUN apt-get update ; apt-get -y install bash ENTRYPOINT /bin/bash curl> docker build -t docker.io/marcsf/curl . ... Successfully built d9638e68aa11 Successfully tagged marcsf/curl:latest curl> docker push docker.io/marcsf/curl Using default tag: latest The push refers to repository [docker.io/marcsf/curl] 1ce85c6c8a42: Pushed 32aa7d9b9914: Pushed 9f54eef41275: Layer already exists latest: digest: sha256:3b2b95dd6ef8b0bb6fcb93f020ab47f2c0d104be45cd5e3c2d4cb42e36ebfd18 size: 951 gcloud> k create -f curl.yaml pod/curl created gcloud> k logs --since=60s --timestamps=true curl main gcloud> k get po curl NAME READY STATUS RESTARTS AGE curl 1/1 Running 0 29s gcloud> k logs --since=60s --timestamps=true curl main gcloud> k get svc | egrep ^kubernetes kubernetes ClusterIP 10.116.0.1 443/TCP 15d gcloud> k exec -it curl -- bash root@curl:/# env | grep KUBERNETES_SERVICE KUBERNETES_SERVICE_PORT_HTTPS=443 KUBERNETES_SERVICE_PORT=443 KUBERNETES_SERVICE_HOST=10.116.0.1 root@curl:/# curl https://kubernetes curl: (60) SSL certificate problem: unable to get local issuer certificate More details here: https://curl.haxx.se/docs/sslcerts.html curl failed to verify the legitimacy of the server and therefore could not establish a secure connection to it. 
To learn more about this situation and how to fix it, please visit the web page mentioned above. root@curl:/# ls /var/run/secrets/kubernetes.io/serviceaccount/ ca.crt namespace token root@curl:/# curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt https://kubernetes { "kind": "Status", "apiVersion": "v1", "metadata": { }, "status": "Failure", "message": "forbidden: User \"system:anonymous\" cannot get path \"/\"", "reason": "Forbidden", "details": { }, "code": 403 root@curl:/# export CURL_CA_BUNDLE=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt root@curl:/# TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) root@curl:/# curl -H "Authorization: Bearer $TOKEN" https://kubernetes { "kind": "Status", "apiVersion": "v1", "metadata": { }, "status": "Failure", "message": "forbidden: User \"system:serviceaccount:default:default\" cannot get path \"/\"", "reason": "Forbidden", "details": { }, "code": 403 https://medium.com/@antoine_martin/kubernetes-access-the-api-inside-a-pod-eb49af8c8b06 gcloud> k create role pod-reader --verb=get --verb=list --verb=watch --resource=pods,services,deployments role.rbac.authorization.k8s.io/pod-reader created gcloud> k create rolebinding default-pod-reader --role=pod-reader --serviceaccount=default:default --namespace=default rolebinding.rbac.authorization.k8s.io/default-pod-reader created root@curl:/# exit gcloud> k exec -it curl -- bash root@curl:/# TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) root@curl:/# export CURL_CA_BUNDLE=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt root@curl:/# NS=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) root@curl:/# curl -H "Authorization: Bearer $TOKEN" https://kubernetes/api/v1/namespaces/$NS/pods { "kind": "PodList", "apiVersion": "v1", "metadata": { "resourceVersion": "8577388" }, "items": [ ... root@curl:/# curl -H "Authorization: Bearer $TOKEN" https://kubernetes/api/v1/namespaces/$NS/pods 2>/dev/null | wc -l 2001 root@curl:/# kapi() { curl -s -H "Authorization: Bearer $TOKEN" https://kubernetes/api/v1/namespaces/$NS/$1 ;} root@curl:/# kapi pods/`hostname` | wc -l 146 But p 242: gcloud> k create clusterrolebinding permissive-binding --clusterrole=cluster-admin --group=system:serviceaccounts clusterrolebinding.rbac.authorization.k8s.io/permissive-binding created root@curl:/# curl -s -H "Authorization: Bearer $TOKEN" https://kubernetes | wc -l 158 root@curl:/# curl -s -H "Authorization: Bearer $TOKEN" https://kubernetes | head { "paths": [ "/.well-known/openid-configuration", "/api", "/api/v1", "/apis", "/apis/", "/apis/admissionregistration.k8s.io", "/apis/admissionregistration.k8s.io/v1", "/apis/admissionregistration.k8s.io/v1beta1", (23) Failed writing body gcloud> k get clusterrolebinding permissive-binding NAME ROLE AGE permissive-binding ClusterRole/cluster-admin 27m gcloud> k get rolebinding NAME ROLE AGE default-pod-reader Role/pod-reader 51m gcloud> k get role NAME CREATED AT pod-reader 2021-11-10T11:12:34Z -------------------------------- November 23 ~> cd tmp/docker/kubia-pet-image/ kubia-pet-image> docker build -t docker.io/marcsf/kubia-pet . 
Sending build context to Docker daemon 3.584kB Step 1/3 : FROM node:7 ---> d9aed20b68a4 Step 2/3 : ADD app.js /app.js ---> 6dfa703ef39a Step 3/3 : ENTRYPOINT ["node", "app.js"] ---> Running in ca5f7f98208d Removing intermediate container ca5f7f98208d ---> ce04606760db Successfully built ce04606760db Successfully tagged marcsf/kubia-pet:latest kubia-pet-image> docker push docker.io/marcsf/kubia-pet Using default tag: latest The push refers to repository [docker.io/marcsf/kubia-pet] c8686c0b0ac8: Pushed ab90d83fa34a: Mounted from marcsf/kubia 8ee318e54723: Mounted from marcsf/kubia e6695624484e: Mounted from marcsf/kubia da59b99bbd3b: Mounted from marcsf/kubia 5616a6292c16: Mounted from marcsf/kubia f3ed6cb59ab0: Mounted from marcsf/kubia 654f45ecb7e3: Mounted from marcsf/kubia 2c40c66f7667: Mounted from marcsf/kubia latest: digest: sha256:3586508c8e1d9b5df20816a743755f40e710dafb772f524ea057c6780dfa7367 size: 2213 gcloud> gcloud components update Beginning update. This process may take several minutes. Your current Cloud SDK version is: 363.0.0 You will be upgraded to version: 365.0.1 ┌─────────────────────────────────────────────────────────┐ │ These components will be updated. │ ├─────────────────────────────────┬────────────┬──────────┤ │ Name │ Version │ Size │ ├─────────────────────────────────┼────────────┼──────────┤ │ BigQuery Command Line Tool │ 2.0.71 │ < 1 MiB │ │ Cloud SDK Core Libraries │ 2021.11.19 │ 21.2 MiB │ │ Cloud Storage Command Line Tool │ 5.5 │ 8.1 MiB │ │ gcloud cli dependencies │ 2021.11.05 │ 11.1 MiB │ └─────────────────────────────────┴────────────┴──────────┘ ... Update done! To revert your SDK to the previously installed version, you may run: $ gcloud components update --version 363.0.0 10.3.2 p 291 I don't know whether 'my cluster' supports 'dynamic provisioning', but I'll hope it does, because it was tricky to create the mongo-db disk, and I failed to mount it, etc... gcloud> k create -f persistent-volumes-gcepd.yaml Error from server (Forbidden): persistentvolumes "pv-a" is forbidden: error querying GCE PD volume pv-a: disk is not found Error from server (Forbidden): persistentvolumes "pv-b" is forbidden: error querying GCE PD volume pv-b: disk is not found Error from server (Forbidden): persistentvolumes "pv-c" is forbidden: error querying GCE PD volume pv-c: disk is not found OK, maybe it does not... gcloud> gcloud compute disks create --size=10GiB --zone=europe-west1-d pv-a WARNING: You have selected a disk size of under [200GB]. This may result in poor I/O performance. For more information, see: https://developers.google.com/compute/docs/disks#performance. Created [https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/zones/europe-west1-d/disks/pv-a]. NAME ZONE SIZE_GB TYPE STATUS pv-a europe-west1-d 10 pd-standard READY New disks are unformatted. You must format and mount a disk before it can be used. 
You can find instructions on how to do this at: https://cloud.google.com/compute/docs/disks/add-persistent-disk#formatting gcloud> gcloud compute disks create --size=10GiB --zone=europe-west1-d pv-b gcloud> gcloud compute disks create --size=10GiB --zone=europe-west1-d pv-c gcloud> gcloud compute disks list | egrep ^pv- pv-a europe-west1-d zone 10 pd-standard READY pv-b europe-west1-d zone 10 pd-standard READY pv-c europe-west1-d zone 10 pd-standard READY gcloud> gcloud compute instances list NAME ZONE MACHINE_TYPE PREEMPTIBLE INTERNAL_IP EXTERNAL_IP STATUS gke-kubia-default-pool-df743581-c0lb europe-west1-d e2-medium 10.132.0.8 34.140.243.120 RUNNING gke-kubia-default-pool-df743581-mmpz europe-west1-d e2-medium 10.132.0.6 34.140.104.98 RUNNING gke-kubia-default-pool-df743581-o6pi europe-west1-d e2-medium 10.132.0.7 34.77.223.125 RUNNING As last time, I can see the 3 disks from https://console.cloud.google.com/compute/disks?project=dev-copilot-329712 gcloud> gcloud compute instances attach-disk gke-kubia-default-pool-df743581-c0lb --disk pv-a No zone specified. Using zone [europe-west1-d] for instance: [gke-kubia-default-pool-df743581-c0lb]. Updated [https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/zones/europe-west1-d/instances/gke-kubia-default-pool-df743581-c0lb]. gcloud> gcloud compute instances attach-disk gke-kubia-default-pool-df743581-mmpz --disk pv-b --zone europe-west1-d Updated [https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/zones/europe-west1-d/instances/gke-kubia-default-pool-df743581-mmpz]. gcloud> gcloud compute instances attach-disk gke-kubia-default-pool-df743581-o6pi --disk pv-c --zone europe-west1-d Updated [https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/zones/europe-west1-d/instances/gke-kubia-default-pool-df743581-o6pi]. kubia-pet-image> gcloud beta compute ssh --zone "europe-west1-d" "gke-kubia-default-pool-df743581-o6pi" --tunnel-through-iap --project "dev-copilot-329712" You do not currently have this command group installed. Using it requires the installation of components: [beta] ... marc@gke-kubia-default-pool-df743581-o6pi ~ $ sudo lsblk | egrep ^sdb sdb 8:16 0 10G 0 disk marc@gke-kubia-default-pool-df743581-o6pi ~ $ sudo mkfs.ext4 -m 0 -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/sdb ... 
marc@gke-kubia-default-pool-df743581-o6pi ~ $ sudo mkdir -p /mnt/disks/pv-c marc@gke-kubia-default-pool-df743581-o6pi ~ $ sudo mount -o discard,defaults /dev/sdb /mnt/disks/pv-c Note: I skip for now the instruction for: 'Configuring automatic mounting on VM restart' kubia-pet-image> gcloud beta compute ssh --zone "europe-west1-d" "gke-kubia-default-pool-df743581-c0lb" --tunnel-through-iap --project "dev-copilot-329712" marc@gke-kubia-default-pool-df743581-c0lb ~ $ sudo lsblk | egrep ^sdb sdb 8:16 0 10G 0 disk marc@gke-kubia-default-pool-df743581-c0lb ~ $ sudo mkfs.ext4 -m 0 -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/sdb marc@gke-kubia-default-pool-df743581-c0lb ~ $ sudo mkdir -p /mnt/disks/pv-a marc@gke-kubia-default-pool-df743581-c0lb ~ $ sudo mount -o discard,defaults /dev/sdb /mnt/disks/pv-a kubia-pet-image> gcloud beta compute ssh --zone "europe-west1-d" "gke-kubia-default-pool-df743581-mmpz" --tunnel-through-iap --project "dev-copilot-329712" marc@gke-kubia-default-pool-df743581-mmpz ~ $ sudo lsblk | egrep ^sdb sdb 8:16 0 10G 0 disk marc@gke-kubia-default-pool-df743581-mmpz ~ $ sudo mkfs.ext4 -m 0 -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/sdb marc@gke-kubia-default-pool-df743581-mmpz ~ $ sudo mkdir -p /mnt/disks/pv-b marc@gke-kubia-default-pool-df743581-mmpz ~ $ sudo mount -o discard,defaults /dev/sdb /mnt/disks/pv-b gcloud> k create -f persistent-volumes-gcepd.yaml persistentvolume/pv-a created persistentvolume/pv-b created persistentvolume/pv-c created gcloud> k get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE external-service ClusterIP 10.116.2.100 80/TCP 25d fortune ClusterIP 10.116.0.131 80/TCP 21d kubernetes ClusterIP 10.116.0.1 443/TCP 29d kubia LoadBalancer 10.116.12.105 34.140.4.154 80:31017/TCP 8d kubia-headless ClusterIP None 80/TCP 23d kubia-loadbalancer LoadBalancer 10.116.9.215 35.205.211.203 80:30827/TCP 25d kubia-nodeport NodePort 10.116.9.43 80:30123/TCP 25d gcloud> k delete svc kubia service "kubia" deleted gcloud> k create -f kubia-service-headless.yaml service/kubia created gcloud> k create -f kubia-statefulset.yaml statefulset.apps/kubia created gcloud> k get po | egrep ^kubia- kubia-0 1/1 Running 0 2m22s kubia-1 1/1 Running 0 2m1s kubia-c79c89cc4-5db9x 1/1 Running 0 8d kubia-c79c89cc4-jsshr 1/1 Running 0 8d kubia-c79c89cc4-kcdr4 1/1 Running 0 8d gcloud> k get pvc NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE data-kubia-0 Bound pvc-37158c4e-4ada-411e-8668-2ec7bf3ef7a1 1Gi RWO standard 7m data-kubia-1 Bound pvc-8753eb16-427b-4b46-9319-8667b24fc3fe 1Gi RWO standard 6m39s mongodb-pvc Bound pvc-908fbd0e-c0b6-46c9-85d3-b0a78f21f250 1Gi RWO fast 18d mongodb-pvc2 Bound pvc-9913cc69-3634-4d95-84d6-5f0924c3d8cf 1Gi RWO standard 18d gcloud> k proxy Starting to serve on 127.0.0.1:8001 kubia-pet-image> curl localhost:8001/api/v1/namespaces/default/pods/kubia-0/proxy/ You've hit kubia-0 Data stored on this pod: No data posted yet kubia-pet-image> curl -X POST -d "Bonjour kubia-0" localhost:8001/api/v1/namespaces/default/pods/kubia-0/proxy/ Data stored on pod kubia-0 kubia-pet-image> curl localhost:8001/api/v1/namespaces/default/pods/kubia-0/proxy/ You've hit kubia-0 Data stored on this pod: Bonjour kubia-0 kubia-pet-image> curl localhost:8001/api/v1/namespaces/default/pods/kubia-1/proxy/ You've hit kubia-1 Data stored on this pod: No data posted yet ... 
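For reference, kubia-service-headless.yaml and kubia-statefulset.yaml are roughly the following (a sketch reconstructed from the book and the output above; the exact storage request and ports may differ, but the claims end up as 1Gi on the default 'standard' class): a headless governing Service plus a two-replica StatefulSet whose volumeClaimTemplates give each pod its own PVC (data-kubia-0, data-kubia-1).
apiVersion: v1
kind: Service
metadata:
  name: kubia
spec:
  clusterIP: None              # headless: per-pod DNS records instead of a single VIP
  selector:
    app: kubia
  ports:
  - name: http
    port: 80
    targetPort: 8080
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kubia
spec:
  serviceName: kubia           # the governing headless service above
  replicas: 2
  selector:
    matchLabels:
      app: kubia
  template:
    metadata:
      labels:
        app: kubia
    spec:
      containers:
      - name: kubia
        image: marcsf/kubia-pet
        ports:
        - name: http
          containerPort: 8080
        volumeMounts:
        - name: data
          mountPath: /var/data   # where app.js writes kubia.txt
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      resources:
        requests:
          storage: 1Gi
      accessModes:
      - ReadWriteOnce
Each pod gets a stable name (kubia-0, kubia-1) and keeps its own claim across rescheduling, which is why the data posted to kubia-0 survives the delete/recreate below. Also note that the old kubia Deployment's pods (kubia-c79c89cc4-*) presumably carry the same app=kubia label, so the headless service and kubia-public pick them up too; that would explain the extra SRV records and the "This is v2 running in pod kubia-c79c89cc4-..." answers further down.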
C-c C-c gcloud> k delete po kubia-0 pod "kubia-0" deleted gcloud> k get po | egrep '^kubia-[0-9]' kubia-0 0/1 ContainerCreating 0 4s kubia-1 1/1 Running 0 15m gcloud> k get po | egrep '^kubia-[0-9]' kubia-0 1/1 Running 0 25s kubia-1 1/1 Running 0 16m gcloud> k proxy & [1] 19518 gcloud> Starting to serve on 127.0.0.1:8001 kubia-pet-image> curl localhost:8001/api/v1/namespaces/default/pods/kubia-0/proxy/ You've hit kubia-0 Data stored on this pod: Bonjour kubia-0 gcloud> k create -f kubia-service-public.yaml service/kubia-public created -------------------------------- November 24 continuing... gcloud> jobs [1]+ Running kubectl proxy & kubia-pet-image> curl localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ You've hit kubia-0 Data stored on this pod: 0x%5B%5D=Graber kubia-pet-image> curl localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ You've hit kubia-1 Data stored on this pod: allall kubia-pet-image> curl localhost:8001/api/v1/namespaces/default/pods/kubia-0/proxy/ You've hit kubia-0 Data stored on this pod: 0x%5B%5D=Graber kubia-pet-image> curl localhost:8001/api/v1/namespaces/default/pods/kubia-1/proxy/ You've hit kubia-1 Data stored on this pod: allall kubia-pet-image> curl -X POST -d "Bonjour kubia-public" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ Data stored on pod kubia-1 kubia-pet-image> curl localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ This is v2 running in pod kubia-c79c89cc4-jsshr kubia-pet-image> curl localhost:8001/api/v1/namespaces/default/pods/kubia-1/proxy/ You've hit kubia-1 Data stored on this pod: Bonjour kubia-public 10.4, p 300 gcloud> k run -it srvlookup --image=tutum/dnsutils --rm --restart=Never -- dig SRV kubia.default.svc.cluster.local ; <<>> DiG 9.9.5-3ubuntu0.2-Ubuntu <<>> SRV kubia.default.svc.cluster.local ;; global options: +cmd ;; Got answer: ;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 742 ;; flags: qr aa rd ra; QUERY: 1, ANSWER: 5, AUTHORITY: 0, ADDITIONAL: 5 ;; QUESTION SECTION: ;kubia.default.svc.cluster.local. IN SRV ;; ANSWER SECTION: kubia.default.svc.cluster.local. 30 IN SRV 10 20 0 61633638313733.kubia.default.svc.cluster.local. kubia.default.svc.cluster.local. 30 IN SRV 10 20 0 kubia-0.kubia.default.svc.cluster.local. kubia.default.svc.cluster.local. 30 IN SRV 10 20 0 3631653430366566.kubia.default.svc.cluster.local. kubia.default.svc.cluster.local. 30 IN SRV 10 20 0 3266666331663835.kubia.default.svc.cluster.local. kubia.default.svc.cluster.local. 30 IN SRV 10 20 0 kubia-1.kubia.default.svc.cluster.local. ;; ADDITIONAL SECTION: 61633638313733.kubia.default.svc.cluster.local. 30 IN A 10.112.0.30 kubia-0.kubia.default.svc.cluster.local. 30 IN A 10.112.0.33 3631653430366566.kubia.default.svc.cluster.local. 30 IN A 10.112.2.14 3266666331663835.kubia.default.svc.cluster.local. 30 IN A 10.112.3.19 kubia-1.kubia.default.svc.cluster.local. 
30 IN A 10.112.3.20 ;; Query time: 28 msec ;; SERVER: 10.116.0.10#53(10.116.0.10) ;; WHEN: Wed Nov 24 10:30:13 UTC 2021 ;; MSG SIZE rcvd: 449 pod "srvlookup" deleted I checked that there is, of course, no /var/data directory on the nodes, so I decided to try replacing it with a writable mount point kubia-pet-image> gcloud beta compute ssh --zone "europe-west1-d" "gke-kubia-default-pool-df743581-mmpz" --tunnel-through-iap --project "dev-copilot-329712" marc@gke-kubia-default-pool-df743581-mmpz ~ $ sudo mkdir -p /mnt/disks/pv marc@gke-kubia-default-pool-df743581-mmpz ~ $ sudo mount -o discard,defaults /dev/sdb /mnt/disks/pv marc@gke-kubia-default-pool-df743581-mmpz ~ $ tput rmam marc@gke-kubia-default-pool-df743581-mmpz ~ $ mount | egrep ^/dev/sdb /dev/sdb on /mnt/disks/pv-b type ext4 (rw,relatime,discard) /dev/sdb on /mnt/disks/pv type ext4 (rw,relatime,discard) And the same for the other two. Note that I got a warning: mount: /mnt/disks/pv: /dev/sdb already mounted on /mnt/disks/pv-a. ...but the two directories show the same content anyway: not a hard link, just the same filesystem mounted at both mount points. marc@gke-kubia-default-pool-df743581-c0lb ~ $ sudo touch /mnt/disks/pv/foo marc@gke-kubia-default-pool-df743581-c0lb ~ $ ls -l /mnt/disks/pv/foo -rw-r--r-- 1 root root 0 Nov 24 10:52 /mnt/disks/pv/foo marc@gke-kubia-default-pool-df743581-c0lb ~ $ ls -l /mnt/disks/pv-a/foo -rw-r--r-- 1 root root 0 Nov 24 10:52 /mnt/disks/pv-a/foo marc@gke-kubia-default-pool-df743581-c0lb ~ $ sudo rm /mnt/disks/pv/foo marc@gke-kubia-default-pool-df743581-c0lb ~ $ sudo mkdir -p /mnt/disks/pv0 marc@gke-kubia-default-pool-df743581-c0lb ~ $ ls -ld /mnt/disks/pv* drwxr-xr-x 3 root root 4096 Nov 24 10:53 /mnt/disks/pv drwxr-xr-x 3 root root 4096 Nov 24 10:53 /mnt/disks/pv-a drwxr-xr-x 2 root root 40 Nov 24 10:55 /mnt/disks/pv0 marc@gke-kubia-default-pool-df743581-c0lb ~ $ sudo rmdir /mnt/disks/pv0 kubia-pet-peers-image> ln ../kubia-pet-image/Dockerfile . kubia-pet-peers-image> ll total 20 drwxrwxr-x 2 marc marc 4096 Nov 24 10:59 . drwxrwxr-x 7 marc marc 4096 Nov 24 10:56 .. -rw-rw-r-- 1 marc marc 2193 Nov 24 10:58 app.js -rw-rw-r-- 1 marc marc 2141 Nov 24 10:36 app.js~ -rw-rw-r-- 2 marc marc 61 Nov 23 18:38 Dockerfile kubia-pet-peers-image> diff app.js~ app.js
6c6,7
< const dataFile = "/var/data/kubia.txt";
---
> const dataFile = "/mnt/disks/pv/data/kubia.txt";
> // const dataFile = "/var/data/kubia.txt";
What I don't understand yet is how this maps to the '/data' handling in app.js (line 41: if (request.url == '/data') { ... and line 60: path: '/data'). Maybe through lines 42-43. kubia-pet-peers-image> docker build -t docker.io/marcsf/kubia-pet-peers . kubia-pet-peers-image> docker push docker.io/marcsf/kubia-pet-peers gcloud> k edit statefulset kubia Waiting for Emacs...
statefulset.apps/kubia edited gcloud> k get po | egrep '^kubia-[012]' kubia-0 1/1 Running 0 14h kubia-1 1/1 Terminating 0 15h kubia-2 1/1 Running 0 43s gcloud> k get po | egrep '^kubia-[012]' kubia-0 1/1 Running 0 14h kubia-1 0/1 ContainerCreating 0 9s kubia-2 1/1 Running 0 62s gcloud> k get po | egrep '^kubia-[012]' kubia-0 1/1 Running 0 14h kubia-1 1/1 Running 0 17s kubia-2 1/1 Running 0 70s gcloud> k get po | egrep '^kubia-[012]' kubia-0 1/1 Terminating 0 15h kubia-1 1/1 Running 0 39s kubia-2 1/1 Running 0 92s gcloud> k get po | egrep '^kubia-[012]' kubia-0 1/1 Running 0 23s kubia-1 1/1 Running 0 73s kubia-2 1/1 Running 0 2m6s kubia-pet-peers-image> curl -X POST -d "Salut kubia peers" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ This is v2 running in pod kubia-c79c89cc4-jsshr kubia-pet-peers-image> curl -X POST -d "Some more kubia peers" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ { "kind": "Status", "apiVersion": "v1", "metadata": { }, "status": "Failure", "message": "error trying to reach service: EOF", "code": 500 } kubia-pet-peers-image> curl -X POST -d "Really, kubia peers?" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ This is v2 running in pod kubia-c79c89cc4-5db9x kubia-pet-peers-image> curl localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ You've hit kubia-1 Data stored in the cluster: - kubia-1.kubia.default.svc.cluster.local: No data posted yet - kubia-0.kubia.default.svc.cluster.local: No data posted yet - 61633638313733.kubia.default.svc.cluster.local: This is v2 running in pod kubia-c79c89cc4-jsshr - 3631653430366566.kubia.default.svc.cluster.local: This is v2 running in pod kubia-c79c89cc4-kcdr4 - 3266666331663835.kubia.default.svc.cluster.local: This is v2 running in pod kubia-c79c89cc4-5db9x - kubia-2.kubia.default.svc.cluster.local: No data posted yet kubia-pet-peers-image> curl -X POST -d "Trying v2, kubia peers?" 
localhost:8001/api/v2/namespaces/default/services/kubia-public/proxy/ { "kind": "Status", "apiVersion": "v1", "metadata": { }, "status": "Failure", "message": "the server could not find the requested resource", "reason": "NotFound", "details": { }, "code": 404 } I went to c0lb (pv-a, aka kubia-0) marc@gke-kubia-default-pool-df743581-c0lb ~ $ ls -l /mnt/disks/pv total 16 drwx------ 2 root root 16384 Nov 23 19:24 lost+found marc@gke-kubia-default-pool-df743581-c0lb ~ $ sudo mkdir /mnt/disks/pv/data kubia-pet-peers-image> curl -X POST -d "After creating data on pv-a" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ This is v2 running in pod kubia-c79c89cc4-5db9x kubia-pet-peers-image> curl localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ This is v2 running in pod kubia-c79c89cc4-5db9x kubia-pet-peers-image> curl localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ You've hit kubia-1 Data stored in the cluster: - 3631653430366566.kubia.default.svc.cluster.local: This is v2 running in pod kubia-c79c89cc4-kcdr4 - kubia-2.kubia.default.svc.cluster.local: No data posted yet - kubia-0.kubia.default.svc.cluster.local: No data posted yet - 3266666331663835.kubia.default.svc.cluster.local: This is v2 running in pod kubia-c79c89cc4-5db9x - kubia-1.kubia.default.svc.cluster.local: No data posted yet - 61633638313733.kubia.default.svc.cluster.local: This is v2 running in pod kubia-c79c89cc4-jsshr and nothing in: marc@gke-kubia-default-pool-df743581-c0lb ~ $ ls -l /mnt/disks/pv/data/ total 0 kubia-pet-peers-image> curl -X POST -d "After creating data on pv-a (1)" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ This is v2 running in pod kubia-c79c89cc4-jsshr kubia-pet-peers-image> curl -X POST -d "After creating data on pv-a (2)" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ { "kind": "Status", "apiVersion": "v1", "metadata": { }, "status": "Failure", "message": "error trying to reach service: EOF", "code": 500 kubia-pet-peers-image> curl -X POST -d "After creating data on pv-a (3)" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ { "kind": "Status", "apiVersion": "v1", "metadata": { }, "status": "Failure", "message": "error trying to reach service: EOF", "code": 500 } kubia-pet-peers-image>curl localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ You've hit kubia-0 Data stored in the cluster: - 61633638313733.kubia.default.svc.cluster.local: This is v2 running in pod kubia-c79c89cc4-jsshr - kubia-0.kubia.default.svc.cluster.local: No data posted yet - kubia-2.kubia.default.svc.cluster.local: No data posted yet - 3631653430366566.kubia.default.svc.cluster.local: This is v2 running in pod kubia-c79c89cc4-kcdr4 - 3266666331663835.kubia.default.svc.cluster.local: This is v2 running in pod kubia-c79c89cc4-5db9x - kubia-1.kubia.default.svc.cluster.local: No data posted yet gcloud> k get po | egrep ^kubia kubia-0 1/1 Running 1 13m kubia-1 1/1 Running 1 13m kubia-2 1/1 Running 1 14m kubia-c79c89cc4-5db9x 1/1 Running 0 8d kubia-c79c89cc4-jsshr 1/1 Running 0 8d kubia-c79c89cc4-kcdr4 1/1 Running 0 8d gcloud> k get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE external-service ClusterIP 10.116.2.100 80/TCP 25d fortune ClusterIP 10.116.0.131 80/TCP 21d kubernetes ClusterIP 10.116.0.1 443/TCP 29d kubia ClusterIP None 80/TCP 15h kubia-headless ClusterIP None 80/TCP 23d kubia-loadbalancer LoadBalancer 10.116.9.215 35.205.211.203 80:30827/TCP 25d kubia-nodeport NodePort 
10.116.9.43 80:30123/TCP 25d kubia-public ClusterIP 10.116.13.103 80/TCP 15h gcloud> k delete svc kubia-loadbalancer service "kubia-loadbalancer" deleted gcloud> k delete svc kubia-nodeport service "kubia-nodeport" deleted gcloud> k delete svc kubia-headless service "kubia-headless" deleted gcloud> k delete svc fortune service "fortune" deleted gcloud> k delete svc external-service service "external-service" deleted gcloud> k get po | egrep ^kubia kubia-0 1/1 Running 1 20m kubia-1 1/1 Running 1 21m kubia-2 1/1 Running 1 21m kubia-c79c89cc4-5db9x 1/1 Running 0 8d kubia-c79c89cc4-jsshr 1/1 Running 0 8d kubia-c79c89cc4-kcdr4 1/1 Running 0 8d gcloud> k delete po kubia-c79c89cc4-5db9x pod "kubia-c79c89cc4-5db9x" deleted gcloud> k delete po kubia-c79c89cc4-jsshr gcloud> pod "kubia-c79c89cc4-jsshr" deleted gcloud> k delete po kubia-c79c89cc4-kcdr4 gcloud> pod "kubia-c79c89cc4-kcdr4" deleted gcloud> k get po | egrep ^kubia kubia-0 1/1 Running 1 22m kubia-1 1/1 Running 1 23m kubia-2 1/1 Running 1 24m kubia-c79c89cc4-j7dwh 1/1 Running 0 40s kubia-c79c89cc4-szbf8 1/1 Running 0 81s kubia-c79c89cc4-trrqz 1/1 Running 0 113s Well... It doesn't work. I restore the original path in the image, build, push... kubia-pet-peers-image> curl -X POST -d "Restored /var/data in app.js" localhost:8001/api/v1/namespaces/default/service/kubia-public/proxy/ Data stored on pod kubia-2 kubia-pet-peers-image> curl localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ You've hit kubia-1 Data stored in the cluster: - 6634366365316336.kubia.default.svc.cluster.local: This is v2 running in pod kubia-c79c89cc4-j7dwh - kubia-0.kubia.default.svc.cluster.local: No data posted yet - 6234666235653434.kubia.default.svc.cluster.local: This is v2 running in pod kubia-c79c89cc4-szbf8 - kubia-2.kubia.default.svc.cluster.local: Restored /var/data in app.js - 6266636563366633.kubia.default.svc.cluster.local: This is v2 running in pod kubia-c79c89cc4-trrqz - kubia-1.kubia.default.svc.cluster.local: Bonjour kubia-public kubia-pet-peers-image> curl -s localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ | egrep 'kubia-[012]' You've hit kubia-1 - kubia-1.kubia.default.svc.cluster.local: Bonjour kubia-public - kubia-0.kubia.default.svc.cluster.local: No data posted yet - kubia-2.kubia.default.svc.cluster.local: Restored /var/data in app.js I have in kubia-statefulset.yaml 24: mountPath: /var/data Replacing with: - mountPath: /mnt/disks/pv/data kubia-pet-peers-image> docker images | grep marcsf/kubia-pet marcsf/kubia-pet-peers latest 1630d096b581 11 minutes ago 660MB marcsf/kubia-pet-peers baf879155215 40 minutes ago 660MB marcsf/kubia-pet latest ce04606760db 17 hours ago 660MB kubia-pet-peers-image> docker tag baf879155215 marcsf/kubia-pet:mnt kubia-pet-peers-image> docker images | grep marcsf/kubia-pet marcsf/kubia-pet-peers latest 1630d096b581 14 minutes ago 660MB marcsf/kubia-pet mnt baf879155215 44 minutes ago 660MB marcsf/kubia-pet-peers baf879155215 44 minutes ago 660MB marcsf/kubia-pet latest ce04606760db 17 hours ago 660MB kubia-pet-peers-image> docker tag baf879155215 marcsf/kubia-pet-peers:mnt kubia-pet-peers-image> docker images | grep marcsf/kubia-pet marcsf/kubia-pet-peers latest 1630d096b581 15 minutes ago 660MB marcsf/kubia-pet-peers mnt baf879155215 44 minutes ago 660MB marcsf/kubia-pet mnt baf879155215 44 minutes ago 660MB marcsf/kubia-pet latest ce04606760db 17 hours ago 660MB kubia-pet-peers-image> docker push docker.io/marcsf/kubia-pet-peers:mnt kubia-pet-peers-image> curl -X POST -d "mnt 
mount in app.js" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ This is v2 running in pod kubia-c79c89cc4-trrqz kubia-pet-peers-image> curl -s localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ | egrep 'kubia-[012]' You've hit kubia-0 - kubia-0.kubia.default.svc.cluster.local: No data posted yet - kubia-1.kubia.default.svc.cluster.local: No data posted yet - kubia-2.kubia.default.svc.cluster.local: No data posted yet gcloud> k delete po kubia-0 pod "kubia-0" deleted gcloud> k get po | egrep '^kubia-[012]' kubia-0 1/1 Running 0 19s kubia-1 1/1 Running 1 8m48s kubia-2 1/1 Running 0 9m35s gcloud> k delete po kubia-1 pod "kubia-1" deleted gcloud> k delete po kubia-2 pod "kubia-2" deleted gcloud> k get po | egrep '^kubia-[012]' kubia-0 1/1 Running 0 2m45s kubia-1 1/1 Running 0 75s kubia-2 1/1 Running 0 31s kubia-pet-peers-image> curl -X POST -d "deleted pods after fixing the mount point" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ This is v2 running in pod kubia-c79c89cc4-trrqz kubia-pet-peers-image> curl -s localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ | egrep 'kubia-[012]' You've hit kubia-2 - kubia-0.kubia.default.svc.cluster.local: No data posted yet - kubia-2.kubia.default.svc.cluster.local: No data posted yet - kubia-1.kubia.default.svc.cluster.local: No data posted yet kubia-pet-peers-image> curl -s localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ | egrep 'kubia-[012]' You've hit kubia-1 - kubia-1.kubia.default.svc.cluster.local: No data posted yet - kubia-0.kubia.default.svc.cluster.local: No data posted yet - kubia-2.kubia.default.svc.cluster.local: No data posted yet kubia-pet-peers-image> curl -s localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ | egrep 'kubia-[012]' You've hit kubia-0 - kubia-1.kubia.default.svc.cluster.local: No data posted yet - kubia-0.kubia.default.svc.cluster.local: No data posted yet - kubia-2.kubia.default.svc.cluster.local: No data posted yet gcloud> k get statefulset kubia -o yaml | grep mount k:{"mountPath":"/mnt/disks/pv/data"}: f:mountPath: {} - mountPath: /mnt/disks/pv/data kubia-pet-peers-image> docker push docker.io/marcsf/kubia-pet-peers:latest gcloud> k edit statefulset kubia Waiting for Emacs... statefulset.apps/kubia edited back to /var/data... 
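A quick way to double-check what the StatefulSet is actually running after each of these edits, before the pods finish cycling (a sketch; it assumes a single container in the pod template, as in the book's kubia-statefulset.yaml):

# image and mount path currently in the pod template
kubectl get statefulset kubia -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
kubectl get statefulset kubia -o jsonpath='{.spec.template.spec.containers[0].volumeMounts[0].mountPath}{"\n"}'
# wait for the rolling replacement triggered by the edit
# (RollingUpdate replaces pods from the highest ordinal down, which matches
#  the kubia-2, then kubia-1, then kubia-0 sequence seen in this log)
kubectl rollout status statefulset kubia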
gcloud> k get po | egrep '^kubia-[012]' kubia-0 1/1 Running 0 10m kubia-1 1/1 Terminating 0 9m14s kubia-2 1/1 Running 0 40s kubia-pet-peers-image> curl -s localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ | egrep 'kubia-[012]' You've hit kubia-0 - kubia-0.kubia.default.svc.cluster.local: No data posted yet - kubia-2.kubia.default.svc.cluster.local: Restored /var/data in app.js kubia-pet-peers-image> curl -X POST -d "back to square 1" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ Data stored on pod kubia-1 kubia-pet-peers-image> curl -s localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ | egrep 'kubia-[012]' You've hit kubia-2 - kubia-2.kubia.default.svc.cluster.local: Restored /var/data in app.js - kubia-1.kubia.default.svc.cluster.local: back to square 1 - kubia-0.kubia.default.svc.cluster.local: 0x%5B%5D=Graber gcloud> k get po | egrep '^kubia-[012]' kubia-0 1/1 Running 0 50s kubia-1 1/1 Running 0 113s kubia-2 1/1 Running 0 2m40s kubia-pet-peers-image> curl -X POST -d "back to square 1/2" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ Data stored on pod kubia-2 kubia-pet-peers-image> curl -s localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ | egrep 'kubia-[012]' You've hit kubia-2 - kubia-0.kubia.default.svc.cluster.local: 0x%5B%5D=Graber - kubia-2.kubia.default.svc.cluster.local: back to square 1/2 - kubia-1.kubia.default.svc.cluster.local: back to square 1 kubia-pet-peers-image> curl -X POST -d "back to square 1/3" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ Data stored on pod kubia-1 kubia-pet-peers-image> curl -s localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ | egrep 'kubia-[012]' kubia-pet-peers-image> curl -s localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ | egrep 'kubia-[012]' kubia-pet-peers-image> curl -s localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ | egrep 'kubia-[012]' You've hit kubia-1 - kubia-1.kubia.default.svc.cluster.local: back to square 1/3 - kubia-0.kubia.default.svc.cluster.local: 0x%5B%5D=Graber - kubia-2.kubia.default.svc.cluster.local: back to square 1/2 kubia-pet-peers-image> curl -X POST -d "back to square 1/4" localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ Data stored on pod kubia-0 kubia-pet-peers-image> curl -s localhost:8001/api/v1/namespaces/default/services/kubia-public/proxy/ | egrep 'kubia-[012]' You've hit kubia-1 - kubia-1.kubia.default.svc.cluster.local: back to square 1/3 - kubia-2.kubia.default.svc.cluster.local: back to square 1/2 - kubia-0.kubia.default.svc.cluster.local: back to square 1/4 kubia-pet-peers-image> gcloud beta compute ssh --zone "europe-west1-d" "gke-kubia-default-pool-df743581-c0lb" --tunnel-through-iap --project "dev-copilot-329712" marc@gke-kubia-default-pool-df743581-c0lb ~ $ sudo ifconfig eth0 down gcloud> k get no NAME STATUS ROLES AGE VERSION gke-kubia-default-pool-df743581-c0lb NotReady 18d v1.20.10-gke.1600 gke-kubia-default-pool-df743581-mmpz Ready 18d v1.20.10-gke.1600 gke-kubia-default-pool-df743581-o6pi Ready 18d v1.20.10-gke.1600 gcloud> k get po | egrep '^kubia-[012]' kubia-0 1/1 Terminating 0 90m kubia-1 1/1 Running 0 91m kubia-2 1/1 Terminating 0 92m gcloud> k describe po kubia-0 ... Node: gke-kubia-default-pool-df743581-c0lb/10.132.0.8 ... Status: Terminating (lasts ) Termination Grace Period: 30s ... 
Events: Type Reason Age From Message ---- ------ ---- ---- ------- Warning NodeNotReady 5m19s node-controller Node is not ready gcloud> k describe po kubia-2 ... Node: gke-kubia-default-pool-df743581-c0lb/10.132.0.8 ... gcloud> k delete po kubia-0 --force --grace-period 0 warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "kubia-0" force deleted gcloud> k get po | egrep '^kubia-[012]' kubia-0 0/1 ContainerCreating 0 17s kubia-1 1/1 Running 0 97m kubia-2 1/1 Terminating 0 97m gcloud> k delete po kubia-2 --force --grace-period 0 warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. pod "kubia-2" force deleted gcloud> k get po | egrep '^kubia-[012]' kubia-0 0/1 ContainerCreating 0 50s kubia-1 1/1 Running 0 97m gcloud> k get po | egrep '^kubia-[012]' kubia-0 1/1 Running 0 2m40s kubia-1 1/1 Running 0 99m kubia-2 1/1 Running 0 91s kubia-pet-peers-image> gcloud compute instances reset --zone "europe-west1-d" "gke-kubia-default-pool-df743581-c0lb" --project "dev-copilot-329712" Updated [https://www.googleapis.com/compute/v1/projects/dev-copilot-329712/zones/europe-west1-d/instances/gke-kubia-default-pool-df743581-c0lb]. gcloud> k get no NAME STATUS ROLES AGE VERSION gke-kubia-default-pool-df743581-c0lb Ready 18d v1.20.10-gke.1600 gke-kubia-default-pool-df743581-mmpz Ready 18d v1.20.10-gke.1600 gke-kubia-default-pool-df743581-o6pi Ready 18d v1.20.10-gke.1600 -------------------------------- December 1 go> sudo systemctl list-unit-files | grep -i skype snap-skype-190.mount enabled snap-skype-194.mount enabled go> sudo systemctl -a | grep -i skype run-snapd-ns-skype.mnt.mount loaded active mounted /run/snapd/ns/skype.mnt snap-skype-190.mount loaded active mounted Mount unit for skype, revision 190 snap-skype-194.mount loaded active mounted Mount unit for skype, revision 194 go> sudo systemctl disable snap-skype-190.mount go> sudo systemctl disable snap-skype-194.mount go> sudo systemctl list-unit-files | grep -i skype snap-skype-190.mount disabled snap-skype-194.mount disabled go> sudo systemctl -a | grep -i skype run-snapd-ns-skype.mnt.mount loaded active mounted /run/snapd/ns/skype.mnt snap-skype-190.mount loaded active mounted Mount unit for skype, revision 190 snap-skype-194.mount loaded active mounted Mount unit for skype, revision 194 Chapter 11, p 311 gcloud> k get componentstatuses Warning: v1 ComponentStatus is deprecated in v1.19+ NAME STATUS MESSAGE ERROR scheduler Healthy ok etcd-0 Healthy {"health":"true"} etcd-1 Healthy {"health":"true"} controller-manager Healthy ok gcloud> k get po -o custom-columns=POD:metadata.name,NODE:spec.nodeName --sort-by spec.nodeName -n kube-system POD NODE gke-metrics-agent-js2dg gke-kubia-default-pool-df743581-c0lb fluentbit-gke-br5z6 gke-kubia-default-pool-df743581-c0lb pdcsi-node-9hmch gke-kubia-default-pool-df743581-c0lb kube-proxy-gke-kubia-default-pool-df743581-c0lb gke-kubia-default-pool-df743581-c0lb konnectivity-agent-54d48c955-2zl5x gke-kubia-default-pool-df743581-mmpz metrics-server-v0.3.6-9c5bbf784-wzfnz gke-kubia-default-pool-df743581-mmpz gke-metrics-agent-29cpm gke-kubia-default-pool-df743581-mmpz fluentbit-gke-v77cg gke-kubia-default-pool-df743581-mmpz konnectivity-agent-54d48c955-66rts gke-kubia-default-pool-df743581-mmpz l7-default-backend-56cb9644f6-d96vp gke-kubia-default-pool-df743581-mmpz 
konnectivity-agent-autoscaler-6cb774c9cc-vrh7j gke-kubia-default-pool-df743581-mmpz kube-dns-autoscaler-844c9d9448-vr4sp gke-kubia-default-pool-df743581-mmpz kube-proxy-gke-kubia-default-pool-df743581-mmpz gke-kubia-default-pool-df743581-mmpz kube-dns-b4f5c58c7-zqrsg gke-kubia-default-pool-df743581-mmpz event-exporter-gke-67986489c8-dvv4s gke-kubia-default-pool-df743581-mmpz pdcsi-node-nfdn8 gke-kubia-default-pool-df743581-mmpz kube-proxy-gke-kubia-default-pool-df743581-o6pi gke-kubia-default-pool-df743581-o6pi konnectivity-agent-54d48c955-pz6d9 gke-kubia-default-pool-df743581-o6pi gke-metrics-agent-8wkf7 gke-kubia-default-pool-df743581-o6pi fluentbit-gke-rt2sx gke-kubia-default-pool-df743581-o6pi pdcsi-node-d8ff7 gke-kubia-default-pool-df743581-o6pi kube-dns-b4f5c58c7-vsjgj gke-kubia-default-pool-df743581-o6pi So... wrt the book: - no master node - no kube-controller-manager-master - two kube-dns-b4f5c58c7 on different nodes - one kube-dns-autoscaler (on one of the two nodes running a dns) - no etcd-master - no kube-apiserver-master - no kube-scheduler-master - no kube-flannel-ds only the kube-proxies are there. Lots of other stuff: - one fluentbit-gke per node - one konnectivity-agent per node plus one autoscaler - one gke-metrics-agent per node - one pdcsi-node per node - one event-exporter - one l7-default-backend - one metrics-server gcloud> sudo etcdctl ls /registry Error: client: etcd cluster is unavailable or misconfigured; error #0: dial tcp 127.0.0.1:2379: connect: connection refused ; error #1: dial tcp 127.0.0.1:4001: connect: connection refused error #0: dial tcp 127.0.0.1:2379: connect: connection refused error #1: dial tcp 127.0.0.1:4001: connect: connection refused gcloud> sudo snap install kube-apiserver kube-apiserver 1.22.4 from Canonical✓ installed gcloud> kube-apiserver -h | grep enable-admission-plugins --admission-control strings Admission is divided into two phases. In the first phase, only mutating admission plugins run. In the second phase, only validating admission plugins run. The names in the below list may represent a validating plugin, a mutating plugin, or both. The order of plugins in which they are passed to this flag does not matter. Comma-delimited list of: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyServiceExternalIPs, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurity, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. (DEPRECATED: Use --enable-admission-plugins or --disable-admission-plugins instead. Will be removed in a future version.) 
--enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, PodSecurity, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyServiceExternalIPs, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurity, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. -------------------------------- December 2 gcloud> kube-apiserver -h | grep enable-admission-plugins | tail -1 | perl -nle '($a)=/^.*: (.*?)\.[^:]*$/; print for split/, /,$a' AlwaysAdmit AlwaysDeny AlwaysPullImages CertificateApproval CertificateSigning CertificateSubjectRestriction DefaultIngressClass DefaultStorageClass DefaultTolerationSeconds DenyServiceExternalIPs EventRateLimit ExtendedResourceToleration ImagePolicyWebhook LimitPodHardAntiAffinityTopology LimitRanger MutatingAdmissionWebhook NamespaceAutoProvision NamespaceExists NamespaceLifecycle NodeRestriction OwnerReferencesPermissionEnforcement PersistentVolumeClaimResize PersistentVolumeLabel PodNodeSelector PodSecurity PodSecurityPolicy PodTolerationRestriction Priority ResourceQuota RuntimeClass SecurityContextDeny ServiceAccount StorageObjectInUseProtection TaintNodesByCondition ValidatingAdmissionWebhook gcloud> kube-apiserver -h | grep enable-admission-plugins | tail -1 | perl -nle 's/.*\((.*)\).*$/$1/; print for split/, /,$_' NamespaceLifecycle LimitRanger ServiceAccount TaintNodesByCondition PodSecurity Priority DefaultTolerationSeconds DefaultStorageClass StorageObjectInUseProtection PersistentVolumeClaimResize RuntimeClass CertificateApproval CertificateSigning CertificateSubjectRestriction DefaultIngressClass MutatingAdmissionWebhook ValidatingAdmissionWebhook ResourceQuota The latter are 'default enabled admission plugins' The former, all 'admission plugins' And indeed, all the latter are found among the former ones. gcloud> k get po --watch NAME READY STATUS RESTARTS AGE downward 1/1 Running 0 22d fortune3s 2/2 Running 0 23d kubia-0 1/1 Running 0 7d19h kubia-1 1/1 Running 0 7d21h kubia-2 1/1 Running 1 7d19h kubia-c79c89cc4-j7dwh 1/1 Running 0 7d22h kubia-c79c89cc4-jgh9b 1/1 Running 0 7d20h kubia-c79c89cc4-trrqz 1/1 Running 0 7d22h ... 
downward 1/1 Terminating 0 22d downward 0/1 Terminating 0 22d downward 0/1 Terminating 0 22d downward 0/1 Terminating 0 22d fortune3s 2/2 Terminating 0 23d fortune3s 0/2 Terminating 0 23d fortune3s 0/2 Terminating 0 23d fortune3s 0/2 Terminating 0 23d In parallel: ~> k delete pod downward pod "downward" deleted ~> k delete pod fortune3s pod "fortune3s" deleted I looked at the replicaset controller code. It is in Go, so there is no constructor or method strictly speaking: syncHandler is a function field inside the ReplicaSetController struct. On the other hand, judging from the git history, it looks like it has always been that way. p 329 gcloud> k get rc -n kube-system No resources found in kube-system namespace. But this would have been for minikube... gcloud> k get deploy -n kube-system NAME READY UP-TO-DATE AVAILABLE AGE event-exporter-gke 1/1 1 1 41d konnectivity-agent 3/3 3 3 41d konnectivity-agent-autoscaler 1/1 1 1 41d kube-dns 2/2 2 2 41d kube-dns-autoscaler 1/1 1 1 41d l7-default-backend 1/1 1 1 41d metrics-server-v0.3.6 1/1 1 1 41d gcloud> k get events LAST SEEN TYPE REASON OBJECT MESSAGE 29s Warning NegCRError servicenetworkendpointgroup/k8s1-105246cb-default-kubia-nodeport-80-b56c78cc failed to delete NEG k8s1-105246cb-default-kubia-nodeport-80-b56c78cc in europe-west1-d: googleapi: Error 400: The network_endpoint_group resource 'projects/dev-copilot-329712/zones/europe-west1-d/networkEndpointGroups/k8s1-105246cb-default-kubia-nodeport-80-b56c78cc' is already being used by 'projects/dev-copilot-329712/global/backendServices/k8s1-105246cb-default-kubia-nodeport-80-b56c78cc', resourceInUseByAnotherResource 35m Normal Sync ingress/kubia Scheduled for sync 13m Warning Translate ingress/kubia Translation failed: invalid ingress spec: could not find service "default/kubia-nodeport" gcloud> k get po nginx NAME READY STATUS RESTARTS AGE nginx 1/1 Running 0 33s gcloud> k get no NAME STATUS ROLES AGE VERSION gke-kubia-default-pool-df743581-c0lb Ready 26d v1.20.10-gke.1600 gke-kubia-default-pool-df743581-mmpz Ready 26d v1.20.10-gke.1600 gke-kubia-default-pool-df743581-o6pi Ready 26d v1.20.10-gke.1600 Trying all 3 nodes in turn: ~> gcloud beta compute ssh --zone "europe-west1-d" "gke-kubia-default-pool-df743581-o6pi" --tunnel-through-iap --project "dev-copilot-329712" marc@gke-kubia-default-pool-df743581-o6pi ~ $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES No nginx...
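Most likely the empty docker ps has nothing to do with the pod: these GKE nodes run containerd as the container runtime (the cloud.google.com/gke-container-runtime=containerd node label shows up further down), so the Docker daemon simply has no containers. Something like this should list them instead (a sketch; I'm assuming crictl is preinstalled on the COS nodes):

# on the node: list CRI (containerd) containers rather than Docker ones
sudo crictl ps | grep nginx
# or, without ssh-ing in, confirm the runtime from the API server
kubectl get no -o wide    # the CONTAINER-RUNTIME column should read containerd://...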
gcloud> k get events LAST SEEN TYPE REASON OBJECT MESSAGE 92s Warning NegCRError servicenetworkendpointgroup/k8s1-105246cb-default-kubia-nodeport-80-b56c78cc failed to delete NEG k8s1-105246cb-default-kubia-nodeport-80-b56c78cc in europe-west1-d: googleapi: Error 400: The network_endpoint_group resource 'projects/dev-copilot-329712/zones/europe-west1-d/networkEndpointGroups/k8s1-105246cb-default-kubia-nodeport-80-b56c78cc' is already being used by 'projects/dev-copilot-329712/global/backendServices/k8s1-105246cb-default-kubia-nodeport-80-b56c78cc', resourceInUseByAnotherResource 42m Normal Sync ingress/kubia Scheduled for sync 3m57s Warning Translate ingress/kubia Translation failed: invalid ingress spec: could not find service "default/kubia-nodeport" 3m10s Normal Scheduled pod/nginx Successfully assigned default/nginx to gke-kubia-default-pool-df743581-c0lb 3m9s Normal Pulling pod/nginx Pulling image "nginx" 3m3s Normal Pulled pod/nginx Successfully pulled image "nginx" in 6.210106946s 3m1s Normal Created pod/nginx Created container nginx 3m1s Normal Started pod/nginx Started container nginx gcloud> k delete svc kubia-nodeport Error from server (NotFound): services "kubia-nodeport" not found gcloud> k get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.116.0.1 443/TCP 37d kubia ClusterIP None 80/TCP 8d kubia-public ClusterIP 10.116.13.103 80/TCP 8d gcloud> k get ingress NAME CLASS HOSTS ADDRESS PORTS AGE kubia kubia.example.com 34.107.136.15 80, 443 33d gcloud> k delete ingress kubia ingress.networking.k8s.io "kubia" deleted gcloud> k get ingress No resources found in default namespace. gcloud> k get events LAST SEEN TYPE REASON OBJECT MESSAGE 3m33s Warning NegCRError servicenetworkendpointgroup/k8s1-105246cb-default-kubia-nodeport-80-b56c78cc failed to delete NEG k8s1-105246cb-default-kubia-nodeport-80-b56c78cc in europe-west1-d: googleapi: Error 400: The network_endpoint_group resource 'projects/dev-copilot-329712/zones/europe-west1-d/networkEndpointGroups/k8s1-105246cb-default-kubia-nodeport-80-b56c78cc' is already being used by 'projects/dev-copilot-329712/global/backendServices/k8s1-105246cb-default-kubia-nodeport-80-b56c78cc', resourceInUseByAnotherResource 69s Normal Sync ingress/kubia Scheduled for sync 10m Warning Translate ingress/kubia Translation failed: invalid ingress spec: could not find service "default/kubia-nodeport" 9m13s Normal Scheduled pod/nginx Successfully assigned default/nginx to gke-kubia-default-pool-df743581-c0lb 9m12s Normal Pulling pod/nginx Pulling image "nginx" 9m6s Normal Pulled pod/nginx Successfully pulled image "nginx" in 6.210106946s 9m4s Normal Created pod/nginx Created container nginx 9m4s Normal Started pod/nginx Started container nginx gcloud> k describe po nginx | egrep ^Node Node: gke-kubia-default-pool-df743581-c0lb/10.132.0.9 Node-Selectors: But there as well, even after some time: marc@gke-kubia-default-pool-df743581-c0lb ~ $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -------------------------------- December 4 12.1.2, p 350 gcloud> k create sa foo serviceaccount/foo created gcloud> k describe sa foo Name: foo Namespace: default Labels: Annotations: Image pull secrets: Mountable secrets: foo-token-pxpz7 Tokens: foo-token-pxpz7 Events: gcloud> k describe secret foo-token-pxpz7 Name: foo-token-pxpz7 Namespace: default Labels: Annotations: kubernetes.io/service-account.name: foo kubernetes.io/service-account.uid: a43c6466-0c5a-412f-aec7-c5c9cb2a57dc Type: kubernetes.io/service-account-token Data 
==== namespace: 7 bytes token: eyJhbGciOiJSUzI1NiIsImtpZCI6InRaUWszVDF3WnJBSmw5QUlsNDFETXJDRFA5Q0dNZDZQOTlkdDhuQ2F1YmcifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImZvby10b2tlbi1weHB6NyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJmb28iLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJhNDNjNjQ2Ni0wYzVhLTQxMmYtYWVjNy1jNWM5Y2IyYTU3ZGMiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDpmb28ifQ.A7BiE7udCwO4vXLkrIxOgMSnB8nY9vCj-ZwV56Iv1XZSz6WRpaoixNEGM4W_rpuLWfUkluhOel6KI5CCmJfR-jK8QOSj4Q73LKVkBKTE1BS1_rfAkkr7fKETT-UeiVTxeuWITtA_dbolWhC_iW_LsuFSToRGz3zHbooEMytJLf8pTayrtgR2PSk5PikrHZVNIhlKsXiY9hebdTY0i9gciml3D8RHDNXYI6sMBVPdNEaJwQtQVFx31zG5sfc_yfq7vLoNdvwm1rbgSyR7d2AwBUuqg1-0jvYJYvNiKk0JSjL5aNERGa-LopxFYOjqgWtNY-9SbBvxsXCXFX-RQEvmsA ca.crt: 1509 bytes gcloud> k create -f sa-image-pull-secrets.yaml serviceaccount/my-service-account created gcloud> k create -f curl-custom-sa.yaml pod/curl-custom-sa created gcloud> k exec -it curl-custom-sa -c main -- cat /var/run/secrets/kubernetes.io/serviceaccount/token error: unable to upgrade connection: container not found ("main") Reusing the marcsf/curl docker image I created last time I had the same issue, now in curl-custom-sa.yaml gcloud> k delete po curl-custom-sa pod "curl-custom-sa" deleted gcloud> k create -f curl-custom-sa.yaml pod/curl-custom-sa created gcloud> k get po curl-custom-sa NAME READY STATUS RESTARTS AGE curl-custom-sa 2/2 Running 0 32s gcloud> k exec -it curl-custom-sa -c main -- cat /var/run/secrets/kubernetes.io/serviceaccount/token eyJhbGciOiJSUzI1NiIsImtpZCI6InRaUWszVDF3WnJBSmw5QUlsNDFETXJDRFA5Q0dNZDZQOTlkdDhuQ2F1YmcifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImZvby10b2tlbi1weHB6NyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJmb28iLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJhNDNjNjQ2Ni0wYzVhLTQxMmYtYWVjNy1jNWM5Y2IyYTU3ZGMiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDpmb28ifQ.A7BiE7udCwO4vXLkrIxOgMSnB8nY9vCj-ZwV56Iv1XZSz6WRpaoixNEGM4W_rpuLWfUkluhOel6KI5CCmJfR-jK8QOSj4Q73LKVkBKTE1BS1_rfAkkr7fKETT-UeiVTxeuWITtA_dbolWhC_iW_LsuFSToRGz3zHbooEMytJLf8pTayrtgR2PSk5PikrHZVNIhlKsXiY9hebdTY0i9gciml3D8RHDNXYI6sMBVPdNEaJwQtQVFx31zG5sfc_yfq7vLoNdvwm1rbgSyR7d2AwBUuqg1-0jvYJYvNiKk0JSjL5aNERGa-LopxFYOjqgWtNY-9SbBvxsXCXFX-RQEvmsA gcloud> k exec -it curl-custom-sa -c main -- curl localhost:8001/api/v1/pods { "kind": "PodList", "apiVersion": "v1", "metadata": { "resourceVersion": "19039405" }, "items": [ ... 
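For the record, this is the book's ambassador pattern: the main container only ever talks to localhost:8001, and the kubectl-proxy sidecar handles authentication against the API server using the pod's service account (foo) token. Roughly what curl-custom-sa.yaml boils down to (a sketch, not the exact file: the curl image is the one I rebuilt, and the proxy image is the same one used for the test pods further down):

kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: curl-custom-sa
spec:
  serviceAccountName: foo           # the custom service account created above
  containers:
  - name: main
    image: marcsf/curl              # stands in for the book's curl image
    command: ["sleep", "9999999"]
  - name: ambassador
    image: luksa/kubectl-proxy      # runs kubectl proxy, listening on localhost:8001
EOF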
gcloud> k exec -it curl-custom-sa -c main -- curl localhost:8001/api/v1/pods | egrep '^ "name": ' "name": "curl-custom-sa", "name": "kubia-0", "name": "kubia-1", "name": "kubia-2", "name": "kubia-c79c89cc4-j7dwh", "name": "kubia-c79c89cc4-jgh9b", "name": "kubia-c79c89cc4-trrqz", "name": "nginx", "name": "event-exporter-gke-67986489c8-dvv4s", "name": "fluentbit-gke-br5z6", "name": "fluentbit-gke-rt2sx", "name": "fluentbit-gke-v77cg", "name": "gke-metrics-agent-29cpm", "name": "gke-metrics-agent-8wkf7", "name": "gke-metrics-agent-js2dg", "name": "konnectivity-agent-54d48c955-2zl5x", "name": "konnectivity-agent-54d48c955-66rts", "name": "konnectivity-agent-54d48c955-pz6d9", "name": "konnectivity-agent-autoscaler-6cb774c9cc-vrh7j", "name": "kube-dns-autoscaler-844c9d9448-vr4sp", "name": "kube-dns-b4f5c58c7-vsjgj", "name": "kube-dns-b4f5c58c7-zqrsg", "name": "kube-proxy-gke-kubia-default-pool-df743581-c0lb", "name": "kube-proxy-gke-kubia-default-pool-df743581-mmpz", "name": "kube-proxy-gke-kubia-default-pool-df743581-o6pi", "name": "l7-default-backend-56cb9644f6-d96vp", "name": "metrics-server-v0.3.6-9c5bbf784-wzfnz", "name": "pdcsi-node-9hmch", "name": "pdcsi-node-d8ff7", "name": "pdcsi-node-nfdn8", gcloud> k get clusterrolebinding | grep permissive permissive-binding ClusterRole/cluster-admin 24d gcloud> k delete clusterrolebinding permissive-binding clusterrolebinding.rbac.authorization.k8s.io "permissive-binding" deleted gcloud> k create ns foo namespace/foo created gcloud> k run test --image=luksa/kubectl-proxy -n foo pod/test created gcloud> k create ns bar namespace/bar created gcloud> k run test --image=luksa/kubectl-proxy -n bar pod/test created gcloud> k get po -n foo NAME READY STATUS RESTARTS AGE test 1/1 Running 0 2m7s ~> k exec -it test -n foo -- sh / # curl localhost:8001/api/v1/namespaces/foo/services { "kind": "Status", "apiVersion": "v1", "metadata": { }, "status": "Failure", "message": "services is forbidden: User \"system:serviceaccount:foo:default\" cannot list resource \"services\" in API group \"\" in the namespace \"foo\"", "reason": "Forbidden", "details": { "kind": "services" }, "code": 403 }/ # ~> k exec -it test -n bar -- sh / # gcloud> k create -f service-reader.yaml role.rbac.authorization.k8s.io/service-reader created Note: I forgot '-n foo' gcloud> k get roles -n foo NAME CREATED AT service-reader 2021-12-04T17:40:21Z gcloud> k get clusterrolebindings | wc -l 89 gcloud> k get clusterrolebindings | grep cluster-admin cluster-admin ClusterRole/cluster-admin 44d kubelet-cluster-admin ClusterRole/system:node 44d storage-version-migration-migrator-v2 ClusterRole/cluster-admin 44d gcloud> k describe clusterrolebinding cluster-admin Name: cluster-admin Labels: kubernetes.io/bootstrapping=rbac-defaults Annotations: rbac.authorization.kubernetes.io/autoupdate: true Role: Kind: ClusterRole Name: cluster-admin Subjects: Kind Name Namespace ---- ---- --------- Group system:masters gcloud> k create rolebinding test --role=service-reader --serviceaccount=foo:default -n foo rolebinding.rbac.authorization.k8s.io/test created And now indeed, from the foo terminal: / # curl localhost:8001/api/v1/namespaces/foo/services { "kind": "ServiceList", "apiVersion": "v1", "metadata": { "resourceVersion": "19048890" }, "items": [] }/ # gcloud> k get rolebinding test -n foo -o yaml apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: creationTimestamp: "2021-12-04T17:48:12Z" managedFields: - apiVersion: rbac.authorization.k8s.io/v1 fieldsType: FieldsV1 fieldsV1: f:roleRef: 
f:apiGroup: {} f:kind: {} f:name: {} f:subjects: {} manager: kubectl-create operation: Update time: "2021-12-04T17:48:12Z" name: test namespace: foo resourceVersion: "19048809" uid: cd5d7319-4410-404d-877c-d277543a086c roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: service-reader subjects: - kind: ServiceAccount name: default namespace: foo gcloud> k edit rolebinding test -n foo Waiting for Emacs... rolebinding.rbac.authorization.k8s.io/test edited Added bar to the subjects And indeed. I can access the foo services from bar -------------------------------- December 5 12.2.4, p 362 gcloud> k get clusterrole | grep reader external-metrics-reader 2021-10-21T13:55:54Z system:gke-hpa-service-reader 2021-10-21T13:55:54Z system:gke-uas-collection-reader 2021-10-21T13:55:54Z system:gke-uas-metrics-reader 2021-10-21T13:55:54Z gcloud> k create clusterrole pv-reader --verb=get,list --resource=persistentvolumes clusterrole.rbac.authorization.k8s.io/pv-reader created gcloud> k get clusterrole pv-reader NAME CREATED AT pv-reader 2021-12-05T09:18:12Z / # curl localhost:8001/api/v1/persistentvolumes { "kind": "Status", "apiVersion": "v1", "metadata": { }, "status": "Failure", "message": "persistentvolumes is forbidden: User \"system:serviceaccount:foo:default\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope", "reason": "Forbidden", "details": { "kind": "persistentvolumes" }, "code": 403 gcloud> k create rolebinding pv-test --clusterrole=pv-reader --serviceaccount=foo:default -n foo rolebinding.rbac.authorization.k8s.io/pv-test created But it doesn't help gcloud> k delete rolebinding pv-test -n foo rolebinding.rbac.authorization.k8s.io "pv-test" deleted gcloud> k create clusterrolebinding pv-test --clusterrole=pv-reader --serviceaccount=foo:default clusterrolebinding.rbac.authorization.k8s.io/pv-test created Now it works: / # curl -s localhost:8001/api/v1/persistentvolumes | egrep '^ "name":' "name": "mongodb-pv", "name": "pv-a", "name": "pv-b", "name": "pv-c", "name": "pvc-37158c4e-4ada-411e-8668-2ec7bf3ef7a1", "name": "pvc-8753eb16-427b-4b46-9319-8667b24fc3fe", "name": "pvc-908fbd0e-c0b6-46c9-85d3-b0a78f21f250", "name": "pvc-9913cc69-3634-4d95-84d6-5f0924c3d8cf", "name": "pvc-c19d2311-13d8-4cdc-a014-2ac50496a12f", gcloud> k get clusterrole system:discovery -o yaml | perl -nle '$pr=1 if /^rules:/; print if $pr' rules: - nonResourceURLs: - /api - /api/* - /apis - /apis/* - /healthz - /livez - /openapi - /openapi/* - /readyz - /version - /version/ verbs: - get gcloud> k get clusterrolebinding system:discovery -o yaml | perl -nle '$pr=1 if /^roleRef:/; print if $pr' roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:discovery subjects: - apiGroup: rbac.authorization.k8s.io kind: Group name: system:authenticated gcloud> k get clusterrole view -o yaml | perl -nle '$pr=1 if /^rules:/; $pr++ if /^- apiGroups:/; print if $pr and $pr<3' rules: - apiGroups: - "" resources: - configmaps - endpoints - persistentvolumeclaims - persistentvolumeclaims/status - pods - replicationcontrollers - replicationcontrollers/scale - serviceaccounts - services - services/status verbs: - get - list - watch / # curl -s localhost:8001/api/v1/pods | grep status "status": "Failure", / # curl -s localhost:8001/api/v1/namespaces/foo/pods | grep status "status": "Failure", gcloud> k create clusterrolebinding view-test --clusterrole=view --serviceaccount=foo:default clusterrolebinding.rbac.authorization.k8s.io/view-test created / # curl -s localhost:8001/api/v1/pods 
| egrep '^ "name":' "name": "test", "name": "curl-custom-sa", "name": "kubia-0", "name": "kubia-1", "name": "kubia-2", "name": "kubia-c79c89cc4-j7dwh", "name": "kubia-c79c89cc4-jgh9b", "name": "kubia-c79c89cc4-trrqz", "name": "nginx", "name": "test", "name": "event-exporter-gke-67986489c8-dvv4s", "name": "fluentbit-gke-br5z6", "name": "fluentbit-gke-rt2sx", "name": "fluentbit-gke-v77cg", "name": "gke-metrics-agent-29cpm", "name": "gke-metrics-agent-8wkf7", "name": "gke-metrics-agent-js2dg", "name": "konnectivity-agent-54d48c955-2zl5x", "name": "konnectivity-agent-54d48c955-66rts", "name": "konnectivity-agent-54d48c955-pz6d9", "name": "konnectivity-agent-autoscaler-6cb774c9cc-vrh7j", "name": "kube-dns-autoscaler-844c9d9448-vr4sp", "name": "kube-dns-b4f5c58c7-vsjgj", "name": "kube-dns-b4f5c58c7-zqrsg", "name": "kube-proxy-gke-kubia-default-pool-df743581-c0lb", "name": "kube-proxy-gke-kubia-default-pool-df743581-mmpz", "name": "kube-proxy-gke-kubia-default-pool-df743581-o6pi", "name": "l7-default-backend-56cb9644f6-d96vp", "name": "metrics-server-v0.3.6-9c5bbf784-wzfnz", "name": "pdcsi-node-9hmch", "name": "pdcsi-node-d8ff7", "name": "pdcsi-node-nfdn8", / # curl -s localhost:8001/api/v1/namespaces/foo/pods | egrep '^ "name":' "name": "test", / # curl -s localhost:8001/api/v1/namespaces/bar/pods | egrep '^ "name":' "name": "test", gcloud> k delete clusterrolebinding view-test clusterrolebinding.rbac.authorization.k8s.io "view-test" deleted gcloud> k create rolebinding view-test --clusterrole=view --serviceaccount=foo:default -n foo rolebinding.rbac.authorization.k8s.io/view-test created / # curl -s localhost:8001/api/v1/pods | grep status "status": "Failure", / # curl -s localhost:8001/api/v1/namespaces/foo/pods | egrep '^ "name":' "name": "test", gcloud> k get clusterrole | grep system:controller | wc -l 30 gcloud> k get clusterrole | grep system:controller | head -5 system:controller:attachdetach-controller 2021-10-21T13:55:05Z system:controller:certificate-controller 2021-10-21T13:55:05Z system:controller:clusterrole-aggregation-controller 2021-10-21T13:55:05Z system:controller:cronjob-controller 2021-10-21T13:55:05Z system:controller:daemon-set-controller 2021-10-21T13:55:05Z gcloud> k get clusterrole | grep system:kube-scheduler system:kube-scheduler 2021-10-21T13:55:05Z -------------------------------- December 8 Chapter 13, p 377 gcloud compute firewall-rules ... See Chapter 5, p 136 -------------------------------- December 9 Chapter 14, p 416 gcloud> ll /sys/fs/cgroup total 0 drwxr-xr-x 15 root root 380 Dec 1 11:50 . drwxr-xr-x 11 root root 0 Dec 1 11:50 .. 
dr-xr-xr-x 4 root root 0 Dec 1 11:50 blkio lrwxrwxrwx 1 root root 11 Dec 1 11:50 cpu -> cpu,cpuacct lrwxrwxrwx 1 root root 11 Dec 1 11:50 cpuacct -> cpu,cpuacct dr-xr-xr-x 4 root root 0 Dec 1 11:50 cpu,cpuacct dr-xr-xr-x 2 root root 0 Dec 1 11:50 cpuset dr-xr-xr-x 4 root root 0 Dec 1 11:50 devices dr-xr-xr-x 4 root root 0 Dec 1 11:50 freezer dr-xr-xr-x 2 root root 0 Dec 1 11:50 hugetlb dr-xr-xr-x 4 root root 0 Dec 1 11:50 memory lrwxrwxrwx 1 root root 16 Dec 1 11:50 net_cls -> net_cls,net_prio dr-xr-xr-x 2 root root 0 Dec 1 11:50 net_cls,net_prio lrwxrwxrwx 1 root root 16 Dec 1 11:50 net_prio -> net_cls,net_prio dr-xr-xr-x 2 root root 0 Dec 1 11:50 perf_event dr-xr-xr-x 4 root root 0 Dec 1 11:50 pids dr-xr-xr-x 2 root root 0 Dec 1 11:50 rdma dr-xr-xr-x 5 root root 0 Dec 1 11:50 systemd dr-xr-xr-x 5 root root 0 Dec 1 11:50 unified gcloud> k describe quota Name: gke-resource-quotas Namespace: default Resource Used Hard -------- ---- ---- count/ingresses.extensions 0 100 count/ingresses.networking.k8s.io 0 100 count/jobs.batch 1 5k pods 8 1500 services 3 500 gcloud> k get sc NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE fast kubernetes.io/gce-pd Delete Immediate false 34d premium-rwo pd.csi.storage.gke.io Delete WaitForFirstConsumer true 49d standard (default) kubernetes.io/gce-pd Delete Immediate true 49d standard-rwo pd.csi.storage.gke.io Delete WaitForFirstConsumer true 49d p 431 gcloud> k top node NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% gke-kubia-default-pool-df743581-c0lb 54m 5% 709Mi 25% gke-kubia-default-pool-df743581-mmpz 121m 12% 826Mi 29% gke-kubia-default-pool-df743581-o6pi 65m 6% 899Mi 31% gcloud> k top pod NAME CPU(cores) MEMORY(bytes) curl-custom-sa 1m 10Mi kubia-0 0m 7Mi kubia-1 0m 8Mi kubia-2 0m 18Mi kubia-c79c89cc4-j7dwh 0m 9Mi kubia-c79c89cc4-jgh9b 0m 8Mi kubia-c79c89cc4-trrqz 0m 9Mi nginx 0m 3Mi gcloud> k delete po curl-custom-sa pod "curl-custom-sa" deleted gcloud> k delete po nginx pod "nginx" deleted gcloud> k top pod --namespace=default NAME CPU(cores) MEMORY(bytes) kubia-0 0m 7Mi kubia-1 0m 8Mi kubia-2 0m 18Mi kubia-c79c89cc4-j7dwh 0m 9Mi kubia-c79c89cc4-jgh9b 0m 8Mi kubia-c79c89cc4-trrqz 0m 9Mi gcloud> k top no NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% gke-kubia-default-pool-df743581-c0lb 42m 4% 695Mi 24% gke-kubia-default-pool-df743581-mmpz 106m 11% 826Mi 29% gke-kubia-default-pool-df743581-o6pi 56m 5% 898Mi 31% gcloud> k cluster-info Kubernetes control plane is running at https://34.78.120.160 GLBCDefaultBackend is running at https://34.78.120.160/api/v1/namespaces/kube-system/services/default-http-backend:http/proxy KubeDNS is running at https://34.78.120.160/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy Metrics-server is running at https://34.78.120.160/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. -------------------------------- December 10 Chapter 15 p 439 Looking for Heapster pod and service... 
gcloud> k get po --namespace=kube-system NAME READY STATUS RESTARTS AGE event-exporter-gke-67986489c8-dvv4s 2/2 Running 0 15d fluentbit-gke-br5z6 2/2 Running 2 34d fluentbit-gke-rt2sx 2/2 Running 0 34d fluentbit-gke-v77cg 2/2 Running 0 34d gke-metrics-agent-29cpm 1/1 Running 0 34d gke-metrics-agent-8wkf7 1/1 Running 0 34d gke-metrics-agent-js2dg 1/1 Running 1 34d konnectivity-agent-54d48c955-2zl5x 1/1 Running 0 34d konnectivity-agent-54d48c955-66rts 1/1 Running 0 15d konnectivity-agent-54d48c955-pz6d9 1/1 Running 0 34d konnectivity-agent-autoscaler-6cb774c9cc-vrh7j 1/1 Running 0 15d kube-dns-autoscaler-844c9d9448-vr4sp 1/1 Running 0 15d kube-dns-b4f5c58c7-vsjgj 4/4 Running 0 34d kube-dns-b4f5c58c7-zqrsg 4/4 Running 0 15d kube-proxy-gke-kubia-default-pool-df743581-c0lb 1/1 Running 1 34d kube-proxy-gke-kubia-default-pool-df743581-mmpz 1/1 Running 0 34d kube-proxy-gke-kubia-default-pool-df743581-o6pi 1/1 Running 0 34d l7-default-backend-56cb9644f6-d96vp 1/1 Running 0 34d metrics-server-v0.3.6-9c5bbf784-wzfnz 2/2 Running 0 34d pdcsi-node-9hmch 2/2 Running 2 34d pdcsi-node-d8ff7 2/2 Running 0 34d pdcsi-node-nfdn8 2/2 Running 0 34d gcloud> k get svc --namespace=kube-system NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default-http-backend NodePort 10.116.9.220 80:31486/TCP 49d kube-dns ClusterIP 10.116.0.10 53/UDP,53/TCP 49d metrics-server ClusterIP 10.116.0.209 443/TCP 49d https://kubernetes.io/docs/tasks/debug-application-cluster/resource-usage-monitoring/ https://brancz.com/2018/01/05/prometheus-vs-heapster-vs-kubernetes-metrics-apis/ cAdvisor is still there. gcloud> k get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.116.0.1 443/TCP 45d kubia ClusterIP None 80/TCP 16d kubia-public ClusterIP 10.116.13.103 80/TCP 16d gcloud> k autoscale svc kubia --cpu-percent=30 --min=1 --max=5 error: cannot autoscale a Service: could not find scale subresource for /v1, Resource=services in discovery information gcloud> k get deployment NAME READY UP-TO-DATE AVAILABLE AGE kubia 3/3 3 3 24d gcloud> k autoscale deployment kubia --cpu-percent=30 --min=1 --max=5 horizontalpodautoscaler.autoscaling/kubia autoscaled gcloud> k get hpa NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE kubia Deployment/kubia /30% 1 5 3 2m12s gcloud> k get hpa kubia -o yaml apiVersion: autoscaling/v1 kind: HorizontalPodAutoscaler metadata: annotations: autoscaling.alpha.kubernetes.io/conditions: '[{"type":"AbleToScale","status":"True","lastTransitionTime":"2021-12-10T09:18:02Z","reason":"SucceededGetScale","message":"the HPA controller was able to get the target''s current scale"},{"type":"ScalingActive","status":"False","lastTransitionTime":"2021-12-10T09:18:02Z","reason":"FailedGetResourceMetric","message":"the HPA was unable to compute the replica count: failed to get cpu utilization: missing request for cpu"}]' ... spec: maxReplicas: 5 minReplicas: 1 scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: kubia targetCPUUtilizationPercentage: 30 status: currentReplicas: 3 desiredReplicas: 0 gcloud> k get hpa.v2beta1.autoscaling kubia -o yaml apiVersion: autoscaling/v2beta1 kind: HorizontalPodAutoscaler metadata: creationTimestamp: "2021-12-10T09:17:46Z" ... 
status: conditions: - lastTransitionTime: "2021-12-10T09:18:02Z" message: the HPA controller was able to get the target's current scale reason: SucceededGetScale status: "True" type: AbleToScale - lastTransitionTime: "2021-12-10T09:18:02Z" message: 'the HPA was unable to compute the replica count: failed to get cpu utilization: missing request for cpu' reason: FailedGetResourceMetric status: "False" type: ScalingActive currentMetrics: null currentReplicas: 3 desiredReplicas: 0 gcloud> k edit deployment kubia Waiting for Emacs... error: deployments.apps "kubia" is invalid Waiting for Emacs... deployment.apps/kubia edited I added the requests as in https://github.com/luksa/kubernetes-in-action/blob/master/Chapter15/deployment.yaml spec: ... template: ... spec: containers: - image: marcsf/kubia:v2 imagePullPolicy: IfNotPresent name: nodejs resources: requests: cpu: 100m ... # deployments.apps "kubia" was not valid: # * : Invalid value: "The edited file failed validation": [yaml: line 41: found character that cannot start any token, invalid character 'a' looking for beginning of value] Just needed to untabify (YAML doesn't allow tabs)... But this didn't solve the issue: gcloud> k get hpa.v2beta1.autoscaling kubia -o yaml | perl -nle '$y++ if /lastTransitionTime/;print if $y>1 and /^ /' message: 'the HPA was unable to compute the replica count: failed to get cpu utilization: missing request for cpu' reason: FailedGetResourceMetric status: "False" type: ScalingActive gcloud> k get deployment NAME READY UP-TO-DATE AVAILABLE AGE kubia 3/3 3 3 24d gcloud> k get deployment kubia -o yaml | perl -nle '$y++ if /resources:/;print if $y>2 and /^ /' requests: cpu: 100m Probably 'edit' is too late... Needed to tweak the yaml a bit (apiVersion + selector:...) gcloud> k delete deployment kubia deployment.apps "kubia" deleted gcloud> k create -f deployment.yaml deployment.apps/kubia created gcloud> k get deployment NAME READY UP-TO-DATE AVAILABLE AGE kubia 0/3 3 0 13s gcloud> k get deployment NAME READY UP-TO-DATE AVAILABLE AGE kubia 3/3 3 3 59s gcloud> k get hpa.v2beta1.autoscaling kubia -o yaml | perl -nle '$y++ if /lastTransitionTime/;print if $y>1 and /^ /' message: 'the HPA was unable to compute the replica count: failed to get cpu utilization: missing request for cpu' reason: FailedGetResourceMetric status: "False" type: ScalingActive OK, need to enable something else...
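Before concluding that something else needs enabling, a sanity check that the CPU request really is on both the Deployment's pod template and on the running pods the HPA is watching (a quick sketch; the app=kubia label is an assumption taken from the book's deployment.yaml):

# request in the pod template
kubectl get deployment kubia -o jsonpath='{.spec.template.spec.containers[0].resources.requests.cpu}{"\n"}'
# request on each running pod (CPU utilization is computed as a percentage of these)
kubectl get po -l app=kubia -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].resources.requests.cpu}{"\n"}{end}'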
gcloud> k describe hpa | tail -12 Conditions: Type Status Reason Message ---- ------ ------ ------- AbleToScale True SucceededGetScale the HPA controller was able to get the target's current scale ScalingActive False FailedGetResourceMetric the HPA was unable to compute the replica count: failed to get cpu utilization: missing request for cpu Events: Type Reason Age From Message ---- ------ ---- ---- ------- Warning FailedGetResourceMetric 37m (x8 over 39m) horizontal-pod-autoscaler failed to get cpu utilization: missing request for cpu Warning FailedComputeMetricsReplicas 37m (x8 over 39m) horizontal-pod-autoscaler failed to compute desired number of replicas based on listed metrics for Deployment/default/kubia: invalid metrics (1 invalid out of 1), first error is: failed to get cpu utilization: missing request for cpu Warning FailedGetScale 8m46s (x25 over 15m) horizontal-pod-autoscaler deployments/scale.apps "kubia" not found Warning FailedGetResourceMetric 3m51s (x111 over 39m) horizontal-pod-autoscaler missing request for cpu gcloud> k get deployment kubia -o yaml | perl -nle '$y++ if /resources:/;print if $y>1 and /^ /' requests: cpu: 100m gcloud> k delete svc kubia service "kubia" deleted gcloud> k expose deployment kubia --port=80 --target-port=8080 service/kubia exposed ~> watch -n 1 kubectl get hpa,deployment gcloud> k run -it --rm --restart=Never loadgenerator --image=busybox -- sh -c "while true; do wget -O - -q http://kubia.default; done" If you don't see a command prompt, try pressing enter. wget: error getting response You've hit kubia-2 Data stored in the cluster: wget: error getting response This is v1 running in pod kubia-5bb46d6998-5fn4l This is v1 running in pod kubia-5bb46d6998-5fn4l You've hit kubia-1 Data stored in the cluster: C-c C-c^Cpod "loadgenerator" deleted pod default/loadgenerator terminated (Error) Otherwise, no effect... Neither in the 'watch' output, nor in: gcloud> k describe hpa | tail -12 Conditions: Type Status Reason Message ---- ------ ------ ------- AbleToScale True SucceededGetScale the HPA controller was able to get the target's current scale ScalingActive False FailedGetResourceMetric the HPA was unable to compute the replica count: failed to get cpu utilization: missing request for cpu Events: Type Reason Age From Message ---- ------ ---- ---- ------- Warning FailedGetResourceMetric 59m (x8 over 61m) horizontal-pod-autoscaler failed to get cpu utilization: missing request for cpu Warning FailedComputeMetricsReplicas 59m (x8 over 61m) horizontal-pod-autoscaler failed to compute desired number of replicas based on listed metrics for Deployment/default/kubia: invalid metrics (1 invalid out of 1), first error is: failed to get cpu utilization: missing request for cpu Warning FailedGetScale 31m (x25 over 37m) horizontal-pod-autoscaler deployments/scale.apps "kubia" not found Warning FailedGetResourceMetric 98s (x207 over 61m) horizontal-pod-autoscaler missing request for cpu gcloud> k delete hpa kubia horizontalpodautoscaler.autoscaling "kubia" deleted gcloud> k autoscale deployment kubia --cpu-percent=30 --min=1 --max=5 horizontalpodautoscaler.autoscaling/kubia autoscaled gcloud> k edit hpa kubia Waiting for Emacs... 
horizontalpodautoscaler.autoscaling/kubia edited gcloud> k get hpa NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE kubia Deployment/kubia /60% 1 5 3 4m13s gcloud> k get po --namespace=kube-system | grep auto konnectivity-agent-autoscaler-6cb774c9cc-vrh7j 1/1 Running 0 15d kube-dns-autoscaler-844c9d9448-vr4sp 1/1 Running 0 15d gcloud> k get po konnectivity-agent-autoscaler-6cb774c9cc-vrh7j --namespace=kube-system NAME READY STATUS RESTARTS AGE konnectivity-agent-autoscaler-6cb774c9cc-vrh7j 1/1 Running 0 15d -------------------------------- December 11 Chapter 16 16.1.1 p 458 gcloud> k describe no master.k8s Error from server (NotFound): nodes "master.k8s" not found gcloud> k get no NAME STATUS ROLES AGE VERSION gke-kubia-default-pool-df743581-c0lb Ready 35d v1.20.10-gke.1600 gke-kubia-default-pool-df743581-mmpz Ready 35d v1.20.10-gke.1600 gke-kubia-default-pool-df743581-o6pi Ready 35d v1.20.10-gke.1600 gcloud> k describe no gke-kubia-default-pool-df743581-c0lb | grep -A5 ^Taints: Taints: Unschedulable: false Lease: HolderIdentity: gke-kubia-default-pool-df743581-c0lb AcquireTime: RenewTime: Sat, 11 Dec 2021 10:22:23 +0000 gcloud> for n in $(k get po --namespace=kube-system -o name); do echo $n: $(k describe $n --namespace=kube-system | egrep ^Tol); done pod/event-exporter-gke-67986489c8-dvv4s: Tolerations: components.gke.io/gke-managed-components op=Exists pod/fluentbit-gke-29vbl: Tolerations: :NoExecute op=Exists pod/fluentbit-gke-5hp6p: Tolerations: :NoExecute op=Exists pod/fluentbit-gke-nphl6: Tolerations: :NoExecute op=Exists pod/gke-metrics-agent-29cpm: Tolerations: :NoExecute op=Exists pod/gke-metrics-agent-8wkf7: Tolerations: :NoExecute op=Exists pod/gke-metrics-agent-js2dg: Tolerations: :NoExecute op=Exists pod/konnectivity-agent-54d48c955-2zl5x: Tolerations: CriticalAddonsOnly op=Exists pod/konnectivity-agent-54d48c955-66rts: Tolerations: CriticalAddonsOnly op=Exists pod/konnectivity-agent-54d48c955-pz6d9: Tolerations: CriticalAddonsOnly op=Exists pod/konnectivity-agent-autoscaler-6cb774c9cc-vrh7j: Tolerations: CriticalAddonsOnly op=Exists pod/kube-dns-autoscaler-844c9d9448-vr4sp: Tolerations: CriticalAddonsOnly op=Exists pod/kube-dns-b4f5c58c7-vsjgj: Tolerations: CriticalAddonsOnly op=Exists pod/kube-dns-b4f5c58c7-zqrsg: Tolerations: CriticalAddonsOnly op=Exists pod/kube-proxy-gke-kubia-default-pool-df743581-c0lb: Tolerations: :NoExecute op=Exists pod/kube-proxy-gke-kubia-default-pool-df743581-mmpz: Tolerations: :NoExecute op=Exists pod/kube-proxy-gke-kubia-default-pool-df743581-o6pi: Tolerations: :NoExecute op=Exists pod/l7-default-backend-56cb9644f6-d96vp: Tolerations: components.gke.io/gke-managed-components op=Exists pod/metrics-server-v0.3.6-9c5bbf784-wzfnz: Tolerations: CriticalAddonsOnly op=Exists pod/pdcsi-node-jrrtg: Tolerations: :NoSchedule op=Exists pod/pdcsi-node-mgjl5: Tolerations: :NoSchedule op=Exists pod/pdcsi-node-z5tzf: Tolerations: :NoSchedule op=Exists gcloud> k describe pod/pdcsi-node-z5tzf -n kube-system | egrep -A20 ^Toleration Tolerations: :NoSchedule op=Exists :NoExecute op=Exists CriticalAddonsOnly op=Exists components.gke.io/gke-managed-components op=Exists node.kubernetes.io/disk-pressure:NoSchedule op=Exists node.kubernetes.io/memory-pressure:NoSchedule op=Exists node.kubernetes.io/network-unavailable:NoSchedule op=Exists node.kubernetes.io/not-ready:NoExecute op=Exists node.kubernetes.io/pid-pressure:NoSchedule op=Exists node.kubernetes.io/unreachable:NoExecute op=Exists node.kubernetes.io/unschedulable:NoSchedule op=Exists Events: gcloud> k 
describe pod/kube-proxy-gke-kubia-default-pool-df743581-c0lb -n kube-system | egrep -A20 ^Toleration Tolerations: :NoExecute op=Exists :NoSchedule op=Exists Events: gcloud> k describe no gke-kubia-default-pool-df743581-c0lb | perl -nle '$f-- if $f and !/^ /;$f++ if /^Labels:/; print if $f' Labels: beta.kubernetes.io/arch=amd64 beta.kubernetes.io/instance-type=e2-medium beta.kubernetes.io/os=linux cloud.google.com/gke-boot-disk=pd-standard cloud.google.com/gke-container-runtime=containerd cloud.google.com/gke-nodepool=default-pool cloud.google.com/gke-os-distribution=cos cloud.google.com/machine-family=e2 failure-domain.beta.kubernetes.io/region=europe-west1 failure-domain.beta.kubernetes.io/zone=europe-west1-d kubernetes.io/arch=amd64 kubernetes.io/hostname=gke-kubia-default-pool-df743581-c0lb kubernetes.io/os=linux node.kubernetes.io/instance-type=e2-medium topology.gke.io/zone=europe-west1-d topology.kubernetes.io/region=europe-west1 topology.kubernetes.io/zone=europe-west1-d gcloud> for n in $(k get po -o name); do k describe $n | egrep '\baffinity:' && echo $n; done gcloud> -------------------------------- July 10 -- reading the appendices of the Kubernetes in Action book The GKE environment was deleted long ago, but its entries are still in my kubeconfig $ k config get-contexts CURRENT NAME CLUSTER AUTHINFO NAMESPACE * gke_dev-copilot-329712_europe-west1-d_kubia gke_dev-copilot-329712_europe-west1-d_kubia gke_dev-copilot-329712_europe-west1-d_kubia $ k config get-clusters NAME gke_dev-copilot-329712_europe-west1-d_kubia I don't delete them (at least not for now)
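If I ever do clean them up, it should just be (names taken from the output above):

# drop the stale GKE entries from ~/.kube/config
kubectl config delete-context gke_dev-copilot-329712_europe-west1-d_kubia
kubectl config delete-cluster gke_dev-copilot-329712_europe-west1-d_kubia
kubectl config unset users.gke_dev-copilot-329712_europe-west1-d_kubia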