kubectl custom query for CPU/Memory Requests and Limits

kubectl custom-columns query to get the CPU and memory requests and limits of every deployment (first container only), as CSV:

kubectl get deploy -A -o=custom-columns='Namespace:.metadata.namespace,Name:.metadata.name,Request_CPU:.spec.template.spec.containers[0].resources.requests.cpu,Limit_CPU:.spec.template.spec.containers[0].resources.limits.cpu,Request_Memory:.spec.template.spec.containers[0].resources.requests.memory,Limit_Memory:.spec.template.spec.containers[0].resources.limits.memory' | sed 1d | tr -s '[:blank:]' ','
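
The custom-columns query above only reads containers[0]; a rough sketch of a variant that prints one CSV row per container (namespace, deployment, container name, then the same four columns):

kubectl get deploy -A --no-headers | while read -r ns name _; do
  kubectl get deploy "$name" -n "$ns" \
    -o jsonpath='{range .spec.template.spec.containers[*]}{.name}{","}{.resources.requests.cpu}{","}{.resources.limits.cpu}{","}{.resources.requests.memory}{","}{.resources.limits.memory}{"\n"}{end}' \
    | sed "s/^/$ns,$name,/"
done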

kubectl: replace an environment variable value across deployments

# replace the "stage" suffix in the NEW_RELIC_APP_NAME value with "perf" for every deployment in the current namespace
deployments=$(kubectl get deploy | awk '{print $1}' | sed 1d)

for deploy in $deployments
do
    kubectl get deploy $deploy -o yaml > _tmp_store.yml
    value_to_be_replaced=$(grep -A 1 'NEW_RELIC_APP_NAME' _tmp_store.yml | grep value | awk -F 'value: ' '{print $2}')
    echo "value_to_be_replaced: $value_to_be_replaced"
    if [[ $value_to_be_replaced == "" ]]; then
        echo "=====================$deploy no change =========================="
    else
        replaced_value=$(echo $value_to_be_replaced | sed 's/stage/perf/g')
        echo "replaced_value: $replaced_value"
        sed "s/$value_to_be_replaced/$replaced_value/g" _tmp_store.yml | kubectl apply -f -
        echo "=====================$deploy done =========================="
    fi
done
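
If only that one environment variable needs to change (rather than a generic string replace over the whole manifest), kubectl set env is a simpler alternative; a sketch, reusing the $deploy and $replaced_value variables from the loop above:

# sets (or overwrites) the variable directly on the deployment and triggers a rollout
kubectl set env deployment/$deploy NEW_RELIC_APP_NAME="$replaced_value"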

In-Place Pod Vertical Scaling in Kubernetes

  • Enable the InPlacePodVerticalScaling feature gate on the kube-apiserver (and kubelet); the API server static pod manifest lives at:

/etc/kubernetes/manifests/kube-apiserver.yaml
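
A rough sketch of enabling the gate on a kubeadm control plane (the sed edit and kubelet config path are assumptions, adjust for your setup):

# append the feature gate to the API server command; the kubelet restarts the static pod automatically
sudo sed -i 's|- kube-apiserver$|- kube-apiserver\n    - --feature-gates=InPlacePodVerticalScaling=true|' \
  /etc/kubernetes/manifests/kube-apiserver.yaml

# kubelet: add the same gate under featureGates in /var/lib/kubelet/config.yaml, then
sudo systemctl restart kubelet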

nginx.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx
        name: nginx
        resizePolicy:
        - resourceName: cpu
          restartPolicy: NotRequired
        - resourceName: memory
          restartPolicy: NotRequired
        resources:
          limits:
            memory: "100Mi"
            cpu: "100m"
          requests:
            memory: "100Mi"
            cpu: "100m"

Patch the running pod to resize it in place (the change applies only to the pod, not to the Deployment spec):

kubectl patch pod nginx-94675f6cf-9bxzj --patch '{"spec":{"containers":[{"name":"nginx", "resources":{"requests":{"cpu":"200m"}, "limits":{"cpu":"200m"}}}]}}'
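
To confirm the resize happened in place (no container restart), something like this can be checked on the same pod:

# requests/limits should now show 200m while restartCount stays at 0
kubectl get pod nginx-94675f6cf-9bxzj \
  -o jsonpath='{.spec.containers[0].resources}{"\n"}restarts: {.status.containerStatuses[0].restartCount}{"\n"}'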

Rancher Desktop – libraries: libbz2.so.1.0: cannot open shared object

  • Look for the libbz2 library files on your system
sudo find / -name 'libbz2.so*'
  • List the files to get the correct version
[root@fedora]# ls /usr/lib64/libbz2*
libbz2.so.1.0    libbz2.so.1.0.8
  • Create a symlink so the libbz2.so.1.0 soname resolves
ln -s /usr/lib64/libbz2.so.1.0.8 /usr/lib64/libbz2.so.1.0
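
After creating the symlink, refreshing the linker cache and checking that the soname resolves is a quick sanity check:

sudo ldconfig
ldconfig -p | grep libbz2   # should now list libbz2.so.1.0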

Error on fedora:

libraries: libbz2.so.1.0: cannot open shared object file: No such file or directory": exit status 127

Kubernetes deployment scale up/down with bash

Scale down deployments on the weekend:

####scale down####
namespaces="test,test2"
IFS=","

for namespace in $namespaces
do
    deployments=$(kubectl get deploy -n $namespace | grep -v '0/0' | awk '{print $1}' | sed 1d | tr '\n' ' ')
    IFS=" "
    for deploy in $deployments
    do
        replicas="$(kubectl get deploy $deploy -o=custom-columns='REPLICAS:spec.replicas' -n $namespace | sed 1d | tr '\n' ' ')"
        echo "namespace: $namespace deploy: $deploy replicas: $replicas"
        kubectl label deploy $deploy weekdays-replicas=$replicas -n $namespace --overwrite=true
        kubectl scale --replicas=0 deployment $deploy -n "$namespace" || true
    done
done

scale-up:

####scale up####
namespaces="test,test2"
IFS=","
for namespace in $namespaces
do
    deployments=$(kubectl get deploy -n $namespace | awk '{print $1}' | sed 1d | tr '\n' ' ')
    IFS=" "
    for deploy in $deployments
    do
        replicas="$(kubectl get deploy $deploy -o=custom-columns='REPLICAS:metadata.labels.weekdays-replicas' -n $namespace | sed 1d | tr '\n' ' ')"
        echo "kubectl scale --replicas=$replicas statefulset $deploy -n "$namespace" || true"
    done
done
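
A sketch of wiring the two scripts into cron (paths and times are assumptions; save the snippets above as scale-down.sh and scale-up.sh):

# crontab -e
0 20 * * 5 /opt/k8s/scale-down.sh >> /var/log/k8s-scale.log 2>&1   # Friday 20:00
0 6 * * 1  /opt/k8s/scale-up.sh   >> /var/log/k8s-scale.log 2>&1   # Monday 06:00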

Read secret data from etcd in Kubernetes

  • Find the etcd process id
ps -ef | grep etcd
  • Go to the etcd process fd directory (use the pid found above)
cd /proc/2626577/fd
  • List the files and look for “/var/lib/etcd/member/snap/db”
ls -ltr | grep db
  • To read any secret currently created by a user in Kubernetes:
#create secret

kubectl create secret generic secret1 --from-literal=secretname=helloworld

#read secret directly from etcd

cat /var/lib/etcd/member/snap/db | strings | grep secret1 -C 10
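
The same secret can also be read through etcdctl rather than the raw db file; a sketch assuming the default /registry key prefix and standard kubeadm certificate paths:

ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  get /registry/secrets/default/secret1 | strings

If the value is readable in plain text here, encryption at rest (linked below) is not enabled.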

Encrypting Secret Data at Rest https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/

https://jonathan18186.medium.com/certified-kubernetes-security-specialist-cks-preparation-part-8-runtime-security-system-9f705872c17

CKS Practice questions 2023

  • Create a RuntimeClass named sandboxed with handler runsc and run a new pod using the sandboxed runtime with image nginx (see the sketch after this list).
  • Set the minimum TLS version to VersionTLS12 and the cipher to TLS_AES_128_GCM_SHA256 for the kubelet and kube-apiserver.
  • etcd with --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
  • Node authorization: minimize the cluster role, and remove the clusterrole and anonymous access.
  • ImagePolicyWebhook with default deny; add the correct app endpoint URL in the kubeconfig file.
  • Auditing with maxage=10, rotate=5.
  • Falco runtime output format %evt,%user.name,%user.id,%proc.name
  • Network policy: default deny, plus a policy with pod and namespace selectors.
  • Create a service account, bind it with a role/clusterrole binding, create a pod using it, and delete the unused service account.
  • Create a secret and mount it to a pod read-only.
  • Create a service account with automount of the token off.
  • Create a pod with the /root/profile AppArmor profile. podname=xyz, image=nginx.
  • Analyse 2 issues in a Dockerfile and a Deployment file.
  • Scan images with trivy and delete the pod with critical-severity findings.
  • Fix kube-bench report items for kube-apiserver, kubelet, and kube-controller-manager.
  • Upgrade the Kubernetes cluster from 1.25.4 to 1.26.0.
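
For the first item, a minimal sketch (assuming the runsc (gVisor) handler is already configured in the container runtime):

cat <<'EOF' | kubectl apply -f -
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: sandboxed
handler: runsc
EOF

# run a pod on the sandboxed runtime
kubectl run sandboxed-nginx --image=nginx \
  --overrides='{"spec":{"runtimeClassName":"sandboxed"}}'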

Get list of deployments with a nodeSelector in Kubernetes

#!/bin/bash

u_time=$(date +%s)

# all namespaces; the worker node list is collected for reference only and is not used below
list_of_namespaces=$(kubectl get ns | awk '{print $1}' | sed 1d)
worker_node_list=$(kubectl get nodes --label-columns beta.kubernetes.io/instance-type --label-columns karpenter.sh/capacity-type -l role=worker | awk '{print $1}' | sed 1d)

for namespace in $list_of_namespaces
do
    echo "================$namespace=================="
    list_of_deploy=$(kubectl get deploy -n $namespace | awk '{print $1}' | sed 1d)

    for deploy in $list_of_deploy
    do
        if [ "$(kubectl get deploy $deploy -n $namespace -o yaml | grep nodeSelector -A 1 | grep role | awk '{print $NF}')" = "worker-arm64" ]
        then
            echo "$deploy,$namespace,true"
            echo "$deploy,$namespace,true" >> prod_deploy_arm64_$u_time.list
        else
            echo "$deploy,$namespace,false"
            echo "$deploy,$namespace,false" >> prod_deploy_arm64_$u_time.list
        fi
    done
done

Helm chart custom values

nginx-chart-files/
├── index.yaml
├── nginx-0.1.0.tgz
└── nginx-0.2.0.tgz

Generate manifests:

helm template ./directory -f values.yaml --output-dir output_dir

helm template ./directory -f values.yml

#render to stdout

helm template ./directory -f values.yaml --dry-run
  • Create a helm package (.tgz file)
helm package ./directory_path

Template snippet using the custom values:

apiVersion: {{ template "controller.apiVersion" . }}
kind: {{ .Values.controller.kind }}
metadata:
  labels:
{{ $labels | indent 4 }}
  name: {{ $name }}
  namespace: {{ $.Release.Namespace }}
---
    spec:
    {{- with .Values.controller.hostAliases }}
      hostAliases:
{{ toYaml . | indent 8 }}
    {{- end }}
---

values.yml

controller:
  create: true
  kind: Deployment


---
  terminationGracePeriodSeconds: 30
  hostAliases:
  - hostnames:
    - example.com
    ip: 127.0.0.1

index.yaml

apiVersion: v1
entries:
  nginx:
  - apiVersion: v2
    appVersion: 1.16.0
    created: "2021-07-03T21:59:00.34571153-04:00"
    digest: b22a325b03c8e88b6a6a8d1a8e79f5d0498813855174a983426466b6de5a5f71
    maintainers:
    - email: [email protected]
      name: John Smith
    name: nginx
    type: application
    urls:
    - https://example.com/charts/nginx-0.1.0.tgz
    version: 0.1.0
  - apiVersion: v2
    appVersion: 1.17.0
    created: "2021-07-03T21:59:00.34571153-04:00"
    digest: b22a325b03c8e88b6a6a8d1a8e79f5d0498813855174a983426466b6de5a5f71
    maintainers:
    - email: [email protected]
      name: John Smith
    name: nginx
    type: application
    urls:
    - https://example.com/charts/nginx-0.2.0.tgz
    version: 0.2.0

https://kodekloud.com/blog/uploading-a-helm-chart/
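
The index.yaml above can be regenerated and consumed with the standard helm repo commands; a sketch assuming the packaged charts are served from https://example.com/charts:

cd nginx-chart-files/
helm repo index . --url https://example.com/charts   # (re)builds index.yaml from the .tgz files

helm repo add mycharts https://example.com/charts
helm repo update
helm install nginx mycharts/nginx --version 0.2.0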

Backup and Restore etcd snapshot for Kubernetes

  1. Create a deployment to verify the restore at the end
k create deploy nginx-test --image=nginx

2. Take an etcd snapshot; update the cert paths as per /etc/kubernetes/manifests/etcd.yaml

ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \
--cacert=<trusted-ca-file> \
--cert=<cert-file> --key=<key-file> \
  snapshot save /tmp/etcd.backup

3. Stop kubelet

systemctl stop kubelet

4. Stop kube-api and etcd

mv /etc/kubernetes/manifests/kube-apiserver.yaml /root/
mv /etc/kubernetes/manifests/etcd.yaml /root/

5. Restore the etcd.backup

ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2379 snapshot restore etcd.backup

It will create a “default.etcd” directory in the current directory

[root@lp-k8control-1 etcd]# ls default.etcd/
member
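
Alternatively, the snapshot can be restored straight into a fresh data directory (and the etcd.yaml hostPath pointed at it); a sketch:

ETCDCTL_API=3 etcdctl snapshot restore /tmp/etcd.backup --data-dir /var/lib/etcd-restore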

6. Check the etcd data directory path (/var/lib/etcd) referenced in /etc/kubernetes/manifests/etcd.yaml

[root@lp-k8control-1 default.etcd]# ls /var/lib/etcd
member

7. Copy the member directory content from default.etcd to /var/lib/etcd, as sketched below
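
A sketch of the copy (the member.old backup name is just a convention):

mv /var/lib/etcd/member /var/lib/etcd/member.old   # keep the old data aside in case a rollback is needed
cp -a default.etcd/member /var/lib/etcd/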

8. Start kube-api and etcd

mv /root/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml
mv /root/etcd.yaml /etc/kubernetes/manifests/etcd.yaml

9. Restart kubelet service

systemctl restart kubelet

10. Verify that the nginx deployment created in step 1 is restored

k get deploy

Grafana HTTPS behind the NGINX ingress controller

If Grafana itself is serving HTTPS and you do not add the annotation below, the ingress returns HTTP ERROR 400:

nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
  • Also check grafana.ini and default.ini for root_url
root_url = https://grafana.example.com
  • Also check the liveness and readiness probe scheme

https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#backend-protocol

https://stackoverflow.com/questions/54459015/how-to-configure-ingress-to-direct-traffic-to-an-https-backend-using-https

  • grafana helm values.yaml
image:
  repository: grafana/grafana
  tag: 8.5.2
persistence:
  enabled: true
  type: statefulset
resources:
 limits:
   cpu: 500m
   memory: 512Mi
 requests:
   cpu: 500m
   memory: 512Mi

grafana.ini:
  app_mode: test
  paths:
    data: /var/lib/grafana/
    logs: /var/log/grafana
    plugins: /var/lib/grafana/plugins
    provisioning: /etc/grafana/provisioning
    temp_data_lifetime: 24h
  server:
    protocol: https
    domain: grafana.example.com
    enforce_domain: false
    root_url: https://grafana.example.com/
    serve_from_sub_path: false
    cert_file: /var/lib/grafana/cert/dev.crt
    cert_key: /var/lib/grafana/cert/dev.key
  database:
    type: postgres
    host: database-hostname
    name: grafana
    user: grafana
    password: ${grafana_eks_postgres_password}
    ssl_mode: disable
  security:
    cookie_secure: true
    allow_embedding: true
    strict_transport_security: true
    strict_transport_security_max_age_seconds: 31536000
    strict_transport_security_preload: true
    strict_transport_security_subdomains: true
    x_content_type_options: true
    x_xss_protection: true
  smtp:
    enabled: true
    host: email-smtp.ap-south-1.amazonaws.com:25
    user: ${grafana_smtp_username}
    password: ${grafana_smtp_password}
    #skip_verify = true
    from_address: [email protected]
    from_name: test

readinessProbe:
  httpGet:
    path: /api/health
    port: 3000
    scheme: HTTPS
livenessProbe:
  httpGet:
    path: /api/health
    port: 3000
    scheme: HTTPS
  initialDelaySeconds: 60
  timeoutSeconds: 30
  failureThreshold: 10

ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
    route53mapper: enabled
    nginx.ingress.kubernetes.io/backend-protocol: HTTPS
  labels:
    app: grafana
  path: /
  pathType: ImplementationSpecific
  hosts:
    - grafana.example.com
helm upgrade --install grafana grafana/grafana --version 6.32.6 --set image.tag=8.5.2 --set persistence.enabled=true --set persistence.type=statefulset --set persistence.storageClassName=gp2
helm upgrade --install grafana grafana/grafana --version 6.32.6 -f values.yaml
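
Once deployed, a quick end-to-end check through the ingress (assuming the grafana.example.com host from the values above):

# -k because the backend certificate may be self-signed; expect HTTP 200 from the health endpoint
curl -kIs https://grafana.example.com/api/health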