Delete k8s False apiservice – namespace

kubectl api-resources
# look for the apiservice that is giving an error

kubectl get apiservice
# look for entries that show False and MissingEndpoints in the AVAILABLE column
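
For example, the failed entries can be filtered directly (assuming the default kubectl output columns):

kubectl get apiservice | grep False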

kubectl delete apiservice <service-name>

kubectl api-resources

# get the CRDs related to the failed api-resources (cilium in this example)
kubectl get crds | grep cilium

kubectl delete crd ciliumnodes.cilium.io

https://github.com/helm/helm/issues/6361#issuecomment-538220109

  • Delete namespace
NAMESPACE=your_namespace
kubectl proxy &
kubectl get namespace $NAMESPACE -o json | jq '.spec = {"finalizers":[]}' > temp.json
curl -k -H "Content-Type: application/json" -X PUT --data-binary @temp.json 127.0.0.1:8001/api/v1/namespaces/$NAMESPACE/finalize
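
Once the namespace is gone, stop the background proxy and remove the temp file (assuming the same shell session):

kill %1
rm temp.json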

https://github.com/helm/helm/issues/6361

  • Delete rook-ceph namespace
kubectl -n rook-ceph patch cephclusters.ceph.rook.io rook-ceph -p '{"metadata":{"finalizers": []}}' --type=merge

kubectl api-resources --verbs=list --namespaced -o name  | xargs -n 1 kubectl get --show-kind --ignore-not-found -n rook-ceph
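
After the finalizer is cleared and any leftover resources are deleted, the namespace itself can be removed:

kubectl delete namespace rook-ceph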

More : https://github.com/rook/rook/issues/2668

aws eks get k8s token for kubectl

Note: If we create the EKS cluster from the UI (AWS console), it gets created under a different IAM identity, and kubectl get pod then fails with an unauthorized error.

aws eks get-token --cluster-name eks1
aws eks update-kubeconfig --name eks1
aws sts get-caller-identity
aws sts assume-role --role-arn "arn:aws:iam::1111111111:role/role-name" --role-session-name "tests3"
aws --profile=default eks update-kubeconfig --name eks1
aws eks create-cluster \
   --region ap-south-1 \
   --name eks1 \
   --kubernetes-version 1.20 \
   --role-arn arn:aws:iam::account_number:role/eks1-clst \
   --resources-vpc-config subnetIds=subnet-093a2ddfcb7bc30b1,subnet-0475d9e26dfdc9d00,subnet-0274975b4af3513ee
aws eks describe-cluster \
    --region ap-south-1 \
    --name eks1 \
    --query "cluster.status"
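
Cluster creation takes several minutes; the CLI can wait until the cluster is ACTIVE before updating the kubeconfig:

aws eks wait cluster-active --region ap-south-1 --name eks1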

https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html

Always check the minimum AWS CLI version required for EKS.

https://stackoverflow.com/questions/50791303/kubectl-error-you-must-be-logged-in-to-the-server-unauthorized-when-accessing

https://aws.amazon.com/premiumsupport/knowledge-center/eks-api-server-unauthorized-error/

Pod affinity, readiness, liveness in kubernetes

Why:

pod affinity: attracts pods to nodes/pods with a matching label (the example below uses nodeAffinity).
readiness: checks pod health before any traffic is sent to it.
liveness: periodically checks pod health and restarts the container when the check fails.

kubectl get nodes --show-labels

kubectl label nodes <node-name> <label-key>=<label-value>

kubectl label nodes lp-knode-02 disk=ssd
kubectl label nodes lp-knode-02 nodename=lp-knode-02
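
Verify the label is applied (this should list lp-knode-02):

kubectl get nodes -l disk=ssd

The deployment below uses a required nodeAffinity rule so its pods only schedule on nodes labeled disk=ssd, along with liveness and readiness probes: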
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd-affinity-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: httpd-affinity
  template:
    metadata:
      name: httpd-affinity-deployment
      labels:
        app: httpd-affinity
        env: prod
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: disk
                operator: In
                values:
                - ssd
      containers:        
      - name: httpd-node-affinity
        image: httpd
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "256Mi"
            cpu: "100m"
          limits:
            memory: "256Mi"
            cpu: "100m"
        ports:
        - name: httpd-port
          containerPort: 80
        livenessProbe:
          httpGet:
            path: /index.html
            port: 80
            httpHeaders:
            - name: Custom-Header
              value: custom1
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 5
          failureThreshold: 3
        readinessProbe:
          exec:
            command:
            - cat
            - /usr/local/apache2/htdocs/index.html
          initialDelaySeconds: 10
          periodSeconds: 10
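
Apply the manifest and check which node the pod was scheduled on (the file name httpd-affinity.yaml is just an example):

kubectl apply -f httpd-affinity.yaml
kubectl get pods -o wide
kubectl describe pod -l app=httpd-affinity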

More :
https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/

Install Linkerd in kubernetes

– Install linkerd

curl -sL https://run.linkerd.io/install | sh
export PATH=$PATH:$HOME/.linkerd2/bin
echo 'export PATH=$PATH:$HOME/.linkerd2/bin' >> ~/.bashrc
linkerd version
linkerd check --pre

– Install linkerd on kubernetes

linkerd install | kubectl apply -f -

# It will take some time for all the deployments to come up

kubectl -n linkerd get deploy
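
Once the deployments are up, run a full health check:

linkerd check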

– Linkerd dashboard

Update the linkerd-web deployment and add your host IP (e.g. 192.168.0.183) to the -enforced-host regex:

 containers:
      - args:
        - -api-addr=linkerd-controller-api.linkerd.svc.cluster.local:8085
        - -grafana-addr=linkerd-grafana.linkerd.svc.cluster.local:3000
        - -controller-namespace=linkerd
        - -log-level=info
        - -enforced-host=^(192\.168\.0\.183|localhost|127\.0\.0\.1|linkerd-web\.linkerd\.svc\.cluster\.local|linkerd-web\.linkerd\.svc|\[::1\])(:\d+)?$

– Update the linkerd-web service type to NodePort to expose the dashboard
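
A quick way to do that (assuming the dashboard service is named linkerd-web):

kubectl -n linkerd patch svc linkerd-web -p '{"spec":{"type":"NodePort"}}'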

  • Inject linkerd
# Inject all the deployments in the default namespace.
kubectl get deploy -o yaml | linkerd inject - | kubectl apply -f -

This adds a linkerd.io/inject: enabled annotation to the pod template.
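
Injection can also be enabled for a whole namespace instead of per deployment, e.g. for the default namespace:

kubectl annotate namespace default linkerd.io/inject=enabled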

Install metrics server in kubernetes

WHY?
– Get node CPU/RAM usage
– Can create Horizontal Pod Autoscaler (HPA)
– Lightweight

git clone https://github.com/kubernetes-sigs/metrics-server.git
  • Edit metrics-server/manifests/base/deployment.yaml and add the below lines to args
args:
          - --cert-dir=/tmp
          - --secure-port=4443
          - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
          - --kubelet-use-node-status-port #Deprecated metrics-server:v0.3.7
          - --kubelet-insecure-tls
kubectl apply -f metrics-server/manifests/base
  • To get node metrics run kubectl top node
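
With metrics available, an HPA can be created against any deployment that sets CPU requests, e.g. the httpd deployment from the affinity section above:

kubectl autoscale deployment httpd-affinity-deployment --cpu-percent=70 --min=1 --max=3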

Elastic APM monitoring for javascript app on kubernetes

1. apm-server.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: apm-deployment
  labels:
    app: apm-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: apm-deployment
  template:
    metadata:
      labels:
        app: apm-deployment
        env: prod
    spec: 
      containers:
        - name: apm-deployment
          image: "elastic/apm-server:7.9.0"
          imagePullPolicy: IfNotPresent
          env:
          - name: REGISTRY_STORAGE_DELETE_ENABLED
            value: "true"
          volumeMounts:
          - name: apm-server-config
            mountPath: /usr/share/apm-server/apm-server.yml
            subPath: apm-server.yml    
          ports:
            - containerPort: 8200
      volumes:
        - name: apm-server-config
          configMap:
            name: apm-server-config


---
kind: Service
apiVersion: v1
metadata:
  name: apm-deployment-svc
  labels:
    app: apm-deployment-svc
spec:
  type: NodePort
  ports:
    - name: http
      port: 8200
      protocol: TCP
      nodePort: 30010
  selector:
    app: apm-deployment

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: apm-server-config
  labels:
    app: apm-server
data:
  apm-server.yml: |-
    apm-server:
      host: "0.0.0.0:8200"
      rum:
        enabled: true  
    output.elasticsearch:
      hosts: elasticsearch-service:9200

Note:
1. Replace elasticsearch host as per your config
2. Only RUM js module is enabled
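
Apply the manifest and check that the pod is running:

kubectl apply -f apm-server.yml
kubectl get pods -l app=apm-deployment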

2. Add the below code to a js/html file that is loaded on every page, e.g. index.html

<script src="elastic-apm-rum.umd.min.js" crossorigin></script>
<script>
  elasticApm.init({
    serviceName: 'test-app1',
    serverUrl: 'http://192.168.0.183:30010',
  })
</script>

<body>
    This is test-app1
</body>

Note:
1. Replace serverUrl
2. Download elastic-apm-rum.umd.min.js from github
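
A quick reachability check from outside the cluster (the APM server should answer with its build/version info):

curl http://192.168.0.183:30010/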

3. Kibana dashboard for APM

We can also monitor the performance of apps written in other languages.

Prometheus blackbox exporter in Kubernetes

1. prometheus-blackbox.yml

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-blackbox-exporter
  labels:
    app: prometheus-blackbox-exporter
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus-blackbox-exporter
  template:
    metadata:
      labels:
        app: prometheus-blackbox-exporter
    spec:
      restartPolicy: Always
      containers:
        - name: blackbox-exporter
          image: "prom/blackbox-exporter:v0.15.1"
          imagePullPolicy: IfNotPresent
          args:
            - "--config.file=/config/blackbox.yaml"
          ports:
            - containerPort: 9115
          volumeMounts:
            - mountPath: /config
              name: prometheus-config
      volumes:
        - name: prometheus-config
          configMap:
            name: prometheus-blackbox-exporter

---
kind: Service
apiVersion: v1
metadata:
  name: prometheus-blackbox-exporter
  labels:
    app: prometheus-blackbox-exporter
spec:
  type: ClusterIP
  ports:
    - name: http
      port: 9115
      protocol: TCP
  selector:
    app: prometheus-blackbox-exporter

---

apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-blackbox-exporter
  labels:
    app: prometheus-blackbox-exporter
data:
  blackbox.yaml: |
    modules:
      http_2xx:
        http:
          no_follow_redirects: false
          preferred_ip_protocol: ip4
          valid_http_versions:
          - HTTP/1.1
          - HTTP/2
          valid_status_codes: []
        prober: http
        timeout: 5s

2. In Prometheus, update the prometheus.yml file as below:
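
A standard blackbox scrape job that matches the job="web1" query in step 3 (the probe target is a placeholder; replace it with the URL you want to monitor):

scrape_configs:
  - job_name: 'web1'
    metrics_path: /probe
    params:
      module: [http_2xx]
    static_configs:
      - targets:
          - http://example.com    # endpoint to probe (placeholder)
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: prometheus-blackbox-exporter:9115    # the exporter service defined above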

3. Prometheus query

probe_http_status_code{job="web1"}
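
The exporter also exposes probe_success, which is handy for alerting on endpoints that are down:

probe_success{job="web1"} == 0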

Rancher proxy rule in httpd with websocket secure (wss)

<VirtualHost *:80>
	ServerName rancher.initedit.com
	Redirect permanent / https://rancher.initedit.com/
	RewriteEngine on
	RewriteCond %{SERVER_NAME} =rancher.initedit.com [OR]
	RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,QSA,R=permanent]
</VirtualHost>

<VirtualHost *:443>
    ServerName rancher.initedit.com
    AllowEncodedSlashes on
    SSLEngine On
    SSLProxyEngine On
    RewriteEngine on
    SSLProxyVerify none
    SSLProxyCheckPeerCN off
    SSLProxyCheckPeerName off
    SSLProxyCheckPeerExpire off
    RequestHeader set X-Forwarded-Proto "https"
    RewriteCond %{HTTP:Upgrade} =websocket [NC]
    RewriteRule /(.*)   wss://192.168.0.183:8443/$1 [P,L]
    RewriteCond %{HTTP:Upgrade} !=websocket [NC]
    RewriteRule /(.*)   https://192.168.0.183:8443/$1 [P,L]
    ProxyPassReverse / https://192.168.0.183:8443/
    ProxyPreserveHost On
    SSLCertificateFile /etc/letsencrypt/live/rancher.initedit.com/cert.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/rancher.initedit.com/privkey.pem
    SSLCertificateChainFile /etc/letsencrypt/live/rancher.initedit.com/fullchain.pem
    Include /etc/letsencrypt/options-ssl-apache.conf
</VirtualHost>
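
The rules above need mod_proxy, mod_proxy_http, mod_proxy_wstunnel, mod_rewrite, mod_ssl and mod_headers. On Debian/Ubuntu they can be enabled with:

a2enmod proxy proxy_http proxy_wstunnel rewrite ssl headers
systemctl restart apache2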

More info : https://stackoverflow.com/questions/27526281/websockets-and-apache-proxy-how-to-configure-mod-proxy-wstunnel