Elastic APM monitoring for a JavaScript app on Kubernetes

1.apm-server.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: apm-deployment
  labels:
    app: apm-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: apm-deployment
  template:
    metadata:
      labels:
        app: apm-deployment
        env: prod
    spec: 
      containers:
        - name: apm-deployment
          image: "elastic/apm-server:7.9.0"
          imagePullPolicy: IfNotPresent
          volumeMounts:
          - name: apm-server-config
            mountPath: /usr/share/apm-server/apm-server.yml
            subPath: apm-server.yml    
          ports:
            - containerPort: 8200
      volumes:
        - name: apm-server-config
          configMap:
            name: apm-server-config


---
kind: Service
apiVersion: v1
metadata:
  name: apm-deployment-svc
  labels:
    app: apm-deployment-svc
spec:
  type: NodePort
  ports:
    - name: http
      port: 8200
      protocol: TCP
      nodePort: 30010
  selector:
    app: apm-deployment

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: apm-server-config
  labels:
    app: apm-server
data:
  apm-server.yml: |-
    apm-server:
      host: "0.0.0.0:8200"
      rum:
        enabled: true  
    output.elasticsearch:
      hosts: ["elasticsearch-service:9200"]

Note:
1. Replace the Elasticsearch host to match your environment
2. Only the RUM (JS) module is enabled
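
To deploy, apply the manifest and check that the APM server pod and NodePort service are up (a quick sketch; assumes the manifest is saved as apm-server.yml and kubectl points at your cluster):

kubectl apply -f apm-server.yml
kubectl get pods -l app=apm-deployment
kubectl get svc apm-deployment-svc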

2. Add the code below to a file that is loaded on every page, e.g. index.html

<script src="elastic-apm-rum.umd.min.js" crossorigin></script>
<script>
  elasticApm.init({
    serviceName: 'test-app1',
    serverUrl: 'http://192.168.0.183:30010',
  })
</script>

<body>
    This is test-app1
</body>

Note:
1. Replace serverUrl
2. Download elastic-apm-rum.umd.min.js from GitHub (elastic/apm-agent-rum-js releases)
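
Alternatively, the RUM agent can be installed from npm instead of loading the UMD bundle (a sketch, assuming your app is built with a bundler such as webpack):

npm install @elastic/apm-rum

import { init as initApm } from '@elastic/apm-rum'

const apm = initApm({
  serviceName: 'test-app1',
  serverUrl: 'http://192.168.0.183:30010',
})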

3. Kibana dashboard for APM

We can also monitor the performance of applications written in other languages.

Simple CI/CD pipeline in GitLab with a runner

1.Install GitLab Runner on CentOS 7

wget https://gitlab-runner-downloads.s3.amazonaws.com/latest/rpm/gitlab-runner_amd64.rpm

rpm -ivh gitlab-runner_amd64.rpm

systemctl status gitlab-runner

More : https://docs.gitlab.com/runner/install/

2.Get the GitLab URL and token for the runner

https://gitlab.com/<username>/<project_name> > Settings > CI/CD > Runners

Note: The token shown here has been revoked; yours will be different.

3.Register the runner with the gitlab-runner register command, as shown below
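
A non-interactive registration looks roughly like this (a sketch; replace the URL and token with the values from step 2, and keep the tag in sync with .gitlab-ci.yml):

sudo gitlab-runner register \
  --non-interactive \
  --url "https://gitlab.com/" \
  --registration-token "<your_token>" \
  --executor "shell" \
  --description "centos7-runner" \
  --tag-list "ci"

sudo gitlab-runner list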

4.Create .gitlab-ci.yml in the root directory of your Git project

stage1:
  tags:
  - ci
  script:
    - echo stage 1

stage2:
  tags:
  - ci
  script:
    - echo stage 2

tags: must match the tags used when registering the runner

Prometheus blackbox exporter in Kubernetes

prometheus-blackbox.yml

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-blackbox-exporter
  labels:
    app: prometheus-blackbox-exporter
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus-blackbox-exporter
  template:
    metadata:
      labels:
        app: prometheus-blackbox-exporter
    spec:
      restartPolicy: Always
      containers:
        - name: blackbox-exporter
          image: "prom/blackbox-exporter:v0.15.1"
          imagePullPolicy: IfNotPresent
          args:
            - "--config.file=/config/blackbox.yaml"
          ports:
            - containerPort: 9115
          volumeMounts:
            - mountPath: /config
              name: prometheus-config
      volumes:
        - name: prometheus-config
          configMap:
            name: prometheus-blackbox-exporter

---
kind: Service
apiVersion: v1
metadata:
  name: prometheus-blackbox-exporter
  labels:
    app: prometheus-blackbox-exporter
spec:
  type: ClusterIP
  ports:
    - name: http
      port: 9115
      protocol: TCP
  selector:
    app: prometheus-blackbox-exporter

---

apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-blackbox-exporter
  labels:
    app: prometheus-blackbox-exporter
data:
  blackbox.yaml: |
    modules:
      http_2xx:
        http:
          no_follow_redirects: false
          preferred_ip_protocol: ip4
          valid_http_versions:
          - HTTP/1.1
          - HTTP/2
          valid_status_codes: []
        prober: http
        timeout: 5s

2. In Prometheus, update the prometheus.yml file as shown below
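
A typical scrape job for the blackbox exporter looks like this sketch (assumes Prometheus can reach the prometheus-blackbox-exporter service on port 9115 and that https://example.com is the endpoint to probe):

scrape_configs:
  - job_name: 'web1'
    metrics_path: /probe
    params:
      module: [http_2xx]
    static_configs:
      - targets:
          - https://example.com
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: prometheus-blackbox-exporter:9115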

3. Prometheus query

probe_http_status_code{job="web1"}

Rancher proxy rule in httpd with WebSocket Secure (wss)

<VirtualHost *:80>
	ServerName rancher.initedit.com
	Redirect permanent / https://rancher.initedit.com/
	RewriteEngine on
	RewriteCond %{SERVER_NAME} =rancher.initedit.com
	RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,QSA,R=permanent]
</VirtualHost>

<VirtualHost *:443>
    ServerName rancher.initedit.com
    AllowEncodedSlashes on
    SSLEngine On
    SSLProxyEngine On
    RewriteEngine on
    SSLProxyVerify none
    SSLProxyCheckPeerCN off
    SSLProxyCheckPeerName off
    SSLProxyCheckPeerExpire off
    RequestHeader set X-Forwarded-Proto "https"
    RewriteCond %{HTTP:Upgrade} =websocket [NC]
    RewriteRule /(.*)   wss://192.168.0.183:8443/$1 [P,L]
    RewriteCond %{HTTP:Upgrade} !=websocket [NC]
    RewriteRule /(.*)   https://192.168.0.183:8443/$1 [P,L]
    ProxyPassReverse / https://192.168.0.183:8443/
    ProxyPreserveHost On
    SSLCertificateFile /etc/letsencrypt/live/rancher.initedit.com/cert.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/rancher.initedit.com/privkey.pem
    SSLCertificateChainFile /etc/letsencrypt/live/rancher.initedit.com/fullchain.pem
    Include /etc/letsencrypt/options-ssl-apache.conf
</VirtualHost>
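
These rules need the proxy, WebSocket tunnel, rewrite, SSL and headers modules loaded in httpd (a sketch for CentOS; on most installs these are already enabled via conf.modules.d):

LoadModule proxy_module modules/mod_proxy.so
LoadModule proxy_http_module modules/mod_proxy_http.so
LoadModule proxy_wstunnel_module modules/mod_proxy_wstunnel.so
LoadModule rewrite_module modules/mod_rewrite.so
LoadModule ssl_module modules/mod_ssl.so
LoadModule headers_module modules/mod_headers.so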

More info : https://stackoverflow.com/questions/27526281/websockets-and-apache-proxy-how-to-configure-mod-proxy-wstunnel

Docker secure private registry with HTTPS behind an Apache proxy using Let's Encrypt

1.docker-ui.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: docker-registry-deployment
  labels:
    app: registry
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: docker-registry
      env: prod
  template:
    metadata:
      labels:
        app: docker-registry
        env: prod
    spec:
      containers:
      - name: docker-registry-container
        image: registry:2
        imagePullPolicy: IfNotPresent
        env:
          - name: REGISTRY_STORAGE_DELETE_ENABLED
            value: "true"
        resources:
          requests:
            memory: "256Mi"
            cpu: "200m"
          limits:
            memory: "512Mi"
            cpu: "200m"
        volumeMounts:
          - name: registry-data
            mountPath: /var/lib/registry
          - name: config-yml
            mountPath: /etc/docker/registry/config.yml
            subPath: config.yml   
        ports:
        - containerPort: 5000
      volumes:
        - name: registry-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/docker_registry"
        - name: config-yml
          configMap:
            name: docker-registry-conf
              

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: docker-registry-conf
data:
  config.yml: |+
    version: 0.1
    log:
      fields:
        service: registry
    storage:
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: 0.0.0.0:5000
      secret: asecretforlocaldevelopment
      headers:
        X-Content-Type-Options: [nosniff]
    health:
      storagedriver:
        enabled: true
        interval: 10s
        threshold: 3

---
kind: Service
apiVersion: v1
metadata:
  name: docker-registry-service
  labels:
    app: docker-registry
    env: prod
spec:
  selector:
    app: docker-registry
    env: prod
  ports:
  - name: docker-registry
    protocol: TCP
    port: 5000
    targetPort: 5000

#Docker ui
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: docker-ui-deployment
  labels:
    app: dockerui
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: dockerui
      env: prod
  template:
    metadata:
      labels:
        app: dockerui
        env: prod
    spec:
      containers:
      - name: dockerui-container
        image: joxit/docker-registry-ui:static
        imagePullPolicy: IfNotPresent
        env:
          - name: REGISTRY_URL
            value: "http://docker-registry-service:5000"
          - name: DELETE_IMAGES
            value: "true"
          - name: REGISTRY_TITLE
            value: "Docker-UI"
        resources:
          requests:
            memory: "512Mi"
            cpu: "200m"
          limits:
            memory: "512Mi"
            cpu: "200m"
        ports:
        - containerPort: 80 

---
kind: Service
apiVersion: v1
metadata:
  name: dockerui-service
  labels:
    app: dockerui
    env: prod
spec:
  selector:
    app: dockerui
    env: prod
  ports:
  - name: dockerui
    protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30005
  type: NodePort
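
Apply the manifest and confirm the registry and UI are up (a sketch; assumes the file is saved as docker-ui.yml):

kubectl apply -f docker-ui.yml
kubectl get pods,svc -l app=docker-registry
kubectl get pods,svc -l app=dockerui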

2.Apache proxy rule

htpasswd -c /etc/httpd/admin-htpasswd admin

<VirtualHost *:80>
    ServerName docker.initedit.com
    RewriteEngine on
    RewriteCond %{SERVER_NAME} =docker.initedit.com
    RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,QSA,R=permanent]
</VirtualHost>

<VirtualHost *:443>
<Location />
    AuthName authorization
    AuthType Basic
    require valid-user
    AuthUserFile '/etc/httpd/admin-htpasswd'
</Location>
    ServerName docker.initedit.com
    AllowEncodedSlashes on
    RewriteEngine on
    SSLEngine On
    SSLProxyEngine On
    ProxyPreserveHost On
    RequestHeader set X-Forwarded-Proto "https"
    ProxyPass /  http://192.168.0.183:30005/
    ProxyPassReverse / http://192.168.0.183:30005/
    SSLCertificateFile /etc/letsencrypt/live/docker.initedit.com/cert.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/docker.initedit.com/privkey.pem
    SSLCertificateChainFile /etc/letsencrypt/live/docker.initedit.com/fullchain.pem
    Include /etc/letsencrypt/options-ssl-apache.conf
</VirtualHost>

Note: Add htpasswd for basic authentication
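
Because the proxy enforces basic authentication, log the Docker client in before pushing (using the admin user created with htpasswd above):

docker login docker.initedit.com -u admin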

docker tag docker.io/busybox docker.initedit.com/busybox1
docker push docker.initedit.com/busybox1

3. You can delete images from the UI or with docker_reg_tool. After deleting, run the command below inside the registry container to reclaim the space completely

docker exec -it name_of_registry_container bin/registry garbage-collect /etc/docker/registry/config.yml

HAProxy with basic authentication and SSL

1.Create a self-signed SSL certificate

openssl req \
    -new \
    -newkey rsa:4096 \
    -days 365 \
    -nodes \
    -x509 \
    -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=example.com" \
    -keyout example.com.key \
    -out example.com.crt

2. Create a PEM file from the key and cert above

cat example.com.crt example.com.key > example.com.pem

3.Update haproxy.cfg

global
    daemon
    maxconn 256

defaults
    mode http
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms
    
userlist http_basic_users
    group http_basic_users
    user admin insecure-password Your_Password groups http_basic_users
    
frontend http-in
    bind *:80
    acl example_acl hdr(host) -i example.initedit.com
    use_backend example_back if example_acl

backend example_back
    acl draw-auth http_auth(http_basic_users)
    http-request auth realm draw unless draw-auth
    server server1 192.168.0.150:8080

frontend https-in
    bind *:8889 ssl crt /usr/local/etc/haproxy/ssl/example.com.pem

    http-request redirect scheme https unless { ssl_fc }
    http-request set-header X-Forwarded-Proto https if { ssl_fc }
    http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
    
    acl example_acl hdr(host) -i example.com
    use_backend example_back_ssl if example_acl

backend example_back_ssl
    server server1 192.168.0.97:8443 check ssl verify none
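
A quick way to verify the basic auth frontend with curl (assumes example.initedit.com resolves to the HAProxy host):

curl -u admin:Your_Password http://example.initedit.com/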

More :
https://gist.github.com/Iristyle/5005653
https://serverfault.com/questions/239749/possible-to-add-basic-http-access-authentication-via-haproxy

Scale to Zero with OpenFaaS serverless deployment

Create an OpenFaaS serverless deployment

1.Update the faas-idler deployment in Kubernetes, changing dryRun=true to dryRun=false (see the sketch below)
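
One way to do this (a sketch; the exact flag name can differ slightly between faas-idler versions):

kubectl -n openfaas edit deployment faas-idler
# change the container arg -dry-run=true to -dry-run=false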

2.While deploying the OpenFaaS function, add the label "com.openfaas.scale.zero=true"

sudo faas-cli deploy -f python-fn.yml  --label "com.openfaas.scale.zero=true"

https://docs.openfaas.com/architecture/autoscaling/

Run Jenkins with docker-compose

jenkins.yml

version: '3'
services:
  jenkins:
    image: jenkins/jenkins
    user: root:root
    restart: always
    container_name: jenkins
    environment:
      TZ: "Asia/Kolkata"
    volumes:
      - /opt/docker/jenkins:/var/jenkins_home
    ports:
      - 8080:8080

start : docker-compose -f jenkins.yml up -d
stop : docker-compose -f jenkins.yml down
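
To finish the initial setup, read the generated admin password from the container (assumes the container name jenkins from the compose file above):

docker exec jenkins cat /var/jenkins_home/secrets/initialAdminPassword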

user mapping : https://dev.to/acro5piano/specifying-user-and-group-in-docker-i2e

Setup GlusterFS on CentOS 7 and use it in Kubernetes

glusterfs1 – 10.10.10.1
glusterfs2 – 10.10.10.2

add the below entries to /etc/hosts on both servers
10.10.10.1 glusterfs1
10.10.10.2 glusterfs2

– Add a 10 GB disk to both servers (e.g. /dev/sdb)

– On both servers

yum install centos-release-gluster -y 

mkdir -p /bricks/brick1
mkfs.xfs  /dev/sdb

echo "/dev/sdb /bricks/brick1 xfs defaults 1 2" >> /etc/fstab
mount -a 

yum install glusterfs-server -y
systemctl enable glusterd
systemctl start glusterd
systemctl status glusterd

-On glusterfs1

gluster peer probe glusterfs2

-On glusterfs2

gluster peer probe glusterfs1

-On any one of the servers

gluster volume create gv0 replica 2 glusterfs1:/bricks/brick1/gv0 glusterfs2:/bricks/brick1/gv0

gluster volume start gv0

gluster volume info

-Verify glusterfs mount

mkdir /mnt/gv0 
mount -t glusterfs glusterfs1:/gv0 /mnt/gv0 

Use GlusterFS in a Kubernetes deployment

-Install the GlusterFS client on all Kubernetes nodes

yum install centos-release-gluster -y 
yum install glusterfs -y

glusterfs-nginx-deployment.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: gluster-nginx
spec:
  selector:
    matchLabels:
      run: gluster-nginx
  replicas: 1
  template:
    metadata:
      labels:
        run: gluster-nginx
    spec:
      containers:
      - name: gluster-nginx
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: "/mnt/glusterfs"
          name: glusterfsvol
      volumes:
      - name: glusterfsvol
        glusterfs:
          endpoints: glusterfs-cluster
          path: gv0
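
The glusterfs-cluster endpoints referenced above must exist in the same namespace as the deployment. A minimal sketch pointing at the two Gluster servers (the port value is required by the API but is not actually used):

apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-cluster
subsets:
  - addresses:
      - ip: 10.10.10.1
    ports:
      - port: 1
  - addresses:
      - ip: 10.10.10.2
    ports:
      - port: 1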