Run Postgres with docker-compose

postgres.yml

version: '3'
services:
  postgres:
    image: postgres
    restart: always
    container_name: postgres
    environment:
      POSTGRES_PASSWORD: "password"
    volumes:
      - /opt/docker/postgres:/var/lib/postgresql/data
    ports:
      - 5432:5432

Start: docker-compose -f postgres.yml up -d
Stop: docker-compose -f postgres.yml down
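
To confirm Postgres is up, open a psql shell inside the container (a quick check using the default postgres superuser):

docker exec -it postgres psql -U postgres -c '\l'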

Setup GlusterFS on CentOS 7 and use it in Kubernetes

glusterfs1 – 10.10.10.1
glusterfs2 – 10.10.10.2

Add the below entries to /etc/hosts on both servers (matching the names and IPs above):
10.10.10.1 glusterfs1
10.10.10.2 glusterfs2

– Add a 10GB disk to both servers (e.g. /dev/sdb)

– On both servers

# enable the CentOS Storage SIG repo that provides the Gluster packages
yum install centos-release-gluster -y

# create the brick directory and format the new disk
mkdir -p /bricks/brick1
mkfs.xfs /dev/sdb

# mount the brick now and on every boot
echo "/dev/sdb /bricks/brick1 xfs defaults 1 2" >> /etc/fstab
mount -a

yum install glusterfs-server -y
systemctl enable glusterd
systemctl start glusterd
systemctl status glusterd

– On glusterfs1

gluster peer probe glusterfs2

– On glusterfs2

gluster peer probe glusterfs1
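
Verify that both peers joined the trusted pool:

gluster peer status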

– On any one server

gluster volume create gv0 replica 2 glusterfs1:/bricks/brick1/gv0 glusterfs2:/bricks/brick1/gv0

gluster volume start gv0

gluster volume info

– Verify the GlusterFS mount

mkdir /mnt/gv0 
mount -t glusterfs glusterfs1:/gv0 /mnt/gv0 
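
To confirm replication, write a file through the mount and check that it lands in the brick on both servers:

touch /mnt/gv0/replication-test
ls /bricks/brick1/gv0/    # run on both servers; the file should appear on each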

Use GlusterFS in a Kubernetes deployment

– Install the GlusterFS client on all Kubernetes nodes (glusterfs-fuse provides the mount.glusterfs helper that kubelet calls)

yum install centos-release-gluster -y
yum install glusterfs glusterfs-fuse -y

glusterfs-nginx-deployment.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: gluster-nginx
spec:
  selector:
    matchLabels:
      run: gluster-nginx
  replicas: 1
  template:
    metadata:
      labels:
        run: gluster-nginx
    spec:
      containers:
      - name: gluster-nginx
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: "/mnt/glusterfs"
          name: glusterfsvol
      volumes:
      - name: glusterfsvol
        glusterfs:
          endpoints: glusterfs-cluster
          path: gv0
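
Here path is the Gluster volume name; the server addresses come from the endpoints: glusterfs-cluster reference, which must exist as an Endpoints object in the same namespace as the pod. A minimal sketch using the two server IPs from this setup (the port value is required by the schema but not used):

glusterfs-endpoints.yml

apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-cluster
subsets:
  - addresses:
      - ip: 10.10.10.1
    ports:
      - port: 1
  - addresses:
      - ip: 10.10.10.2
    ports:
      - port: 1

kubectl apply -f glusterfs-endpoints.yml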

Running SonarQube with Postgres on Kubernetes

Note:
– For SonarQube 8, set sysctl -w vm.max_map_count=262144 on the host machine
– Move all extension jars from the container to your extensions dir
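
sysctl -w does not survive a reboot. To make the setting persistent, drop it into a sysctl config file (a common approach; the file name here is arbitrary):

echo "vm.max_map_count=262144" >> /etc/sysctl.d/99-sonarqube.conf
sysctl --system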

1.postgres.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: postgres-deployment
  labels:
    app: postgres
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: postgres
      env: prod
  template:
    metadata:
      labels:
        app: postgres
        env: prod
    spec:
      containers:
      - name: postgres-container
        image: postgres
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "256Mi"
            cpu: "100m"
          limits:
            memory: "512Mi"
            cpu: "200m"
        env:
          - name: POSTGRES_PASSWORD
            value: "PASSWORD"  
        volumeMounts:
          - name: postgres-data
            mountPath: /var/lib/postgresql/data
        ports:
        - containerPort: 5432    
      volumes:
        - name: postgres-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/postgres/data"
---
kind: Service
apiVersion: v1
metadata:
  name: postgres-service
  labels:
    app: postgres
    env: prod
spec:
  selector:
    app: postgres
    env: prod
  ports:
  - name: postgres
    protocol: TCP
    port: 5432
    targetPort: 5432
    nodePort: 30432
  type: NodePort

2.sonarqube.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: sonarqube-deployment
  labels:
    app: sonarqube
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sonarqube
      env: prod
  template:
    metadata:
      labels:
        app: sonarqube
        env: prod
    spec:
      containers:
      - name: sonarqube-container
        image: sonarqube:7.7-community
        imagePullPolicy: IfNotPresent
        env:
          - name: SONARQUBE_JDBC_USERNAME
            value: postgres
          - name: SONARQUBE_JDBC_PASSWORD
            value: "PASSWORD"
          - name: SONARQUBE_JDBC_URL
            value: jdbc:postgresql://postgres-service:5432/sonar
        resources:
          requests:
            memory: "1024Mi"
            cpu: "500m"
          limits:
            memory: "2048Mi"
            cpu: "1000m"
        volumeMounts:
          - name: sonarqube-conf
            mountPath: /opt/sonarqube/conf
          - name: sonarqube-data
            mountPath: /opt/sonarqube/data
          - name: sonarqube-logs
            mountPath: /opt/sonarqube/logs
          - name: sonarqube-extensions
            mountPath: /opt/sonarqube/extensions      
        ports:
        - containerPort: 9000    
      volumes:
        - name: sonarqube-conf
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/conf"
        - name: sonarqube-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/data"
        - name: sonarqube-logs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/logs"
        - name: sonarqube-extensions
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/extensions"
---
kind: Service
apiVersion: v1
metadata:
  name: sonarqube-service
  labels:
    app: sonarqube
    env: prod
spec:
  selector:
    app: sonarqube
    env: prod
  ports:
  - name: sonarqube
    protocol: TCP
    port: 9000
    targetPort: 9000
    nodePort: 30900
  type: NodePort

Note: Create the sonar database in Postgres before starting SonarQube.
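
One way to sequence this (a sketch, assuming the manifest file names and deployment names above):

kubectl apply -f postgres.yml
kubectl exec -it deployment/postgres-deployment -- psql -U postgres -c 'CREATE DATABASE sonar;'
kubectl apply -f sonarqube.yml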

To disallow anonymous users from accessing SonarQube, see:
https://stackoverflow.com/questions/16825331/disallow-anonymous-users-to-access-sonar

Run ELK on Kubernetes

1.elasticsearch.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch-deployment
  labels:
    app: elasticsearch
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      name: elasticsearch-deployment
      labels:
        app: elasticsearch
    spec:
      containers:
      - name: elasticsearch
        image: elasticsearch:7.8.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "1024Mi"
            cpu: "200m"
          limits:
            memory: "2048Mi"
            cpu: "500m"
        env:
          - name: discovery.type
            value: single-node
        volumeMounts:
          - name: elasticsearch-nfs
            mountPath: /usr/share/elasticsearch/data
        ports:
        - name: tcp-port
          containerPort: 9200
      volumes:
        - name: elasticsearch-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/elasticsearch"

---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-service
  labels:
    app: elasticsearch
    env: prod
spec:
  selector:
    app: elasticsearch
  type: NodePort
  ports:
  - name: elasticsearch
    port: 9200
    targetPort: 9200
    nodePort: 30061
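
Once the pod is running, Elasticsearch should answer on the NodePort (NODE_IP below is a placeholder for any cluster node's address):

curl http://NODE_IP:30061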

2.kibana.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana-deployment
  labels:
    app: kibana
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      name: kibana-deployment
      labels:
        app: kibana
    spec:
      containers:
      - name: kibana
        image: kibana:7.8.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "512Mi"
            cpu: "200m"
          limits:
            memory: "1024Mi"
            cpu: "400m"
        env:
          - name: ELASTICSEARCH_HOSTS
            value: http://elasticsearch-service:9200
        volumeMounts:
          - name: kibana-nfs
            mountPath: /usr/share/kibana/data
        ports:
        - name: tcp-port
          containerPort: 5601
      volumes:
        - name: kibana-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/kibana"

---
apiVersion: v1
kind: Service
metadata:
  name: kibana-service
  labels:
    app: kibana
    env: prod
spec:
  selector:
    app: kibana
  type: NodePort
  ports:
  - name: kibana
    port: 5601
    targetPort: 5601
    nodePort: 30063
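
Kibana can take a minute to come up; its status endpoint on the NodePort is a quick health check (NODE_IP again a placeholder):

curl http://NODE_IP:30063/api/status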

3.logstash.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-deployment
  labels:
    app: logstash
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: logstash
  template:
    metadata:
      name: logstash-deployment
      labels:
        app: logstash
    spec:
      containers:
      - name: logstash
        image: logstash:7.8.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "512Mi"
            cpu: "200m"
          limits:
            memory: "1024Mi"
            cpu: "200m"
        env:
          - name: xpack.monitoring.elasticsearch.hosts
            value: http://elasticsearch-service:9200
        volumeMounts:
          - name: logstash-nfs
            mountPath: /usr/share/logstash/pipeline
        ports:
        - name: tcp-port
          containerPort: 5044
      nodeSelector:
        node: lp-knode-02
      volumes:
        - name: logstash-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/logstash/pipeline"

---
apiVersion: v1
kind: Service
metadata:
  name: logstash-service
  labels:
    app: logstash
    env: prod
spec:
  selector:
    app: logstash
  type: NodePort
  ports:
  - name: logstash
    port: 5044
    targetPort: 5044
    nodePort: 30062

– Logstash pipeline config files for input, filter, and output; place them in the NFS export mounted at /usr/share/logstash/pipeline

02-beats-input.conf

input {
  beats {
    port => 5044
  }
}

30-elasticsearch-output.conf

output {
  elasticsearch {
    hosts => ["http://elasticsearch-service:9200"]
    manage_template => false
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
}

10-syslog-filter.conf

filter {
  if [fileset][module] == "system" {
    if [fileset][name] == "auth" {
      grok {
        match => { "message" => ["%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} %{DATA:[system][auth][ssh][method]} for (invalid user )?%{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]} port %{NUMBER:[system][auth][ssh][port]} ssh2(: %{GREEDYDATA:[system][auth][ssh][signature]})?",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} user %{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: Did not receive identification string from %{IPORHOST:[system][auth][ssh][dropped_ip]}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sudo(?:\[%{POSINT:[system][auth][pid]}\])?: \s*%{DATA:[system][auth][user]} :( %{DATA:[system][auth][sudo][error]} ;)? TTY=%{DATA:[system][auth][sudo][tty]} ; PWD=%{DATA:[system][auth][sudo][pwd]} ; USER=%{DATA:[system][auth][sudo][user]} ; COMMAND=%{GREEDYDATA:[system][auth][sudo][command]}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} groupadd(?:\[%{POSINT:[system][auth][pid]}\])?: new group: name=%{DATA:system.auth.groupadd.name}, GID=%{NUMBER:system.auth.groupadd.gid}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} useradd(?:\[%{POSINT:[system][auth][pid]}\])?: new user: name=%{DATA:[system][auth][user][add][name]}, UID=%{NUMBER:[system][auth][user][add][uid]}, GID=%{NUMBER:[system][auth][user][add][gid]}, home=%{DATA:[system][auth][user][add][home]}, shell=%{DATA:[system][auth][user][add][shell]}$",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} %{DATA:[system][auth][program]}(?:\[%{POSINT:[system][auth][pid]}\])?: %{GREEDYMULTILINE:[system][auth][message]}"] }
        pattern_definitions => {
          "GREEDYMULTILINE"=> "(.|\n)*"
        }
        remove_field => "message"
      }
      date {
        match => [ "[system][auth][timestamp]", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
      }
      geoip {
        source => "[system][auth][ssh][ip]"
        target => "[system][auth][ssh][geoip]"
      }
    }
    else if [fileset][name] == "syslog" {
      grok {
        match => { "message" => ["%{SYSLOGTIMESTAMP:[system][syslog][timestamp]} %{SYSLOGHOST:[system][syslog][hostname]} %{DATA:[system][syslog][program]}(?:\[%{POSINT:[system][syslog][pid]}\])?: %{GREEDYMULTILINE:[system][syslog][message]}"] }
        pattern_definitions => { "GREEDYMULTILINE" => "(.|\n)*" }
        remove_field => "message"
      }
      date {
        match => [ "[system][syslog][timestamp]", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
      }
    }
  }
}
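
Anything shipping Beats traffic can now target the NodePort exposed above. A minimal filebeat.yml sketch (NODE_IP is a placeholder for a cluster node address; the log path is an example):

filebeat.inputs:
- type: log
  paths:
    - /var/log/*.log

output.logstash:
  hosts: ["NODE_IP:30062"]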

Kubernetes Taints and Tolerations example

1.Add a taint to the node

# alternative examples, each tainting with a different key=value pair
kubectl taint nodes lp-knode-01 key=value:NoSchedule

kubectl taint nodes lp-knode-01 key=arm:NoSchedule

kubectl taint nodes lp-knode-01 node=arm64:NoSchedule

kubectl taint nodes lp-arm-{1,2,3,4}.home node=arm64:NoSchedule


kubectl get nodes -o json | jq '.items[].spec.taints'
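
To remove a taint later, repeat the taint command with a trailing minus on the effect:

kubectl taint nodes lp-knode-01 key=arm:NoSchedule-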

2.Add a toleration in the deployment

apiVersion: apps/v1
kind: Deployment
metadata:
  name: sonarqube-deployment
  labels:
    app: sonarqube
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sonarqube
      env: prod
  template:
    metadata:
      labels:
        app: sonarqube
        env: prod
    spec:
      nodeSelector:
        node: "lp-knode-01"
      containers:
      - name: sonarqube-container
        image: sonarqube
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "1024Mi"
            cpu: "100m"
          limits:
            memory: "2048Mi"
            cpu: "2000m"
        volumeMounts:
          - name: sonarqube-data
            mountPath: /opt/sonarqube/data
        ports:
        - containerPort: 9000    
      tolerations:
      - key: "key"
        operator: "Equal"
        value: "arm"
        effect: "NoSchedule"
      volumes:
        - name: sonarqube-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/data"
---
kind: Service
apiVersion: v1
metadata:
  name: sonarqube-service
  labels:
    app: sonarqube
    env: prod
spec:
  selector:
    app: sonarqube
    env: prod
  ports:
  - name: sonarqube
    protocol: TCP
    port: 9000
    targetPort: 9000
    nodePort: 30900
  type: NodePort

A toleration with the Exists operator matches any value for the key:

      tolerations:
      - key: "key"
        operator: "Exists"
        effect: "NoSchedule"

nohup alternative for Windows in Jenkins

1.Run a Windows batch command in the background using start

pipeline {
    agent {
        label 'master'
    }
    stages {
        stage('windows-nohup') {
            steps {
                bat """set JENKINS_NODE_COOKIE=dontKillMe && start /min COMMAND_TO_RUN"""
            }
        }
    }
}
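
JENKINS_NODE_COOKIE=dontKillMe tells the Jenkins ProcessTreeKiller not to reap the spawned process when the build finishes, and start /min launches the command detached in a minimized window.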

2.Run a Windows batch command in the background, writing logs to the Jenkins workspace, using batch and PowerShell

pipeline {
    agent {
        label 'master'
    }
    stages {
        stage('windows-nohup') {
            steps {
                // write the command to test.bat, using REPLACE as a placeholder so that
                // echo does not interpret the redirect symbol itself
                bat """echo COMMAND_TO_RUN REPLACE COMMAND.log > test.bat"""
                // swap REPLACE for the redirect symbol (>)
                powershell 'Get-Content test.bat | ForEach-Object { $_ -replace "REPLACE", " > " } | Set-Content test2.bat'
                // run test2.bat in the background
                bat 'set JENKINS_NODE_COOKIE=dontKillMe && start /min test2.bat ^& exit'
                // logs will be available in the workspace as COMMAND.log

                // Example
                /*
                bat """echo C:\\java8\\bin\\java -jar core.jar --config=test.json REPLACE java.log > test.bat"""
                powershell 'Get-Content test.bat | ForEach-Object { $_ -replace "REPLACE", " > " } | Set-Content test2.bat'
                bat 'more test2.bat'
                bat 'set JENKINS_NODE_COOKIE=dontKillMe && start /min test2.bat ^& exit'
                */
            }
        }
    }
}