Running SonarQube with PostgreSQL on Kubernetes

Note:
– For SonarQube 8, set sysctl -w vm.max_map_count=262144 on the host machine
– Move all extension jars from the container to your extensions directory
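
To make the vm.max_map_count setting survive reboots, it can also be written to /etc/sysctl.conf (a minimal sketch, assuming a standard Linux host):

echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p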

1. postgres.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: postgres-deployment
  labels:
    app: postgres
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: postgres
      env: prod
  template:
    metadata:
      labels:
        app: postgres
        env: prod
    spec:
      containers:
      - name: postgres-container
        image: postgres
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "256Mi"
            cpu: "100m"
          limits:
            memory: "512Mi"
            cpu: "200m"
        env:
          - name: POSTGRES_PASSWORD
            value: "PASSWORD"  
        volumeMounts:
          - name: postgres-data
            mountPath: /var/lib/postgresql/data
        ports:
        - containerPort: 5432    
      volumes:
        - name: postgres-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/postgres/data"
---
kind: Service
apiVersion: v1
metadata:
  name: postgres-service
  labels:
    app: postgres
    env: prod
spec:
  selector:
    app: postgres
    env: prod
  ports:
  - name: postgres
    protocol: TCP
    port: 5432
    targetPort: 5432
    nodePort: 30432
  type: NodePort

2. sonarqube.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: sonarqube-deployment
  labels:
    app: sonarqube
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sonarqube
      env: prod
  template:
    metadata:
      labels:
        app: sonarqube
        env: prod
    spec:
      containers:
      - name: sonarqube-container
        image: sonarqube:7.7-community
        imagePullPolicy: IfNotPresent
        env:
          - name: SONARQUBE_JDBC_USERNAME
            value: postgres
          - name: SONARQUBE_JDBC_PASSWORD
            value: "PASSWORD"
          - name: SONARQUBE_JDBC_URL
            value: jdbc:postgresql://postgres-service:5432/sonar
        resources:
          requests:
            memory: "1024Mi"
            cpu: "500m"
          limits:
            memory: "2048Mi"
            cpu: "1000m"
        volumeMounts:
          - name: sonarqube-conf
            mountPath: /opt/sonarqube/conf
          - name: sonarqube-data
            mountPath: /opt/sonarqube/data
          - name: sonarqube-logs
            mountPath: /opt/sonarqube/logs
          - name: sonarqube-extensions
            mountPath: /opt/sonarqube/extensions      
        ports:
        - containerPort: 9000    
      volumes:
        - name: sonarqube-conf
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/conf"
        - name: sonarqube-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/data"
        - name: sonarqube-logs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/logs"
        - name: sonarqube-extensions
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/extensions"
---
kind: Service
apiVersion: v1
metadata:
  name: sonarqube-service
  labels:
    app: sonarqube
    env: prod
spec:
  selector:
    app: sonarqube
    env: prod
  ports:
  - name: sonarqube
    protocol: TCP
    port: 9000
    targetPort: 9000
    nodePort: 30900
  type: NodePort

Note: Create the sonar database in PostgreSQL before starting SonarQube.
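
For example, from inside the running Postgres pod (the pod name is a placeholder):

kubectl exec -it <postgres-pod> -- psql -U postgres -c "CREATE DATABASE sonar;"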

Disallow anonymous users from accessing SonarQube: https://stackoverflow.com/questions/16825331/disallow-anonymous-users-to-access-sonar
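
In practice this comes down to one property in sonar.properties (the conf directory is mounted from NFS above):

sonar.forceAuthentication=true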

Run ELK on Kubernetes

1. elasticsearch.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch-deployment
  labels:
    app: elasticsearch
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      name: elasticsearch-deployment
      labels:
        app: elasticsearch
    spec:
      containers:
      - name: elasticsearch
        image: elasticsearch:7.8.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "1024Mi"
            cpu: "200m"
          limits:
            memory: "2048Mi"
            cpu: "500m"
        env:
          - name: discovery.type
            value: single-node
        volumeMounts:
          - name: elasticsearch-nfs
            mountPath: /usr/share/elasticsearch/data
        ports:
        - name: tcp-port
          containerPort: 9200
      volumes:
        - name: elasticsearch-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/elasticsearch"

---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-service
  labels:
    app: elasticsearch
    env: prod
spec:
  selector:
    app: elasticsearch
  type: NodePort
  ports:
  - name: elasticsearch
    port: 9200
    targetPort: 9200
    nodePort: 30061
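
Once the pod is running, the single-node cluster can be checked through the NodePort (<node-ip> is any Kubernetes node):

curl http://<node-ip>:30061/_cluster/health?pretty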

2. kibana.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana-deployment
  labels:
    app: kibana
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      name: kibana-deployment
      labels:
        app: kibana
    spec:
      containers:
      - name: kibana
        image: kibana:7.8.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "512Mi"
            cpu: "200m"
          limits:
            memory: "1024Mi"
            cpu: "400m"
        env:
          - name: ELASTICSEARCH_HOSTS
            value: http://elasticsearch-service:9200
        volumeMounts:
          - name: kibana-nfs
            mountPath: /usr/share/kibana/data
        ports:
        - name: tcp-port
          containerPort: 5601
      volumes:
        - name: kibana-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/kibana"

---
apiVersion: v1
kind: Service
metadata:
  name: kibana-service
  labels:
    app: kibana
    env: prod
spec:
  selector:
    app: kibana
  type: NodePort
  ports:
  - name: kibana
    port: 5601
    targetPort: 5601
    nodePort: 30063

3. logstash.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-deployment
  labels:
    app: logstash
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: logstash
  template:
    metadata:
      name: logstash-deployment
      labels:
        app: logstash
    spec:
      containers:
      - name: logstash
        image: logstash:7.8.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "512Mi"
            cpu: "200m"
          limits:
            memory: "1024Mi"
            cpu: "200m"
        env:
          - name: xpack.monitoring.elasticsearch.hosts
            value: http://elasticsearch-service:9200
        volumeMounts:
          - name: logstash-nfs
            mountPath: /usr/share/logstash/pipeline
        ports:
        - name: tcp-port
          containerPort: 5044
      nodeSelector:
        node: lp-knode-02
      volumes:
        - name: logstash-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/logstash/pipeline"

---
apiVersion: v1
kind: Service
metadata:
  name: logstash-service
  labels:
    app: logstash
    env: prod
spec:
  selector:
    app: logstash
  type: NodePort
  ports:
  - name: logstash
    port: 5044
    targetPort: 5044
    nodePort: 30062

Logstash pipeline config files for the input, filter, and output. These live on the NFS share (/opt/nfs1/logstash/pipeline), which is mounted at /usr/share/logstash/pipeline in the container:

02-beats-input.conf

input {
  beats {
    port => 5044
  }
}

30-elasticsearch-output.conf

output {
  elasticsearch {
    hosts => ["http://elasticsearch-service:9200"]
    manage_template => false
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
}

10-syslog-filter.conf

filter {
  if [fileset][module] == "system" {
    if [fileset][name] == "auth" {
      grok {
        match => { "message" => ["%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} %{DATA:[system][auth][ssh][method]} for (invalid user )?%{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]} port %{NUMBER:[system][auth][ssh][port]} ssh2(: %{GREEDYDATA:[system][auth][ssh][signature]})?",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} user %{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: Did not receive identification string from %{IPORHOST:[system][auth][ssh][dropped_ip]}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sudo(?:\[%{POSINT:[system][auth][pid]}\])?: \s*%{DATA:[system][auth][user]} :( %{DATA:[system][auth][sudo][error]} ;)? TTY=%{DATA:[system][auth][sudo][tty]} ; PWD=%{DATA:[system][auth][sudo][pwd]} ; USER=%{DATA:[system][auth][sudo][user]} ; COMMAND=%{GREEDYDATA:[system][auth][sudo][command]}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} groupadd(?:\[%{POSINT:[system][auth][pid]}\])?: new group: name=%{DATA:system.auth.groupadd.name}, GID=%{NUMBER:system.auth.groupadd.gid}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} useradd(?:\[%{POSINT:[system][auth][pid]}\])?: new user: name=%{DATA:[system][auth][user][add][name]}, UID=%{NUMBER:[system][auth][user][add][uid]}, GID=%{NUMBER:[system][auth][user][add][gid]}, home=%{DATA:[system][auth][user][add][home]}, shell=%{DATA:[system][auth][user][add][shell]}$",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} %{DATA:[system][auth][program]}(?:\[%{POSINT:[system][auth][pid]}\])?: %{GREEDYMULTILINE:[system][auth][message]}"] }
        pattern_definitions => {
          "GREEDYMULTILINE"=> "(.|\n)*"
        }
        remove_field => "message"
      }
      date {
        match => [ "[system][auth][timestamp]", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
      }
      geoip {
        source => "[system][auth][ssh][ip]"
        target => "[system][auth][ssh][geoip]"
      }
    }
    else if [fileset][name] == "syslog" {
      grok {
        match => { "message" => ["%{SYSLOGTIMESTAMP:[system][syslog][timestamp]} %{SYSLOGHOST:[system][syslog][hostname]} %{DATA:[system][syslog][program]}(?:\[%{POSINT:[system][syslog][pid]}\])?: %{GREEDYMULTILINE:[system][syslog][message]}"] }
        pattern_definitions => { "GREEDYMULTILINE" => "(.|\n)*" }
        remove_field => "message"
      }
      date {
        match => [ "[system][syslog][timestamp]", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
      }
    }
  }
}
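
To feed this pipeline, a Filebeat instance on any host can point at the beats input through the NodePort (a minimal sketch; the node IP and log path are assumptions):

filebeat.inputs:
- type: log
  paths:
    - /var/log/*.log

output.logstash:
  hosts: ["<node-ip>:30062"]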

Kubernetes Taints and Tolerations example

1. Add a taint to a node (examples with different key/value pairs below)

kubectl taint nodes lp-knode-01 key=value:NoSchedule

kubectl taint nodes lp-knode-01 key=arm:NoSchedule

kubectl taint nodes lp-knode-01 node=arm64:NoSchedule

kubectl taint nodes lp-arm-{1,2,3,4}.home node=arm64:NoSchedule


List the taints on all nodes:

kubectl get nodes -o json | jq '.items[].spec.taints'
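
A taint can be removed by appending a minus sign to the key and effect:

kubectl taint nodes lp-knode-01 key:NoSchedule-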

2. Add a toleration to the deployment

apiVersion: apps/v1
kind: Deployment
metadata:
  name: sonarqube-deployment
  labels:
    app: sonarqube
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sonarqube
      env: prod
  template:
    metadata:
      labels:
        app: sonarqube
        env: prod
    spec:
      nodeSelector:
        node: "lp-knode-01"
      containers:
      - name: sonarqube-container
        image: sonarqube
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "1024Mi"
            cpu: "100m"
          limits:
            memory: "2048Mi"
            cpu: "2000m"
        volumeMounts:
          - name: sonarqube-data
            mountPath: /opt/sonarqube/data
        ports:
        - containerPort: 9000    
      tolerations:
      - key: "key"
        operator: "Equal"
        value: "arm"
        effect: "NoSchedule"
      volumes:
        - name: sonarqube-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/data"
---
kind: Service
apiVersion: v1
metadata:
  name: sonarqube-service
  labels:
    app: sonarqube
    env: prod
spec:
  selector:
    app: sonarqube
    env: prod
  ports:
  - name: sonarqube
    protocol: TCP
    port: 9000
    targetPort: 9000
    nodePort: 30900
  type: NodePort

To tolerate a taint regardless of its value, use the Exists operator instead of Equal:

      tolerations:
      - key: "key"
        operator: "Exists"
        effect: "NoSchedule"

nohup alternative for Windows in Jenkins

1. Run a Windows batch command in the background using start

pipeline
{
    agent {
        label 'master'
    }
    stages {
        stage ('windows-nohup')
        {
            steps
            {
                bat """set JENKINS_NODE_COOKIE=dontKillMe && start /min COMMAND_TO_RUN"""
            }
        }
    }
}

2. Run a Windows batch command in the background, with logs in the Jenkins workspace, using batch and PowerShell

pipeline
{
    agent {
        label 'master'
    }
    stages {
        stage ('windows-nohup')
        {
            steps
            {
                // Copy the command to test.bat; REPLACE is a placeholder for the redirect
                bat """echo COMMAND_TO_RUN REPLACE COMMAND.log > test.bat"""
                // Replace REPLACE with the redirect symbol (>)
                powershell 'Get-Content test.bat | ForEach-Object { $_ -replace "REPLACE", " > " } | Set-Content test2.bat'
                // Run test2.bat in the background
                bat 'set JENKINS_NODE_COOKIE=dontKillMe && start /min test2.bat ^& exit'
                // Logs will be available in the workspace as COMMAND.log

                // Example
                /*
                bat """echo C:\\java8\\bin\\java -jar core.jar --config=test.json REPLACE java.log > test.bat"""
                powershell 'Get-Content test.bat | ForEach-Object { $_ -replace "REPLACE", " > " } | Set-Content test2.bat'
                bat 'more test2.bat'
                bat 'set JENKINS_NODE_COOKIE=dontKillMe && start /min test2.bat ^& exit'
                */
            }
        }
    }
}

Kubernetes highly available cluster using 3 master nodes

1. Set up a TCP load balancer using NGINX (192.168.0.50)

load_module /usr/lib64/nginx/modules/ngx_stream_module.so;

events { }

stream {

    upstream kapi {
        server 192.168.0.51:6443;
        server 192.168.0.52:6443;
        server 192.168.0.53:6443;
    }

    server {
        listen 8888;
        proxy_pass kapi;
    }

}
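
Validate the config and restart NGINX (assuming a systemd-based host):

nginx -t
systemctl restart nginx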

2. Run the below commands on all master nodes

yum install -y kubelet kubeadm kubectl docker
systemctl enable kubelet
systemctl start kubelet
systemctl enable docker
systemctl start docker
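
kubeadm also expects swap to be disabled on each node (a standard kubeadm prerequisite; adjust for your OS):

swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab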

3. Run the below command on 192.168.0.51

kubeadm init --control-plane-endpoint "192.168.0.50:8888" --upload-certs

It will generate the commands for joining the other master nodes and the worker nodes.

4. Join the other 2 masters (192.168.0.52, 192.168.0.53)

 kubeadm join 192.168.0.50:8888 --token hvlnv8.6r90i8d04cs23sii \
    --discovery-token-ca-cert-hash sha256:bc6fe39f98c7ae6cd8434bd8ade4eb3b15b45e151af37595e4be0a9fdfcfdcc4 \
    --control-plane --certificate-key 3659353b0a256650fb0c1a0357cb608d07e3bdc8ce8b64fa995bcb814c131fa6

Note: The token, discovery hash, and certificate key will differ in your environment. The worker join command is the same, but without the --control-plane and --certificate-key flags.

5. Get the cluster info

kubectl cluster-info

kubectl get node

Skip stages in Jenkins

pipeline
{
    agent {
        label 'master'
    }
    parameters {
        string(name: 'skip_stage', description: 'skip stage', defaultValue: 'skip')
    }
    stages
    {
        stage("test") {
            when {
                expression { params.skip_stage != "skip" }
            }
            steps {
                echo 'This runs only when skip_stage is not "skip"'
            }
        }
    }
}

More when conditions: https://gist.github.com/merikan/228cdb1893fca91f0663bab7b095757c

Set up an Elasticsearch cluster with 3 nodes

1. Update /etc/hosts on all 3 nodes

192.168.0.50 elk1.local
192.168.0.51 elk2.local
192.168.0.52 elk3.local

Note: At least 2 of the 3 nodes must be up for the cluster to stay healthy, since master election requires a majority.

2. Install Elasticsearch on all 3 nodes

rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

echo '[elasticsearch-7.x]
name=Elasticsearch repository for 7.x packages
baseurl=https://artifacts.elastic.co/packages/7.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md' > /etc/yum.repos.d/elasticsearch.repo

### Install Elasticsearch
yum -y install elasticsearch

### Enable Elasticsearch
systemctl enable elasticsearch

3. Edit /etc/elasticsearch/elasticsearch.yml as per your cluster name (e.g. elk-cluster)

cluster.name: elk-cluster
node.name: elk1.local
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.0.50
discovery.seed_hosts: ["elk1.local", "elk2.local", "elk3.local"]
cluster.initial_master_nodes: ["elk1.local", "elk2.local", "elk3.local"]

Change only node.name and network.host on the other 2 Elasticsearch nodes.
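
For example, on elk2.local only these two lines differ:

node.name: elk2.local
network.host: 192.168.0.51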

4. Restart the Elasticsearch service on all 3 nodes

systemctl restart elasticsearch

After the restart, one master node will be elected.

5. Check which node is the elected master in the Elasticsearch cluster

curl -X GET "192.168.0.50:9200/_cat/master?v&pretty"
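
Cluster health and the node list can be checked the same way:

curl -X GET "192.168.0.50:9200/_cluster/health?pretty"
curl -X GET "192.168.0.50:9200/_cat/nodes?v&pretty"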

More : https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-master.html

More information about setting up cluster : https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html

Private Docker registry server on Kubernetes with NFS persistent volume

1. Create registry-server.yml and apply it with: kubectl apply -f registry-server.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry-deployment
  labels:
    app: registry
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
      env: prod
  template:
    metadata:
      labels:
        app: registry
        env: prod
    spec:
      containers:
      - name: registry-container
        image: registry:2
        imagePullPolicy: IfNotPresent
        env:
          - name: REGISTRY_STORAGE_DELETE_ENABLED
            value: "true"
        resources:
          requests:
            memory: "256Mi"
            cpu: "200m"
          limits:
            memory: "512Mi"
            cpu: "200m"
        volumeMounts:
          - name: registry-data
            mountPath: /var/lib/registry
          - name: config-yml
            mountPath: /etc/docker/registry/config.yml
            subPath: config.yml   
        ports:
        - containerPort: 5000
      volumes:
        - name: registry-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/registry"
        - name: config-yml
          configMap:
            name: registry-conf

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: registry-conf
data:
  config.yml: |+
    version: 0.1
    log:
      fields:
        service: registry
    storage:
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: :5000
      headers:
        X-Content-Type-Options: [nosniff]
    health:
      storagedriver:
        enabled: true
        interval: 10s
        threshold: 3

---
kind: Service
apiVersion: v1
metadata:
  name: registry-service
  labels:
    app: registry
    env: prod
spec:
  selector:
    app: registry
    env: prod
  ports:
  - name: registry
    protocol: TCP
    port: 5000
    targetPort: 5000
    nodePort: 30500
  type: NodePort

2. As the registry runs over plain HTTP, add it as an insecure registry in /etc/docker/daemon.json on every worker node:

{
 "insecure-registries" : [ "192.168.0.183:30500" ]
}

3. Restart the docker service

systemctl restart docker

4. Tag the image with the registry server IP and port. A DNS name can be used if available.

docker tag debian:latest 192.168.0.183:30500/debianlocal:latest

5. Push the image to the private registry server

docker push 192.168.0.183:30500/debianlocal:latest

6. To delete images from the registry server, we will use docker_reg_tool: https://github.com/byrnedo/docker-reg-tool/blob/master/docker_reg_tool

Note:
– Remove the blobdescriptor: inmemory part from /etc/docker/registry/config.yml (already done in the ConfigMap above)
– REGISTRY_STORAGE_DELETE_ENABLED = "true" must be present in env

./docker_reg_tool http://192.168.0.183:30500 delete debianlocal latest

# Garbage-collect unreferenced blobs (this can be a cron job)

docker exec -it name_of_registry_container bin/registry garbage-collect /etc/docker/registry/config.yml
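
A host crontab entry could run this nightly (a sketch; the container name and schedule are assumptions):

0 2 * * * docker exec name_of_registry_container bin/registry garbage-collect /etc/docker/registry/config.yml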

Prometheus Pushgateway to monitor running processes (docker ps)

1. Deploy Pushgateway to Kubernetes

pushgateway.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: pushgateway-deployment
  labels:
    app: pushgateway
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pushgateway
      env: prod
  template:
    metadata:
      labels:
        app: pushgateway
        env: prod
    spec:
      containers:
      - name: pushgateway-container
        image: prom/pushgateway
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "128Mi"
            cpu: "200m"
          limits:
            memory: "256Mi"
            cpu: "200m"
        ports:
        - containerPort: 9091
---
kind: Service
apiVersion: v1
metadata:
  name: pushgateway-service
  labels:
    app: pushgateway
    env: prod
spec:
  selector:
    app: pushgateway
    env: prod
  ports:
  - name: pushgateway
    protocol: TCP
    port: 9091
    targetPort: 9091
    nodePort: 30191
  type: NodePort

2. Add the Pushgateway as a scrape target in /etc/prometheus/prometheus.yml, as shown below
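
A minimal scrape config might look like this (the job name is an assumption; honor_labels keeps the job/instance labels that were pushed to the gateway):

scrape_configs:
  - job_name: 'pushgateway'
    honor_labels: true
    static_configs:
      - targets: ['192.168.0.183:30191']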

3. Push the running Docker status to the Pushgateway using the below bash script, and add it to crontab

job="docker_status"

running_docker=$(docker ps | wc -l)
docker_images=$(docker images | wc -l)

cat <<EOF | curl --data-binary @- http://192.168.0.183:30191/metrics/job/$job/instance/$(hostname)
# TYPE running_docker counter
running_docker $running_docker
docker_images $docker_images
EOF
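
For example, to push every 5 minutes via crontab (the script path is a placeholder):

*/5 * * * * /opt/scripts/docker_status.sh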

4. Data can then be visualized in Prometheus and on the Pushgateway server

Python code:

import requests

job_name = 'cpuload'
instance_name = 'web1'
payload_key = 'cpu'
payload_value = '10'
# Push a single metric sample to the Pushgateway
response = requests.post(
    'http://192.168.0.183:30191/metrics/job/{j}/instance/{i}'.format(j=job_name, i=instance_name),
    data="{k} {v}\n".format(k=payload_key, v=payload_value))
# print(response.text)
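
The pushed value can be verified directly against the Pushgateway:

curl -s http://192.168.0.183:30191/metrics | grep cpu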

Pushgateway PowerShell command:

Invoke-WebRequest "http://192.168.0.183:30191/metrics/job/jenkins/instance/instance_name" -Body "process 1`n" -Method Post
$process1 = (tasklist /v | Select-String -AllMatches 'Jenkins' | findstr 'java' | %{ $_.Split(' ')[0]; }) | Out-String
if($process1 -like "java.exe*"){
   write-host("Jenkins java process is running")
   Invoke-WebRequest "http://192.168.0.183:30191/metrics/job/jenkins/instance/instance_name" -Body "jenkins_process 1`n" -Method Post
}else {
   write-host("Jenkins java process is not running")
   Invoke-WebRequest "http://192.168.0.183:30191/metrics/job/jenkins/instance/instance_name" -Body "jenkins_process 0`n" -Method Post
}

Jenkins build ID and name with git commit message

1. Install the build-name-setter plugin: https://plugins.jenkins.io/build-name-setter/

pipeline
{
    agent any
    stages
    {
        stage('Git-checkout')
        {
            steps
            {
                sh '''
                rm -rf simple-storage-solution 2>&1
                git clone https://github.com/initedit-project/simple-storage-solution.git
                cd simple-storage-solution
                git log -1 --pretty=format:"%s" > ../gitcommitmsg.txt
                '''
                script
                {
                    def gitcommitmsg = readFile(file: 'gitcommitmsg.txt')
                    buildDescription gitcommitmsg
                }
            }
        }
    }
}
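
The same plugin also provides a buildName step, so the build name itself can carry the commit message (a sketch; it would go inside the script block above):

buildName "#${env.BUILD_NUMBER} " + gitcommitmsg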

Output: the build description in Jenkins shows the latest git commit message.