Docker secure private registry with HTTPS behind an Apache proxy using Let's Encrypt

1. docker-ui.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: docker-registry-deployment
  labels:
    app: registry
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: docker-registry
      env: prod
  template:
    metadata:
      labels:
        app: docker-registry
        env: prod
    spec:
      containers:
      - name: docker-registry-container
        image: registry:2
        imagePullPolicy: IfNotPresent
        env:
          - name: REGISTRY_STORAGE_DELETE_ENABLED
            value: "true"
        resources:
          requests:
            memory: "256Mi"
            cpu: "200m"
          limits:
            memory: "512Mi"
            cpu: "200m"
        volumeMounts:
          - name: registry-data
            mountPath: /var/lib/registry
          - name: config-yml
            mountPath: /etc/docker/registry/config.yml
            subPath: config.yml   
        ports:
        - containerPort: 5000
      volumes:
        - name: registry-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/docker_registry"
        - name: config-yml
          configMap:
           name: docker-registry-conf     
              

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: docker-registry-conf
data:
  config.yml: |+
    version: 0.1
    log:
      fields:
        service: registry
    storage:
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: 0.0.0.0:5000
      secret: asecretforlocaldevelopment
      headers:
        X-Content-Type-Options: [nosniff]
    health:
      storagedriver:
        enabled: true
        interval: 10s
        threshold: 3

---
kind: Service
apiVersion: v1
metadata:
  name: docker-registry-service
  labels:
    app: docker-registry
    env: prod
spec:
  selector:
    app: docker-registry
    env: prod
  ports:
  - name: docker-registry
    protocol: TCP
    port: 5000
    targetPort: 5000

#Docker ui
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: docker-ui-deployment
  labels:
    app: dockerui
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: dockerui
      env: prod
  template:
    metadata:
      labels:
        app: dockerui
        env: prod
    spec:
      containers:
      - name: dockerui-container
        image: joxit/docker-registry-ui:static
        imagePullPolicy: IfNotPresent
        env:
          - name: REGISTRY_URL
            value: "http://docker-registry-service:5000"
          - name: DELETE_IMAGES
            value: "true"
          - name: REGISTRY_TITLE
            value: "Docker-UI"
        resources:
          requests:
            memory: "512Mi"
            cpu: "200m"
          limits:
            memory: "512Mi"
            cpu: "200m"
        ports:
        - containerPort: 80 

---
kind: Service
apiVersion: v1
metadata:
  name: dockerui-service
  labels:
    app: dockerui
    env: prod
spec:
  selector:
    app: dockerui
    env: prod
  ports:
  - name: dockerui
    protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30005
  type: NodePort

2. Apache proxy rules

htpasswd -c /etc/httpd/admin-htpasswd admin

<VirtualHost *:80>
ServerName docker.initedit.com
RewriteEngine on
RewriteCond %{SERVER_NAME} =docker.initedit.com
RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,QSA,R=permanent]
</VirtualHost>

<VirtualHost *:443>
<Location />
    AuthName authorization
    AuthType Basic
    require valid-user
    AuthUserFile '/etc/httpd/admin-htpasswd'
</Location>
    ServerName docker.initedit.com
    AllowEncodedSlashes on
    RewriteEngine on
    SSLEngine On
    SSLProxyEngine On
    ProxyPreserveHost On
    RequestHeader set X-Forwarded-Proto "https"
    ProxyPass /  http://192.168.0.183:30005/
    ProxyPassReverse / http://192.168.0.183:30005/
    SSLCertificateFile /etc/letsencrypt/live/docker.initedit.com/cert.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/docker.initedit.com/privkey.pem
    SSLCertificateChainFile /etc/letsencrypt/live/docker.initedit.com/chain.pem
    Include /etc/letsencrypt/options-ssl-apache.conf
</VirtualHost>
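Since pushes go through the same hostname, you can sanity-check the registry API through the proxy once Apache is reloaded (a quick check; curl prompts for the basic-auth password, and the /v2/_catalog endpoint lists the repositories):

curl -u admin https://docker.initedit.com/v2/_catalog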

Note: Add htpasswd for basic authentication
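Because the proxy enforces basic auth, log in to the registry with that user before pushing (Docker prompts for the password):

docker login docker.initedit.com -u admin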

docker tag docker.io/busybox docker.initedit.com/busybox1
docker push docker.initedit.com/busybox1

3. You can delete images from the UI or with docker_reg_tool. After deleting, run the command below inside the registry container to run garbage collection and remove the data completely.

docker exec -it name_of_registry_container bin/registry garbage-collect /etc/docker/registry/config.yml
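Since the registry runs as a Kubernetes pod here, the same garbage collection can also be run through kubectl (a sketch; substitute your actual pod name):

kubectl get pods -l app=docker-registry
kubectl exec -it <registry-pod-name> -- bin/registry garbage-collect /etc/docker/registry/config.yml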

Setup GlusterFS on CentOS 7 and use it in Kubernetes

glusterfs1 – 10.10.10.1
glusterfs2 – 10.10.10.2

Add the entries below to /etc/hosts on both servers:
10.10.10.1 glusterfs1
10.10.10.2 glusterfs2

- Add a 10 GB disk to both servers (e.g. /dev/sdb)

- On both servers

yum install centos-release-gluster -y 

mkdir -p /bricks/brick1
mkfs.xfs  /dev/sdb

echo "/dev/sdb /bricks/brick1 xfs defaults 1 2" >> /etc/fstab
mount -a 

yum install glusterfs-server -y
systemctl enable glusterd
systemctl start glusterd
systemctl status glusterd

- On glusterfs1

gluster peer probe glusterfs2

- On glusterfs2

gluster peer probe glusterfs1

- On any one of the servers

gluster volume create gv0 replica 2 glusterfs1:/bricks/brick1/gv0 glusterfs2:/bricks/brick1/gv0

gluster volume start gv0

gluster volume info

- Verify the GlusterFS mount

mkdir /mnt/gv0 
mount -t glusterfs glusterfs1:/gv0 /mnt/gv0 
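To confirm replication, write a file through the mount and check that it shows up in the brick directory on both servers (a quick sanity check):

echo "hello" > /mnt/gv0/test.txt
ls /bricks/brick1/gv0/    # run on both glusterfs1 and glusterfs2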

Use GlusterFS in a Kubernetes deployment

- Install the GlusterFS client on all Kubernetes nodes

yum install centos-release-gluster -y 
yum install glusterfs -y

glusterfs-nginx-deployment.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: gluster-nginx
spec:
  selector:
    matchLabels:
      run: gluster-nginx
  replicas: 1
  template:
    metadata:
      labels:
        run: gluster-nginx
    spec:
      containers:
      - name: gluster-nginx
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: "/mnt/glusterfs"
          name: glusterfsvol
      volumes:
      - name: glusterfsvol
        glusterfs:
          endpoints: glusterfs-cluster
          path: gv0
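The glusterfs volume type resolves the cluster through an Endpoints object whose name matches the endpoints field above, so that object has to exist in the same namespace. A minimal sketch for the two servers in this setup (the port number is required but arbitrary):

apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-cluster
subsets:
  - addresses:
      - ip: 10.10.10.1
    ports:
      - port: 1
  - addresses:
      - ip: 10.10.10.2
    ports:
      - port: 1
---
apiVersion: v1
kind: Service
metadata:
  name: glusterfs-cluster
spec:
  ports:
  - port: 1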

Running SonarQube with PostgreSQL on Kubernetes

Note:
- For SonarQube 8, set sysctl -w vm.max_map_count=262144 on the host machine (see the sketch below to make it persistent)
- Move all extension jars from the container to your extensions directory
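A minimal way to make the vm.max_map_count setting survive reboots (assuming a host that reads /etc/sysctl.d; the file name is arbitrary):

echo "vm.max_map_count=262144" > /etc/sysctl.d/99-sonarqube.conf
sysctl --system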

1. postgres.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: postgres-deployment
  labels:
    app: postgres
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: postgres
      env: prod
  template:
    metadata:
      labels:
        app: postgres
        env: prod
    spec:
      containers:
      - name: postgres-container
        image: postgres
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "256Mi"
            cpu: "100m"
          limits:
            memory: "512Mi"
            cpu: "200m"
        env:
          - name: POSTGRES_PASSWORD
            value: "PASSWORD"  
        volumeMounts:
          - name: postgres-data
            mountPath: /var/lib/postgresql/data
        ports:
        - containerPort: 5432    
      volumes:
        - name: postgres-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/postgres/data"
---
kind: Service
apiVersion: v1
metadata:
  name: postgres-service
  labels:
    app: postgres
    env: prod
spec:
  selector:
    app: postgres
    env: prod
  ports:
  - name: postgres
    protocol: TCP
    port: 5432
    targetPort: 5432
    nodePort: 30432
  type: NodePort

2. sonarqube.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: sonarqube-deployment
  labels:
    app: sonarqube
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sonarqube
      env: prod
  template:
    metadata:
      labels:
        app: sonarqube
        env: prod
    spec:
      containers:
      - name: sonarqube-container
        image: sonarqube:7.7-community
        imagePullPolicy: IfNotPresent
        env:
          - name: SONARQUBE_JDBC_USERNAME
            value: postgres
          - name: SONARQUBE_JDBC_PASSWORD
            value: "PASSWORD"
          - name: SONARQUBE_JDBC_URL
            value: jdbc:postgresql://postgres-service:5432/sonar
        resources:
          requests:
            memory: "1024Mi"
            cpu: "500m"
          limits:
            memory: "2048Mi"
            cpu: "1000m"
        volumeMounts:
          - name: sonarqube-conf
            mountPath: /opt/sonarqube/conf
          - name: sonarqube-data
            mountPath: /opt/sonarqube/data
          - name: sonarqube-logs
            mountPath: /opt/sonarqube/logs
          - name: sonarqube-extensions
            mountPath: /opt/sonarqube/extensions      
        ports:
        - containerPort: 9000    
      volumes:
        - name: sonarqube-conf
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/conf"
        - name: sonarqube-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/data"
        - name: sonarqube-logs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/logs"
        - name: sonarqube-extensions
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/extensions"
---
kind: Service
apiVersion: v1
metadata:
  name: sonarqube-service
  labels:
    app: sonarqube
    env: prod
spec:
  selector:
    app: sonarqube
    env: prod
  ports:
  - name: sonarqube
    protocol: TCP
    port: 9000
    targetPort: 9000
    nodePort: 30900
  type: NodePort

Note: Create the sonar database in Postgres before starting SonarQube.
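A minimal way to create it from the running Postgres pod (a sketch; substitute your actual pod name):

kubectl get pods -l app=postgres
kubectl exec -it <postgres-pod-name> -- psql -U postgres -c "CREATE DATABASE sonar;"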

To disable anonymous access to SonarQube, see: https://stackoverflow.com/questions/16825331/disallow-anonymous-users-to-access-sonar

Run ELK on Kubernetes

1. elasticsearch.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch-deployment
  labels:
    app: elasticsearch
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      name: elasticsearch-deployment
      labels:
        app: elasticsearch
    spec:
      containers:
      - name: elasticsearch
        image: elasticsearch:7.8.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "1024Mi"
            cpu: "200m"
          limits:
            memory: "2048Mi"
            cpu: "500m"
        env:
          - name: discovery.type
            value: single-node
        volumeMounts:
          - name: elasticsearch-nfs
            mountPath: /usr/share/elasticsearch/data
        ports:
        - name: tcp-port
          containerPort: 9200
      volumes:
        - name: elasticsearch-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/elasticsearch"

---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-service
  labels:
    app: elasticsearch
    env: prod
spec:
  selector:
    app: elasticsearch
  type: NodePort
  ports:
  - name: elasticsearch
    port: 9200
    targetPort: 9200
    nodePort: 30061
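Once the pod is up, the single-node cluster can be checked through the NodePort (192.168.0.183 is assumed to be one of the cluster nodes, as elsewhere in this setup):

curl "http://192.168.0.183:30061/_cluster/health?pretty"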

2. kibana.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana-deployment
  labels:
    app: kibana
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      name: kibana-deployment
      labels:
        app: kibana
    spec:
      containers:
      - name: kibana
        image: kibana:7.8.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "512Mi"
            cpu: "200m"
          limits:
            memory: "1024Mi"
            cpu: "400m"
        env:
          - name: ELASTICSEARCH_HOSTS
            value: http://elasticsearch-service:9200
        volumeMounts:
          - name: kibana-nfs
            mountPath: /usr/share/kibana/data
        ports:
        - name: tcp-port
          containerPort: 5601
      volumes:
        - name: kibana-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/kibana"

---
apiVersion: v1
kind: Service
metadata:
  name: kibana-service
  labels:
    app: kibana
    env: prod
spec:
  selector:
    app: kibana
  type: NodePort
  ports:
  - name: kibana
    port: 5601
    targetPort: 5601
    nodePort: 30063

3. logstash.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-deployment
  labels:
    app: logstash
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: logstash
  template:
    metadata:
      name: logstash-deployment
      labels:
        app: logstash
    spec:
      containers:
      - name: logstash
        image: logstash:7.8.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "512Mi"
            cpu: "200m"
          limits:
            memory: "1024Mi"
            cpu: "200m"
        env:
          - name: xpack.monitoring.elasticsearch.hosts
            value: http://elasticsearch-service:9200
        volumeMounts:
          - name: logstash-nfs
            mountPath: /usr/share/logstash/pipeline
        ports:
        - name: tcp-port
          containerPort: 5044
      nodeSelector:
        node: lp-knode-02
      volumes:
        - name: logstash-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/logstash/pipeline"

---
apiVersion: v1
kind: Service
metadata:
  name: logstash-service
  labels:
    app: logstash
    env: prod
spec:
  selector:
    app: logstash
  type: NodePort
  ports:
  - name: logstash
    port: 5044
    targetPort: 5044
    nodePort: 30062

Logstash pipeline config files for input, filter, and output, placed in the NFS path that is mounted at /usr/share/logstash/pipeline above:

02-beats-input.conf

input {
  beats {
    port => 5044
  }
}

30-elasticsearch-output.conf

output {
  elasticsearch {
    hosts => ["http://elasticsearch-service:9200"]
    manage_template => false
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
}

10-syslog-filter.conf

filter {
  if [fileset][module] == "system" {
    if [fileset][name] == "auth" {
      grok {
        match => { "message" => ["%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} %{DATA:[system][auth][ssh][method]} for (invalid user )?%{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]} port %{NUMBER:[system][auth][ssh][port]} ssh2(: %{GREEDYDATA:[system][auth][ssh][signature]})?",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} user %{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: Did not receive identification string from %{IPORHOST:[system][auth][ssh][dropped_ip]}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sudo(?:\[%{POSINT:[system][auth][pid]}\])?: \s*%{DATA:[system][auth][user]} :( %{DATA:[system][auth][sudo][error]} ;)? TTY=%{DATA:[system][auth][sudo][tty]} ; PWD=%{DATA:[system][auth][sudo][pwd]} ; USER=%{DATA:[system][auth][sudo][user]} ; COMMAND=%{GREEDYDATA:[system][auth][sudo][command]}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} groupadd(?:\[%{POSINT:[system][auth][pid]}\])?: new group: name=%{DATA:system.auth.groupadd.name}, GID=%{NUMBER:system.auth.groupadd.gid}",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} useradd(?:\[%{POSINT:[system][auth][pid]}\])?: new user: name=%{DATA:[system][auth][user][add][name]}, UID=%{NUMBER:[system][auth][user][add][uid]}, GID=%{NUMBER:[system][auth][user][add][gid]}, home=%{DATA:[system][auth][user][add][home]}, shell=%{DATA:[system][auth][user][add][shell]}$",
                  "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} %{DATA:[system][auth][program]}(?:\[%{POSINT:[system][auth][pid]}\])?: %{GREEDYMULTILINE:[system][auth][message]}"] }
        pattern_definitions => {
          "GREEDYMULTILINE"=> "(.|\n)*"
        }
        remove_field => "message"
      }
      date {
        match => [ "[system][auth][timestamp]", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
      }
      geoip {
        source => "[system][auth][ssh][ip]"
        target => "[system][auth][ssh][geoip]"
      }
    }
    else if [fileset][name] == "syslog" {
      grok {
        match => { "message" => ["%{SYSLOGTIMESTAMP:[system][syslog][timestamp]} %{SYSLOGHOST:[system][syslog][hostname]} %{DATA:[system][syslog][program]}(?:\[%{POSINT:[system][syslog][pid]}\])?: %{GREEDYMULTILINE:[system][syslog][message]}"] }
        pattern_definitions => { "GREEDYMULTILINE" => "(.|\n)*" }
        remove_field => "message"
      }
      date {
        match => [ "[system][syslog][timestamp]", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
      }
    }
  }
}
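The Beats input above listens on port 5044, which is exposed via NodePort 30062. A minimal filebeat.yml sketch for a client machine (the node IP is an assumption; the filter above expects events from the Filebeat system module, so enable it with "filebeat modules enable system"):

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml

output.logstash:
  hosts: ["192.168.0.183:30062"]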

Kubernetes Taints and Tolerations example

1. Add a taint to a node (the commands below are separate examples of key=value:effect)

kubectl taint nodes lp-knode-01 key=value:NoSchedule

kubectl taint nodes lp-knode-01 key=arm:NoSchedule

kubectl taint nodes lp-knode-01 node=arm64:NoSchedule

kubectl taint nodes lp-arm-{1,2,3,4}.home node=arm64:NoSchedule


kubectl get nodes -o json | jq '.items[].spec.taints'
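To remove a taint later, append a minus sign to the key and effect:

kubectl taint nodes lp-knode-01 key:NoSchedule-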

2. Add a toleration to the deployment

apiVersion: apps/v1
kind: Deployment
metadata:
  name: sonarqube-deployment
  labels:
    app: sonarqube
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sonarqube
      env: prod
  template:
    metadata:
      labels:
        app: sonarqube
        env: prod
    spec:
      nodeSelector:
        node: "lp-knode-01"
      containers:
      - name: sonarqube-container
        image: sonarqube
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "1024Mi"
            cpu: "100m"
          limits:
            memory: "2048Mi"
            cpu: "2000m"
        volumeMounts:
          - name: sonarqube-data
            mountPath: /opt/sonarqube/data
        ports:
        - containerPort: 9000    
      tolerations:
      - key: "key"
        operator: "Equal"
        value: "arm"
        effect: "NoSchedule"
      volumes:
        - name: sonarqube-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/sonarqube/data"
---
kind: Service
apiVersion: v1
metadata:
  name: sonarqube-service
  labels:
    app: sonarqube
    env: prod
spec:
  selector:
    app: sonarqube
    env: prod
  ports:
  - name: sonarqube
    protocol: TCP
    port: 9000
    targetPort: 9000
    nodePort: 30900
  type: NodePort

Alternatively, tolerate any value of the key by using the Exists operator:

      tolerations:
      - key: "key"
        operator: "Exists"
        effect: "NoSchedule"

OpenFaaS: serverless deployment with Kubernetes

1. Create a serverless Python 3 function

faas-cli new --lang python3 python-fn --gateway http://192.168.0.183:31112 --prefix=192.168.0.183:30500/python-fn

--prefix = Docker private registry (image prefix for the function image)
--gateway = OpenFaaS gateway URL

2. This creates a python-fn directory and a python-fn.yml file.

3. Write your Python code in python-fn/handler.py

def handle(req):
    """handle a request to the function
    Args:
        req (str): request body
    """
    print("hola-openfaas")
    return req

4. Build / Push / Deploy

faas-cli build -f python-fn.yml
faas-cli push -f python-fn.yml
export OPENFAAS_URL=http://192.168.0.183:31112
faas-cli login --password "YOUR_Openfaas_PASSWORD"
faas-cli deploy -f python-fn.yml
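After deployment, invoke the function through the gateway to verify it:

echo "hello" | faas-cli invoke python-fn
curl -d "hello" http://192.168.0.183:31112/function/python-fn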

5. Remove

faas-cli remove -f python-fn.yml
rm -rf python-fn*

OpenFaaS installation on Kubernetes

curl -sL https://cli.openfaas.com | sudo sh

git clone https://github.com/openfaas/faas-netes

kubectl apply -f https://raw.githubusercontent.com/openfaas/faas-netes/master/namespaces.yml

PASSWORD=$(head -c 12 /dev/urandom | shasum | cut -d' ' -f1)

kubectl -n openfaas create secret generic basic-auth \
--from-literal=basic-auth-user=admin \
--from-literal=basic-auth-password="$PASSWORD"

cd faas-netes

kubectl apply -f ./yaml

nohup kubectl port-forward svc/gateway -n openfaas 31112:8080 &


export OPENFAAS_URL=http://192.168.0.183:31112

echo -n "$PASSWORD" | faas-cli login --password-stdin

echo "$PASSWORD"

https://docs.openfaas.com/deployment/kubernetes/

Kubernetes highly available cluster using 3 master nodes

1. Set up a TCP load balancer using nginx (192.168.0.50)

load_module /usr/lib64/nginx/modules/ngx_stream_module.so;
events { }

stream {

upstream kapi {
	server 192.168.0.51:6443;
	server 192.168.0.52:6443;
	server 192.168.0.53:6443;
}

server {
	listen 8888;
	proxy_pass kapi;
    }

}
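Test the configuration and restart nginx after adding the stream block (assuming nginx is managed by systemd):

nginx -t
systemctl restart nginx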

2. Run the commands below on all master nodes

yum install -y kubelet kubeadm kubectl docker
systemctl enable kubelet
systemctl start kubelet
systemctl enable docker
systemctl start docker

3. Run the command below on 192.168.0.51

kubeadm init --control-plane-endpoint "192.168.0.50:8888" --upload-certs

It prints the join commands for the other master nodes and for the worker nodes.

4. Join the other 2 masters (192.168.0.52, 192.168.0.53)

 kubeadm join 192.168.0.50:8888 --token hvlnv8.6r90i8d04cs23sii \
    --discovery-token-ca-cert-hash sha256:bc6fe39f98c7ae6cd8434bd8ade4eb3b15b45e151af37595e4be0a9fdfcfdcc4 \
    --control-plane --certificate-key 3659353b0a256650fb0c1a0357cb608d07e3bdc8ce8b64fa995bcb814c131fa6

Note: The token, discovery hash, and certificate key will differ for your cluster.

5. Get cluster info

kubectl cluster-info

kubectl get node
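It is also worth checking that the API server is reachable through the load balancer itself (the /version endpoint is typically readable without authentication; -k skips certificate verification):

curl -k https://192.168.0.50:8888/version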

Prometheus and Grafana on Kubernetes with NFS persistent volume

Prometheus-k8.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-deployment
  labels:
    app: prometheus
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
      env: prod
  template:
    metadata:
      labels:
        app: prometheus
        env: prod
    spec:
      containers:
      - name: prometheus-container
        image: prom/prometheus
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "128Mi"
            cpu: "200m"
          limits:
            memory: "256Mi"
            cpu: "200m"
        volumeMounts:
          - name: config-volume
            mountPath: /etc/prometheus/prometheus.yml
            subPath: prometheus.yml
          - name: prometheus-storage-volume
            mountPath: /prometheus
        ports:
        - containerPort: 9090
      volumes:
        - name: config-volume
          configMap:
           name: prometheus-conf
        - name: prometheus-storage-volume
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/prometheus"
---
kind: Service
apiVersion: v1
metadata:
  name: prometheus-service
  labels:
    app: prometheus
    env: prod
spec:
  selector:
    app: prometheus
    env: prod
  ports:
  - name: promui
    protocol: TCP
    port: 9090
    targetPort: 9090
    nodePort: 30090
  type: NodePort

Create the prometheus.yml ConfigMap referenced by the deployment:

kubectl create configmap prometheus-conf --from-file=/mnt/nfs1/prometheus/prometheus.yml

prometheus.yml

global:
  scrape_interval:     30s
  evaluation_interval: 30s

scrape_configs:
  - job_name: 'lp-kmaster-01'
    static_configs:
    - targets: ['192.168.0.183:9100']
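Because the ConfigMap is mounted via subPath, edits to it are not propagated into the running pod automatically; restart the deployment after changing it:

kubectl rollout restart deployment prometheus-deployment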

Grafana-k8.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: grafana-deployment
  labels:
    app: grafana
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: grafana
  template:
    metadata:
      name: grafana-deployment
      labels:
        app: grafana
        env: prod
    spec:
      containers:
      - name: grafana
        image: grafana/grafana:7.0.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "128Mi"
            cpu: "200m"
          limits:
            memory: "256Mi"
            cpu: "200m"
        ports:
        - name: grafana
          containerPort: 3000
        volumeMounts:
          - mountPath: /var/lib/grafana
            name: grafana-storage
      volumes:
        - name: grafana-storage
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/grafana"

---
apiVersion: v1
kind: Service
metadata:
  name: grafana-service
  labels:
    app: grafana
    env: prod
spec:
  selector:
    app: grafana
  type: NodePort
  ports:
    - port: 3000
      targetPort: 3000
      nodePort: 30091

Pi-hole on Kubernetes with NFS persistent volume

1. Create the NFS share

2. pi-hole-deployment.yml

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pi-hole-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pi-hole
  template:
    metadata:
      name: pi-hole-deployment
      labels:
        app: pi-hole
        env: prod
    spec:
      containers:
      - name: pi-hole
        image: pihole/pihole
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "256Mi"
            cpu: "200m"
          limits:
            memory: "512Mi"
            cpu: "200m"
        volumeMounts:
          - name: pihole-nfs
            mountPath: /etc/pihole
          - name: dnsmasq-nfs
            mountPath: /etc/dnsmasq.d
        ports:
        - name: tcp-port
          containerPort: 53
          protocol: TCP
        - name: udp-port
          containerPort: 53
          protocol: UDP
        - name: http-port
          containerPort: 80
        - name: https-port
          containerPort: 443
      volumes:
        - name: pihole-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/pihole/pihole"
        - name: dnsmasq-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/pihole/dnsmasq.d"
        

---
apiVersion: v1
kind: Service
metadata:
  name: pi-hole-service
  labels:
    app: pi-hole
    env: prod
spec:
  selector:
    app: pi-hole
  type: NodePort
  externalIPs:
    - 192.168.0.183
  ports:
  - name: dns-tcp
    port: 53
    targetPort: 53
    nodePort: 30053
    protocol: TCP
  - name: dns-udp
    port: 53
    targetPort: 53
    nodePort: 30053
    protocol: UDP
  - name: http
    port: 800
    targetPort: 80
    nodePort: 30054
  - name: https
    port: 801
    targetPort: 443
    nodePort: 30055

Note: externalIPs is set on the Service so that this IP can be configured as the DNS server in your Wi-Fi router.
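Once the pod is running, set the web admin password from inside the container (a sketch; substitute your actual pod name):

kubectl get pods -l app=pi-hole
kubectl exec -it <pi-hole-pod-name> -- pihole -a -p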