Simple Kubernetes NFS Subdir – External Provisioner

Why?
– No need to create the export subdirectories manually on the NFS server — the provisioner creates one per PVC
– Simple to install and use

helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/

helm upgrade --install nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --set nfs.server=192.168.0.182 --set nfs.path=/mnt/nfs2 --set storageClass.defaultClass=true --set storageClass.onDelete=retain
   

deployment-nginx.yml

# Example Deployment that mounts the dynamically provisioned NFS-backed PVC
# (claim "test-claim" below) at /data inside the nginx container.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      run: my-nginx
  template:
    metadata:
      labels:
        run: my-nginx
    spec:
      containers:
        - name: my-nginx
          # NOTE(review): untagged image resolves to nginx:latest — pin a
          # specific tag (e.g. nginx:1.25) for reproducible deployments.
          image: nginx
          ports:
            - containerPort: 80
          volumeMounts:
            - name: pvc-claim
              mountPath: /data
          resources:
            requests:
              memory: "256Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "100m"
      volumes:
        - name: pvc-claim
          persistentVolumeClaim:
            claimName: test-claim
---

# PVC dynamically provisioned by the nfs-subdir-external-provisioner
# installed via the Helm command above.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    # NOTE(review): this annotation only takes effect if the provisioner's
    # pathPattern option references it — confirm against the chart values.
    nfs.io/storage-path: "test-path"
spec:
  # Presumably the StorageClass name created by the Helm chart — verify the
  # chart's storageClass.name value matches.
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Mi

More : https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner

Pi-hole on kubernetes with NFS persistent volume

1. Create an NFS share (see the "NFS server in linux" section below)

2. pi-hole-deployment.yml

---
# Pi-hole Deployment. Persistent state (/etc/pihole and /etc/dnsmasq.d) lives
# on static NFS mounts so configuration survives pod rescheduling.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pi-hole-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pi-hole
  template:
    metadata:
      name: pi-hole-deployment
      labels:
        app: pi-hole
        env: prod
    spec:
      containers:
        - name: pi-hole
          # NOTE(review): untagged image — pin a release tag for
          # reproducible deployments.
          image: pihole/pihole
          imagePullPolicy: IfNotPresent
          resources:
            requests:
              memory: "256Mi"
              cpu: "200m"
            limits:
              memory: "512Mi"
              cpu: "200m"
          volumeMounts:
            - name: pihole-nfs
              mountPath: /etc/pihole
            - name: dnsmasq-nfs
              mountPath: /etc/dnsmasq.d
          ports:
            # DNS needs both TCP and UDP on 53.
            - name: tcp-port
              containerPort: 53
              protocol: TCP
            - name: udp-port
              containerPort: 53
              protocol: UDP
            - name: http-port
              containerPort: 80
            - name: https-port
              containerPort: 443
      volumes:
        # Static NFS volumes (no PVC) — these directories must already exist
        # on the NFS server.
        - name: pihole-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/pihole/pihole"
        - name: dnsmasq-nfs
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/pihole/dnsmasq.d"

---
# NodePort Service exposing Pi-hole: DNS on 53 (TCP and UDP, sharing node
# port 30053) plus the admin web UI over HTTP/HTTPS.
apiVersion: v1
kind: Service
metadata:
  name: pi-hole-service
  labels:
    app: pi-hole
    env: prod
spec:
  type: NodePort
  selector:
    app: pi-hole
  # A fixed external IP lets clients reach DNS on the standard port 53
  # instead of the node port.
  externalIPs:
    - 192.168.0.183
  ports:
    - name: dns-tcp
      protocol: TCP
      port: 53
      targetPort: 53
      nodePort: 30053
    - name: dns-udp
      protocol: UDP
      port: 53
      targetPort: 53
      nodePort: 30053
    - name: http
      port: 800
      targetPort: 80
      nodePort: 30054
    - name: https
      port: 801
      targetPort: 443
      nodePort: 30055

Note: Use externalIPs in the Service so that this IP can be configured as the DNS server in the Wi-Fi router settings.

Private docker registry server on kubernetes with nfs persistent volume

  1. kubectl apply -f registry-server.yml
# Private Docker registry Deployment. Image layers persist on NFS; the
# registry config file is injected from the registry-conf ConfigMap.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry-deployment
  labels:
    app: registry
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
      env: prod
  template:
    metadata:
      labels:
        app: registry
        env: prod
    spec:
      containers:
        - name: registry-container
          image: registry:2
          imagePullPolicy: IfNotPresent
          env:
            # Required so DELETE requests (and garbage collection) are
            # accepted by the registry.
            - name: REGISTRY_STORAGE_DELETE_ENABLED
              value: "true"
          resources:
            requests:
              memory: "256Mi"
              cpu: "200m"
            limits:
              memory: "512Mi"
              cpu: "200m"
          volumeMounts:
            - name: registry-data
              mountPath: /var/lib/registry
            # subPath mounts only config.yml instead of shadowing the whole
            # /etc/docker/registry directory.
            - name: config-yml
              mountPath: /etc/docker/registry/config.yml
              subPath: config.yml
          ports:
            - containerPort: 5000
      volumes:
        # Image data persists on NFS across pod restarts.
        - name: registry-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/registry"
        - name: config-yml
          configMap:
            name: registry-conf

---
# Registry configuration, mounted into the container via the config-yml
# volume above.
kind: ConfigMap
apiVersion: v1
metadata:
  name: registry-conf
data:
  # Clip chomping ("|") instead of keep ("|+"): "|+" would preserve any
  # trailing blank lines of this manifest inside the rendered config file.
  config.yml: |
    version: 0.1
    log:
      fields:
        service: registry
    storage:
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: :5000
      headers:
        X-Content-Type-Options: [nosniff]
    health:
      storagedriver:
        enabled: true
        interval: 10s
        threshold: 3

---
# NodePort Service for the registry — reachable from outside the cluster at
# <nodeIP>:30500.
apiVersion: v1
kind: Service
metadata:
  name: registry-service
  labels:
    app: registry
    env: prod
spec:
  type: NodePort
  selector:
    app: registry
    env: prod
  ports:
    - name: registry
      protocol: TCP
      port: 5000
      targetPort: 5000
      nodePort: 30500

2. As the registry runs over plain HTTP, add it as an insecure registry in /etc/docker/daemon.json on every worker node:

{
 "insecure-registries" : [ "192.168.0.183:30500" ]
}

3. Restart the docker service

systemctl restart docker

4. Tag the image with the registry server IP and port. A DNS name can be used if available.

docker tag debian:latest 192.168.0.183:30500/debianlocal:latest

5. Push the images to private registry server

docker push 192.168.0.183:30500/debianlocal:latest

6. To delete images from the registry server we will use docker_reg_tool: https://github.com/byrnedo/docker-reg-tool/blob/master/docker_reg_tool

Note:
– Remove the blobdescriptor: inmemory cache entry from /etc/docker/registry/config.yml (already done in this example)
– REGISTRY_STORAGE_DELETE_ENABLED="true" must be present in the container's env

./docker_reg_tool http://192.168.0.183:30500 delete debianlocal latest

# This can be run as a cronjob inside the container

docker exec -it name_of_registry_container bin/registry garbage-collect /etc/docker/registry/config.yml

NFS server in linux

apt-get install nfs-kernel-server
systemctl start nfs-server
systemctl enable nfs-server

yum install nfs-utils   ### for CentOS
systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server

nano /etc/exports
### For specific ip
/var/html 192.168.0.150(rw,sync,no_root_squash)
### For all ip
/var/html *(rw,sync,no_root_squash)

exportfs -r
exportfs -a
exportfs  

mount -t nfs 192.168.0.150:/var/html  /var/html

###  for showing mounts available 
showmount -e 192.168.0.150