Docker secure private registry with HTTPS behind an Apache proxy using Let's Encrypt

1. docker-ui.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: docker-registry-deployment
  labels:
    app: registry
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: docker-registry
      env: prod
  template:
    metadata:
      labels:
        app: docker-registry
        env: prod
    spec:
      containers:
      - name: docker-registry-container
        image: registry:2
        imagePullPolicy: IfNotPresent
        env:
          - name: REGISTRY_STORAGE_DELETE_ENABLED
            value: "true"
        resources:
          requests:
            memory: "256Mi"
            cpu: "200m"
          limits:
            memory: "512Mi"
            cpu: "200m"
        volumeMounts:
          - name: registry-data
            mountPath: /var/lib/registry
          - name: config-yml
            mountPath: /etc/docker/registry/config.yml
            subPath: config.yml   
        ports:
        - containerPort: 5000
      volumes:
        - name: registry-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/docker_registry"
        - name: config-yml
          configMap:
            name: docker-registry-conf

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: docker-registry-conf
data:
  config.yml: |+
    version: 0.1
    log:
      fields:
        service: registry
    storage:
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: 0.0.0.0:5000
      secret: asecretforlocaldevelopment
      headers:
        X-Content-Type-Options: [nosniff]
    health:
      storagedriver:
        enabled: true
        interval: 10s
        threshold: 3

---
kind: Service
apiVersion: v1
metadata:
  name: docker-registry-service
  labels:
    app: docker-registry
    env: prod
spec:
  selector:
    app: docker-registry
    env: prod
  ports:
  - name: docker-registry
    protocol: TCP
    port: 5000
    targetPort: 5000

# Docker UI
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: docker-ui-deployment
  labels:
    app: dockerui
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: dockerui
      env: prod
  template:
    metadata:
      labels:
        app: dockerui
        env: prod
    spec:
      containers:
      - name: dockerui-container
        image: joxit/docker-registry-ui:static
        imagePullPolicy: IfNotPresent
        env:
          - name: REGISTRY_URL
            value: "http://docker-registry-service:5000"
          - name: DELETE_IMAGES
            value: "true"
          - name: REGISTRY_TITLE
            value: "Docker-UI"
        resources:
          requests:
            memory: "512Mi"
            cpu: "200m"
          limits:
            memory: "512Mi"
            cpu: "200m"
        ports:
        - containerPort: 80 

---
kind: Service
apiVersion: v1
metadata:
  name: dockerui-service
  labels:
    app: dockerui
    env: prod
spec:
  selector:
    app: dockerui
    env: prod
  ports:
  - name: dockerui
    protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30005
  type: NodePort
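
Apply the manifests with:

kubectl apply -f docker-ui.yml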

2. Apache proxy rule

htpasswd -c /etc/httpd/admin-htpasswd admin

<VirtualHost *:80>
ServerName docker.initedit.com
RewriteEngine on
RewriteCond %{SERVER_NAME} =docker.initedit.com
RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,QSA,R=permanent]
</VirtualHost>

<VirtualHost *:443>
<Location />
    AuthName authorization
    AuthType Basic
    require valid-user
    AuthUserFile '/etc/httpd/admin-htpasswd'
</Location>
    ServerName docker.initedit.com
    AllowEncodedSlashes on
    RewriteEngine on
    SSLEngine On
    SSLProxyEngine On
    ProxyPreserveHost On
    RequestHeader set X-Forwarded-Proto "https"
    ProxyPass /  http://192.168.0.183:30005/
    ProxyPassReverse / http://192.168.0.183:30005/
    SSLCertificateFile /etc/letsencrypt/live/docker.initedit.com/cert.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/docker.initedit.com/privkey.pem
    SSLCertificateChainFile /etc/letsencrypt/live/docker.initedit.com/fullchain.pem
    Include /etc/letsencrypt/options-ssl-apache.conf
</VirtualHost>
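
The /etc/letsencrypt paths and the options-ssl-apache.conf include above come from certbot. A minimal sketch of issuing the certificate, assuming certbot with its Apache plugin is installed:

certbot --apache -d docker.initedit.com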

Note: Add htpasswd for basic authentication
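
Because the proxy enforces basic auth, log in once with the user created by htpasswd before pushing:

docker login docker.initedit.com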

docker tag docker.io/busybox docker.initedit.com/busybox1
docker push docker.initedit.com/busybox1

3. You can delete images from the UI and also with docker_reg_tool. After deleting, run the command below inside the registry container to remove the data completely:

docker exec -it name_of_registry_container bin/registry garbage-collect /etc/docker/registry/config.yml

Run Jenkins with docker-compose

jenkins.yml

version: '3'
services:
  jenkins:
    image: jenkins/jenkins
    user: root:root
    restart: always
    container_name: jenkins
    environment:
      TZ: "Asia/Kolkata"
    volumes:
      - /opt/docker/jenkins:/var/jenkins_home
    ports:
      - 8080:8080

start: docker-compose -f jenkins.yml up -d
stop: docker-compose -f jenkins.yml down

user mapping: https://dev.to/acro5piano/specifying-user-and-group-in-docker-i2e
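
On first start Jenkins asks for an initial admin password; since /var/jenkins_home is bind-mounted to /opt/docker/jenkins, it can be read from either side, e.g.:

docker exec jenkins cat /var/jenkins_home/secrets/initialAdminPassword
# or directly on the host
cat /opt/docker/jenkins/secrets/initialAdminPassword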

Run Postgres with docker-compose

postgres.yml

version: '3'
services:
  postgres:
    image: postgres
    restart: always
    container_name: postgres
    environment:
      POSTGRES_PASSWORD: "password"
    volumes:
      - /opt/docker/postgres:/var/lib/postgresql/data
    ports:
      - 5432:5432

start: docker-compose -f postgres.yml up -d
stop: docker-compose -f postgres.yml down
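
To verify the server is up, connect with psql inside the container (the image's default superuser is postgres):

docker exec -it postgres psql -U postgres -c "SELECT version();"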

HAProxy configuration on Docker

1. Install Docker

yum install docker

systemctl enable docker
systemctl start docker

2. Run the HAProxy Docker image with a persistent volume

mkdir /opt/haproxy

# and place the haproxy.cfg from step 3 inside /opt/haproxy

docker run -d -p 80:80 -p 8404:8404 -v /opt/haproxy:/usr/local/etc/haproxy:Z haproxy

3. haproxy.cfg

global
    daemon
    maxconn 256

defaults
    timeout connect 10s
    timeout client 30s
    timeout server 30s
    log global
    mode http
    option httplog
    maxconn 3000

frontend stats
    bind *:8404
    stats enable
    stats uri /stats
    stats refresh 10s

frontend app1
    bind *:80
    default_backend app1_backend

backend app1_backend
    server server1 192.168.0.151:8080 maxconn 32
    server server2 192.168.0.152:8080 maxconn 32
    server server3 192.168.0.153:8080 maxconn 32
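
Before starting the container, the configuration can be validated with HAProxy's built-in check mode, e.g.:

docker run -it --rm -v /opt/haproxy:/usr/local/etc/haproxy:Z haproxy haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg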

docker-compose file

version: '3'
services:
  haproxy:
    image: haproxy
    ports:
     - 80:80
     - 8404:8404
    volumes:
     - /opt/haproxy:/usr/local/etc/haproxy
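
Assuming the compose file is saved as haproxy.yml (a filename not given above), it can be started and stopped like the other services:

# haproxy.yml is an assumed filename for the compose file above
docker-compose -f haproxy.yml up -d
docker-compose -f haproxy.yml down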

Private Docker registry server on Kubernetes with NFS persistent volume

1. Create registry-server.yml and apply it with kubectl apply -f registry-server.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry-deployment
  labels:
    app: registry
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
      env: prod
  template:
    metadata:
      labels:
        app: registry
        env: prod
    spec:
      containers:
      - name: registry-container
        image: registry:2
        imagePullPolicy: IfNotPresent
        env:
          - name: REGISTRY_STORAGE_DELETE_ENABLED
            value: "true"
        resources:
          requests:
            memory: "256Mi"
            cpu: "200m"
          limits:
            memory: "512Mi"
            cpu: "200m"
        volumeMounts:
          - name: registry-data
            mountPath: /var/lib/registry
          - name: config-yml
            mountPath: /etc/docker/registry/config.yml
            subPath: config.yml   
        ports:
        - containerPort: 5000
      volumes:
        - name: registry-data
          nfs:
            server: 192.168.0.184
            path: "/opt/nfs1/registry"
        - name: config-yml
          configMap:
            name: registry-conf

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: registry-conf
data:
  config.yml: |+
    version: 0.1
    log:
      fields:
        service: registry
    storage:
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: :5000
      headers:
        X-Content-Type-Options: [nosniff]
    health:
      storagedriver:
        enabled: true
        interval: 10s
        threshold: 3

---
kind: Service
apiVersion: v1
metadata:
  name: registry-service
  labels:
    app: registry
    env: prod
spec:
  selector:
    app: registry
    env: prod
  ports:
  - name: registry
    protocol: TCP
    port: 5000
    targetPort: 5000
    nodePort: 30500
  type: NodePort

2. As the registry runs over plain HTTP, add it as an insecure registry in /etc/docker/daemon.json on every worker node:

{
 "insecure-registries" : [ "192.168.0.183:30500" ]
}

3. Restart the docker service

systemctl restart docker

4. Tag the image with the registry server IP and port. A DNS name can be used if available.

docker tag debian:latest 192.168.0.183:30500/debianlocal:latest

5. Push the image to the private registry server

docker push 192.168.0.183:30500/debianlocal:latest

6. To delete images from the registry server we will use docker_reg_tool: https://github.com/byrnedo/docker-reg-tool/blob/master/docker_reg_tool

Note:
– Remove the blobdescriptor: inmemory section from /etc/docker/registry/config.yml (already done in the config above)
– REGISTRY_STORAGE_DELETE_ENABLED = "true" must be present in the container env

./docker_reg_tool http://192.168.0.183:30500 delete debianlocal latest

# This can be scheduled as a cronjob (the command runs inside the registry container via docker exec)

docker exec -it name_of_registry_container bin/registry garbage-collect /etc/docker/registry/config.yml

Prometheus Pushgateway to monitor running processes (docker ps)

1. Deploy Pushgateway to Kubernetes

pushgateway.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: pushgateway-deployment
  labels:
    app: pushgateway
    env: prod
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pushgateway
      env: prod
  template:
    metadata:
      labels:
        app: pushgateway
        env: prod
    spec:
      containers:
      - name: pushgateway-container
        image: prom/pushgateway
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "128Mi"
            cpu: "200m"
          limits:
            memory: "256Mi"
            cpu: "200m"
        ports:
        - containerPort: 9091
---
kind: Service
apiVersion: v1
metadata:
  name: pushgateway-service
  labels:
    app: pushgateway
    env: prod
spec:
  selector:
    app: pushgateway
    env: prod
  ports:
  - name: pushgateway
    protocol: TCP
    port: 9091
    targetPort: 9091
    nodePort: 30191
  type: NodePort

2. Add the Pushgateway as a scrape target in /etc/prometheus/prometheus.yml, as shown below.
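
A minimal scrape_configs entry, assuming Prometheus scrapes the NodePort exposed above (honor_labels: true keeps the job and instance labels set by the push):

scrape_configs:
  - job_name: 'pushgateway'
    honor_labels: true
    static_configs:
      - targets: ['192.168.0.183:30191']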

3. Push the running Docker status to the Pushgateway using the bash script below and add it to crontab (a sample crontab entry follows the script)

#!/bin/bash

job="docker_status"

# -q lists only IDs, so the header line is not counted
running_docker=$(docker ps -q | wc -l)
docker_images=$(docker images -q | wc -l)

cat <<EOF | curl --data-binary @- http://192.168.0.183:30191/metrics/job/$job/instance/$(hostname)
# TYPE running_docker gauge
running_docker $running_docker
# TYPE docker_images gauge
docker_images $docker_images
EOF
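
A sample crontab entry, assuming the script above is saved as /opt/scripts/docker_status.sh (hypothetical path):

# /opt/scripts/docker_status.sh is an example path for the script above
*/5 * * * * /opt/scripts/docker_status.sh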

4. Data visualization in Prometheus and the Pushgateway server

Python code:

import requests

job_name='cpuload'
instance_name='web1'
payload_key='cpu'
payload_value='10'
#print("{k} {v} \n".format(k=payload_key, v=payload_value))
#print('http://192.168.0.183:30191/metrics/job/{j}/instance/{i}'.format(j=job_name, i=instance_name))
response = requests.post('http://192.168.0.183:30191/metrics/job/{j}/instance/{i}'.format(j=job_name, i=instance_name), data="{k} {v}\n".format(k=payload_key, v=payload_value))
#print(response.text)

Pushgateway PowerShell commands:

Invoke-WebRequest "http://192.168.0.183:30191/metrics/job/jenkins/instance/instance_name" -Body "process 1`n" -Method Post
$process1 = (tasklist /v | Select-String -AllMatches 'Jenkins' | findstr 'java' | %{ $_.Split('')[0]; }) | Out-String
if($process1 -like "java.exe*"){
   write-host("This is if statement")
   Invoke-WebRequest "http://192.168.0.183:30191/metrics/job/jenkins/instance/instance_name" -Body "jenkins_process 1`n" -Method Post
}else {
   write-host("This is else statement")
   Invoke-WebRequest "http://192.168.0.183:30191/metrics/job/jenkins/instance/instance_name" -Body "jenkins_process 0`n" -Method Post
}

Docker Swarm cluster configuration

swarm-master-01 = 192.168.0.150
swarm-node-01 = 192.168.0.151
swarm-node-02 = 192.168.0.152

On swarm-master-01

yum install docker
systemctl disable firewalld
systemctl stop firewalld

docker swarm init --advertise-addr 192.168.0.150

# after this command it will generate the join command shown below

docker swarm join --token SWMTKN-1-3xrfrgwy67vm0dmel94fveuqvg9ngsv8qt5jysl31xfv16c0gq-55tzlxjtezu59l4mw4hxjo3h9 192.168.0.150:2377

On swarm-node-01 and swarm-node-02

yum install docker
systemctl disable firewalld
systemctl stop firewalld

docker swarm join --token SWMTKN-1-3xrfrgwy67vm0dmel94fveuqvg9ngsv8qt5jysl31xfv16c0gq-55tzlxjtezu59l4mw4hxjo3h9 192.168.0.150:2377
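
Back on swarm-master-01, verify that all nodes have joined:

docker node ls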

Install swarm dashboard

https://github.com/charypar/swarm-dashboard

# compose.yml
version: "3"

services:
  dashboard:
    image: charypar/swarm-dashboard
    volumes:
    - "/var/run/docker.sock:/var/run/docker.sock"
    ports:
    - 8080:8080
    environment:
      PORT: 8080
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.role == manager

#deploy swarm dashboard

docker stack deploy -c compose.yml svc

#Dashboard will be accessible on http://master_ip:8080

Deploy a service in the Swarm cluster

docker service create -p 8881:80 --name httpd --replicas 2 httpd
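
Verify the service and see where the replicas were scheduled:

docker service ls
docker service ps httpd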