- Create an account on https://connect.raspberrypi.com/ (Raspberry Pi Connect is only for Raspberry Pi OS Bookworm).
# Full package (screen sharing + remote shell)
apt install rpi-connect
rpi-connect on
# Lite package (remote shell access only)
apt install rpi-connect-lite
rpi-connect on
rpi-connect signin
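To confirm the device is linked and reachable, a quick check (rpi-connect status is part of the Connect CLI; the loginctl linger step is an assumption to keep the lite remote shell available after logout):
rpi-connect status
# assumption: keep the user service running when no session is open
loginctl enable-linger $USER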

Ollama's CLI follows a pattern similar to Docker's (pull, run, list).
curl -L https://ollama.com/download/ollama-linux-amd64.tgz -o ollama-linux-amd64.tgz
sudo tar -C /usr -xzf ollama-linux-amd64.tgz
ollama serve
# OR serve on a different IP address/port
OLLAMA_HOST=192.168.29.13:11435 ollama serve
ollama -v
# check the graphics card
nvidia-smi
# Ollama listens on http://127.0.0.1:11434/ by default
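With the server up, you can also query the REST API directly; a minimal sketch (assumes the llama3.2 model has already been pulled):
curl http://127.0.0.1:11434/api/generate -d '{
  "model": "llama3.2",
  "prompt": "Why is the sky blue?",
  "stream": false
}'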
### Podman/Docker - https://ollama.com/blog/ollama-is-now-available-as-an-official-docker-image
podman run -d --gpus=all --device nvidia.com/gpu=all --security-opt=label=disable -v ollama:/root/.ollama -p 11434:11434 ollama/ollama
user@home:~$ ollama list
NAME               ID              SIZE      MODIFIED
gemma2:latest      ff02c3702f32    5.4 GB    11 hours ago
llama3.2:latest    a80c4f17acd5    2.0 GB    12 hours ago
user@home:~$ ollama run llama3.2
>>> hola
Hola! ¿En qué puedo ayudarte hoy?
>>> hey
What's up? Want to chat about something in particular or just shoot the breeze?
podman run -d -p 3000:8080 --gpus all --device nvidia.com/gpu=all --security-opt=label=disable -e OLLAMA_BASE_URL=http://192.168.29.13:11434 -e WEBUI_AUTH=False -v open-webui:/app/backend/data --name open-webui ghcr.io/open-webui/open-webui:main
https://github.com/ollama/ollama/blob/main/docs/linux.md
For Podman GPU access via CDI: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/cdi-support.html
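Generating the CDI spec that the --device nvidia.com/gpu=all flag relies on (commands as documented for the NVIDIA Container Toolkit; the output path may differ per distro):
# generate the CDI specification for the installed NVIDIA devices
sudo nvidia-ctk cdi generate --output=/etc/cdi/nvidia.yaml
# list the device names the spec exposes (e.g. nvidia.com/gpu=all)
nvidia-ctk cdi list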
docker-compose-web.yml
services:
  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    restart: always
    container_name: open-webui
    environment:
      - OLLAMA_BASE_URL=http://192.168.29.142:11434
      - WEBUI_AUTH=False
    volumes:
      - open-webui:/app/backend/data
    ports:
      - 3000:8080
volumes:
  open-webui:
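To start Open WebUI from this file (docker compose v2 syntax; use docker-compose if you are on the v1 binary):
docker compose -f docker-compose-web.yml up -d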
docker run --rm --gpus all ubuntu nvidia-smi
user@home:~$ cat /proc/driver/nvidia/version
NVRM version: NVIDIA UNIX x86_64 Kernel Module 550.120 Fri Sep 13 10:10:01 UTC 2024
apt install nvidia-utils-550
nvtop
The Kong pre-function plugin lets you dynamically run Lua code from Kong, before other plugins in each phase.
For example, clearing the Content-Type header when the request body is blank:
local k_request = kong.request.get_header("Content-Type")
if k_request and (k_request == "application/json") then
  local check_head = kong.request.get_raw_body()
  if (not check_head) or (check_head == "") then
    kong.service.request.clear_header("Content-Type")
  end
end
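One way to wire this in is as the access-phase code of the pre-function plugin on a service through the Admin API; a sketch, with my-service and clear-header.lua as placeholder names:
# clear-header.lua holds the snippet above; curl's @file syntax sends its contents
curl -i -X POST http://localhost:8001/services/my-service/plugins \
  -F "name=pre-function" \
  -F "config.access[1]=@clear-header.lua"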
Local network (LAN1): 192.168.0.1
OpenVPN settings:
Local Network: 192.168.0.0/24
IP pool: 192.168.3.0/24
With this in place I can SSH from an OpenVPN client to 192.168.0.183.
openvpn --config VPN_Default_2024-11-17-11-19-59.ovpn
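After the tunnel comes up, the pushed LAN route should be visible on the client (the interface name varies, typically a tun device):
ip route | grep 192.168.0.0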
Get full Jenkins job names:
Jenkins.instance.getAllItems(AbstractItem.class).each {
  println(it.fullName)
};
Kill a Jenkins job that is stuck:
Jenkins.instance
  .getItemByFullName("Tenerity-Platform/BizOps/apg-chargeback")
  .getBranch("develop")
  .getBuildByNumber(716)
  .finish(hudson.model.Result.ABORTED, new java.io.IOException("Aborting build"));
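Both snippets run in the Jenkins Script Console ($JENKINS_URL/script); they can also be submitted over the scriptText endpoint, e.g. (URL, user, token, and file name are placeholders):
curl --user admin:API_TOKEN \
  --data-urlencode "script=$(cat kill-stuck-build.groovy)" \
  https://jenkins.example.com/scriptText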
Namespace-level metrics:
CPU:
sum by (namespace) (container_cpu_allocation)
Memory:
sum by (namespace) (container_memory_allocation_bytes/1024/1024)
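The same expressions can be run against the Prometheus HTTP API; a sketch, with the Prometheus host as a placeholder:
curl -G http://prometheus.example:9090/api/v1/query \
  --data-urlencode 'query=sum by (namespace) (container_cpu_allocation)'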
kubectl custom-columns query to get CPU and memory requests and limits:
kubectl get deploy -A -o=custom-columns='Namespace:.metadata.namespace,Name:.metadata.name,Request_CPU:.spec.template.spec.containers[0].resources.requests.cpu,Limit_CPU:.spec.template.spec.containers[0].resources.limits.cpu,Request_Memory:.spec.template.spec.containers[0].resources.requests.memory,Limit_Memory:.spec.template.spec.containers[0].resources.limits.memory' | sed 1d | tr -s '[:blank:]' ','
kubectl: replace an env var value across all deployments (e.g. swap stage -> perf in NEW_RELIC_APP_NAME):
deployments=$(kubectl get deploy | awk '{print $1}' | sed 1d)
for deploy in $deployments
do
  deploy_raw_yml=$(kubectl get deploy $deploy -o yaml)
  echo "$deploy_raw_yml" > _tmp_store.yml
  value_to_be_replaced=$(echo "$deploy_raw_yml" | grep -A 1 'NEW_RELIC_APP_NAME' | grep value | awk -F 'value: ' '{print $2}')
  echo "value_to_be_replaced: $value_to_be_replaced"
  if [[ $value_to_be_replaced == "" ]]; then
    echo "=====================$deploy no change =========================="
  else
    replaced_value=$(echo $value_to_be_replaced | sed 's/stage/perf/g')
    echo "replaced_value: $replaced_value"
    cat _tmp_store.yml | sed "s/$value_to_be_replaced/$replaced_value/g" | kubectl apply -f -
    echo "=====================$deploy done =========================="
  fi
done
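To preview the change before applying it, the apply line can be swapped for kubectl diff (it exits non-zero when differences are found), e.g.:
cat _tmp_store.yml | sed "s/$value_to_be_replaced/$replaced_value/g" | kubectl diff -f -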