I had the same issue while configuring Prometheus on bare metal, and this is how I resolved it.
You can use the local-storage
storage class for the PV and PVC, which binds the PVC to a specific node. Whenever that node restarts, the pod will be scheduled back onto the same node where the PVC is. I am sharing my JSON files:
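Note that the local-storage class itself is not created automatically. If it does not already exist in your cluster, a minimal StorageClass for local volumes along these lines should work; the file name local-storage-sc.json and the WaitForFirstConsumer binding mode are my suggestions (WaitForFirstConsumer delays binding until the pod is scheduled), the required part for local volumes is the no-provisioner provisioner:
local-storage-sc.json
{
  "kind": "StorageClass",
  "apiVersion": "storage.k8s.io/v1",
  "metadata": {
    "name": "local-storage"
  },
  "provisioner": "kubernetes.io/no-provisioner",
  "volumeBindingMode": "WaitForFirstConsumer"
}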
Prometheus-pv.json
{
  "kind": "PersistentVolume",
  "apiVersion": "v1",
  "metadata": {
    "name": "prometheus-vol",
    "labels": {
      "type": "local",
      "app": "harmony-vol"
    }
  },
  "spec": {
    "capacity": {
      "storage": "10Gi"
    },
    "accessModes": [
      "ReadWriteOnce"
    ],
    "storageClassName": "local-storage",
    "local": {
      "path": "/data"
    },
    "claimRef": {
      "namespace": "monitoring",
      "name": "data-prafull-0"
    },
    "nodeAffinity": {
      "required": {
        "nodeSelectorTerms": [
          {
            "matchExpressions": [
              {
                "key": "kubernetes.io/hostname",
                "operator": "In",
                "values": [
                  "<node_name>"
                ]
              }
            ]
          }
        ]
      }
    }
  }
}
Prometheus.json
{
  "apiVersion": "monitoring.coreos.com/v1",
  "kind": "Prometheus",
  "metadata": {
    "labels": {
      "prometheus": "prafull"
    },
    "name": "prafull",
    "namespace": "monitoring"
  },
  "spec": {
    "alerting": {
      "alertmanagers": [
        {
          "name": "alertmanager-main",
          "namespace": "monitoring",
          "port": "web"
        }
      ]
    },
    "baseImage": "quay.io/prometheus/prometheus",
    "replicas": 2,
    "resources": {
      "requests": {
        "memory": "400Mi"
      }
    },
    "ruleSelector": {
      "matchLabels": {
        "prometheus": "prafull",
        "role": "alert-rules"
      }
    },
    "securityContext": {
      "fsGroup": 0,
      "runAsNonRoot": false,
      "runAsUser": 0
    },
    "serviceAccountName": "prometheus",
    "serviceMonitorSelector": {
      "matchExpressions": [
        {
          "key": "k8s-app",
          "operator": "Exists"
        }
      ]
    },
    "storage": {
      "class": "",
      "resources": {},
      "selector": {},
      "volumeClaimTemplate": {
        "metadata": {
          "name": "data"
        },
        "spec": {
          "accessModes": [
            "ReadWriteOnce"
          ],
          "storageClassName": "local-storage",
          "resources": {
            "requests": {
              "storage": "10Gi"
            }
          }
        }
      }
    },
    "version": "v2.2.1"
  }
}
After applying these manifests, your pod will not be rescheduled to another node, because the PV and PVC are bound to that node.