I created a mysql pod, which is running on Node3 (172.24.18.125). But after I stop all Kubernetes services on Node3, this pod disappears after a while instead of being rescheduled to Node1 or Node2. Why doesn't the Kubernetes master reschedule the pod onto another node? Below are the YAML files for the pod, the service, and the replication controller.
[root@localhost pods]# kubectl get nodes
NAME            LABELS                                                STATUS
127.0.0.1       kubernetes.io/hostname=127.0.0.1                      Ready
172.24.18.123   database=mysql,kubernetes.io/hostname=172.24.18.123   Ready
172.24.18.124   kubernetes.io/hostname=172.24.18.124                  Ready
172.24.18.125   kubernetes.io/hostname=172.24.18.125                  Ready
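For reference, this is roughly how the pod status is being checked before and after stopping the Node3 services (a sketch; the exact output columns vary by kubectl version):

kubectl get pods            # list pods and their current state
kubectl describe pod mysql  # shows which host the pod was scheduled on and recent events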
YAML file to create the mysql pod:
apiVersion: v1
kind: Pod
metadata:
  name: mysql
  labels:
    name: mysql
spec:
  containers:
    - resources:
        limits:
          cpu: 1
      image: mysql
      name: mysql
      env:
        - name: MYSQL_ROOT_PASSWORD
          value: welcome
      ports:
        - containerPort: 3306
          name: mysql
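The pod definition above is created with something like the following (the file name mysql.yaml is an assumption, not given above):

kubectl create -f mysql.yaml   # mysql.yaml is assumed to hold the Pod definition above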
mysql-service.yaml:
apiVersion: v1
kind: Service
metadata:
  labels:
    name: mysql
  name: mysql
spec:
  publicIPs:
    - 172.24.18.120
  ports:
    # the port that this service should serve on
    - port: 3306
  # label keys and values that must match in order to receive traffic for this service
  selector:
    name: mysql
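The service is created the same way; checking its endpoints shows whether any pod with the label name=mysql is currently backing it (a sketch using the file name given above):

kubectl create -f mysql-service.yaml
kubectl get endpoints mysql   # lists the IP(s) of pods matched by the selector name=mysql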
replicationcontroller.yaml:
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql-controller
spec:
  replicas: 2
  selector:
    name: mysql
  template:
    metadata:
      labels:
        name: mysql
    spec:
      containers:
        - name: mysql
          image: mysql
          ports:
            - containerPort: 3306
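The replication controller is created the same way, and listing pods by its selector shows which pods it actually manages (a sketch; the rc shorthand and -l flag may behave slightly differently across kubectl versions):

kubectl create -f replicationcontroller.yaml
kubectl get rc mysql-controller    # desired vs. current replica count
kubectl get pods -l name=mysql     # pods matched by the controller's selector name=mysql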