I have the following deployment yaml:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: gofirst
  labels:
    app: gofirst
spec:
  selector:
    matchLabels:
      app: gofirst
  template:
    metadata:
      labels:
        app: gofirst
    spec:
      restartPolicy: Always
      containers:
      - name: gofirst
        image: lbvenkatesh/gofirst:0.0.5
        resources:
          limits:
            memory: "128Mi"
            cpu: "500m"
        ports:
        - name: http
          containerPort: 8080
        livenessProbe:
          httpGet:
            path: /health
            port: http
            httpHeaders:
            - name: "X-Health-Check"
              value: "1"
          initialDelaySeconds: 30
          periodSeconds: 10
        readinessProbe:
          httpGet:
            path: /health
            port: http
            httpHeaders:
            - name: "X-Health-Check"
              value: "1"
          initialDelaySeconds: 30
          periodSeconds: 10

and my service yaml is this:

apiVersion: v1
kind: Service
metadata:
  name: gofirst
  labels:
    app: gofirst
spec:
  publishNotReadyAddresses: true
  type: NodePort
  selector:
    app: gofirst
  ports:
  - port: 8080
    targetPort: http
    name: http

"gofirst" is a simple web application written in Golang Gin. Here is the dockerFile of the same:

FROM golang:latest 
LABEL MAINTAINER='Venkatesh Laguduva <[email protected]>'
RUN mkdir /app 
ADD . /app/
RUN apt -y update && apt -y install git
RUN go get github.com/gin-gonic/gin
RUN go get -u github.com/RaMin0/gin-health-check
WORKDIR /app 
RUN go build -o main . 
ARG verArg="0.0.1"
ENV VERSION=$verArg
ENV PORT=8080
ENV GIN_MODE=release
EXPOSE 8080
CMD ["/app/main"]

I have deployed this application in Minikube, and when I describe the pod, I see these events:

  Type     Reason            Age                     From               Message
  ----     ------            ----                    ----               -------
  Warning  FailedScheduling  10m (x2 over 10m)       default-scheduler  0/1 nodes are available: 1 Insufficient cpu.
  Normal   Scheduled         10m                     default-scheduler  Successfully assigned default/gofirst-95fc8668c-6r4qc to m01
  Normal   Pulling           10m                     kubelet, m01       Pulling image "lbvenkatesh/gofirst:0.0.5"
  Normal   Pulled            10m                     kubelet, m01       Successfully pulled image "lbvenkatesh/gofirst:0.0.5"
  Normal   Killing           8m13s (x2 over 9m13s)   kubelet, m01       Container gofirst failed liveness probe, will be restarted
  Normal   Pulled            8m13s (x2 over 9m12s)   kubelet, m01       Container image "lbvenkatesh/gofirst:0.0.5" already present on machine
  Normal   Created           8m12s (x3 over 10m)     kubelet, m01       Created container gofirst
  Normal   Started           8m12s (x3 over 10m)     kubelet, m01       Started container gofirst
  Warning  Unhealthy         7m33s (x7 over 9m33s)   kubelet, m01       Liveness probe failed: Get http://172.17.0.4:8080/health: dial tcp 172.17.0.4:8080: connect: connection refused
  Warning  Unhealthy         5m35s (x12 over 9m25s)  kubelet, m01       Readiness probe failed: Get http://172.17.0.4:8080/health: dial tcp 172.17.0.4:8080: connect: connection refused
  Warning  BackOff           31s (x17 over 4m13s)    kubelet, m01       Back-off restarting failed container

I tried the sample "hello-world" container and it worked well when I ran "minikube service hello-world", but when I tried the same with "minikube service gofirst", I got a connection error in the browser.

I must be making a relatively simple mistake, but I am unable to locate it. Please go through my YAML and Dockerfile and let me know if I am making an error.

1 Answer

I've reproduced your scenario and hit the same issues you have, so I decided to remove the liveness and readiness probes in order to log in to the pod and investigate.

Here is the yaml I used:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: gofirst
  labels:
    app: gofirst
spec:
  selector:
    matchLabels:
      app: gofirst
  template:
    metadata:
      labels:
        app: gofirst
    spec:
      restartPolicy: Always
      containers:
      - name: gofirst
        image: lbvenkatesh/gofirst:0.0.5
        resources:
          limits:
            memory: "128Mi"
            cpu: "500m"
        ports:
        - name: http
          containerPort: 8080

I logged into the pod to check whether the application was listening on the port you are probing:

kubectl exec -ti gofirst-65cfc7556-bbdcg -- bash

Then I installed netstat:

# apt update
# apt install net-tools

Checked if the application is running:

# ps -ef 
UID          PID    PPID  C STIME TTY          TIME CMD
root           1       0  0 10:06 ?        00:00:00 /app/main
root           9       0  0 10:06 pts/0    00:00:00 sh
root          15       9  0 10:07 pts/0    00:00:00 ps -ef

And finally checked if port 8080 is listening:

# netstat -an
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State      
tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN     
tcp        0      0 10.28.0.9:56106         151.101.184.204:80      TIME_WAIT  
tcp        0      0 10.28.0.9:56130         151.101.184.204:80      TIME_WAIT  
tcp        0      0 10.28.0.9:56104         151.101.184.204:80      TIME_WAIT  
Active UNIX domain sockets (servers and established)
Proto RefCnt Flags       Type       State         I-Node   Path

As we can see, the application is listening on 127.0.0.1 only, not on all interfaces, so connections from outside the container (including the kubelet's probes) are refused. The expected local address is 0.0.0.0:8080.
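
For reference, here is a minimal sketch of how that happens in Gin and how to fix it. Your actual main.go isn't shown in the question, so the handler below is only a stand-in for the gin-health-check middleware; the relevant part is the address passed to router.Run():

package main

import (
    "net/http"
    "os"

    "github.com/gin-gonic/gin"
)

func main() {
    router := gin.Default()

    // Stand-in health endpoint; in your app the gin-health-check middleware
    // answers requests carrying the X-Health-Check header instead.
    router.GET("/health", func(c *gin.Context) {
        c.String(http.StatusOK, "ok")
    })

    port := os.Getenv("PORT")
    if port == "" {
        port = "8080"
    }

    // router.Run("127.0.0.1:" + port) // binds to localhost only: probes are refused
    router.Run(":" + port) // binds to 0.0.0.0 so the kubelet's probes can reach it
}

Once the binary listens on all interfaces, rebuild the image and redeploy with your original liveness and readiness probes; "minikube service gofirst" should then reach the pod.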

I hope this helps you solve the problem.