1
votes

I am trying to create a Helm chart for varnish to be deployed/run on Kubernetes cluster. While running the helm package which has varnish image from Docker community its throwing error

Readiness probe failed: HTTP probe failed with statuscode: 503

Liveness probe failed: HTTP probe failed with statuscode: 503

Have shared values.yaml, deployment.yaml, varnish-config.yaml, varnish.vcl.

Any solution approached would be welcomed....

Values.yaml:

    # Default values for tt.
    # This is a YAML-formatted file.
    # Declare variables to be passed into your templates.

    replicaCount: 1


    #vcl 4.0;

    #import std;

    #backend default {
     # .host = "www.varnish-cache.org";
     # .port = "80";
     # .first_byte_timeout = 60s;
     # .connect_timeout = 300s;
    #}



    varnishBackendService: "www.varnish-cache.org"
    varnishBackendServicePort: "80"

    image:
      repository: varnish
      tag: 6.0.6
      pullPolicy: IfNotPresent

    nameOverride: ""
    fullnameOverride: ""

    service:
      type: ClusterIP
      port: 80



    #probes:
     # enabled: true

    ingress:
      enabled: false
      annotations: {}
        # kubernetes.io/ingress.class: nginx
        # kubernetes.io/tls-acme: "true"
      path: /
      hosts:
        - chart-example.local
      tls: []
      #  - secretName: chart-example-tls
      #    hosts:
      #      - chart-example.local

    resources:
      limits:
        memory: 128Mi
      requests:
        memory: 64Mi

    #resources: {}
      # We usually recommend not to specify default resources and to leave this as a conscious
      # choice for the user. This also increases chances charts run on environments with little
      # resources, such as Minikube. If you do want to specify resources, uncomment the following
      # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
      # limits:
      #  cpu: 100m
      #  memory: 128Mi
      # requests:
      #  cpu: 100m
      #  memory: 128Mi

    nodeSelector: {}

    tolerations: []

    affinity: {}

Deployment.yaml:

    apiVersion: apps/v1beta2
    kind: Deployment
    metadata:
      name: {{ include "varnish.fullname" . }}
      labels:
        app: {{ include "varnish.name" . }}
        chart: {{ include "varnish.chart" . }}
        release: {{ .Release.Name }}
        heritage: {{ .Release.Service }}
    spec:
      replicas: {{ .Values.replicaCount }}
      selector:
        matchLabels:
          app: {{ include "varnish.name" . }}
          release: {{ .Release.Name }}
      template:
        metadata:
          labels:
            app: {{ include "varnish.name" . }}
            release: {{ .Release.Name }}
    #      annotations:
     #       sidecar.istio.io/rewriteAppHTTPProbers: "true"
        spec:
          volumes: 
            - name: varnish-config
              configMap:
                 name: {{ include "varnish.fullname" . }}-varnish-config
                 items:
                   - key: default.vcl
                     path: default.vcl
          containers:
            - name: {{ .Chart.Name }}
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}    
              env:
              - name: VARNISH_VCL
                value: /etc/varnish/default.vcl
              volumeMounts: 
                - name: varnish-config
                  mountPath : /etc/varnish/
              ports:
                - name: http
                  containerPort: 80
                  protocol: TCP
                  targetPort: 80
              livenessProbe: 
                httpGet:
                  path: /healthcheck
                  port: http
                  port: 80
                failureThreshold: 3
                initialDelaySeconds: 45
                timeoutSeconds: 10
                periodSeconds: 20
              readinessProbe:
                httpGet:
                  path: /healthcheck
                  port: http
                  port: 80
                initialDelaySeconds: 10
                timeoutSeconds: 15
                periodSeconds: 5
              restartPolicy: "Always"
              resources:
    {{ toYaml .Values.resources | indent 12 }}
        {{- with .Values.nodeSelector }}
          nodeSelector:
    {{ toYaml . | indent 8 }}
        {{- end }}
        {{- with .Values.affinity }}
          affinity:
    {{ toYaml . | indent 8 }}
        {{- end }}
        {{- with .Values.tolerations }}
          tolerations:
    {{ toYaml . | indent 8 }}
        {{- end }}

vanrnish-config.yaml:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: {{ template "varnish.fullname" . }}-varnish-config
      labels:
        app: {{ template "varnish.fullname" . }}
        chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
        release: "{{ .Release.Name }}"
        heritage: "{{ .Release.Service }}"
    data:
      default.vcl: |-
    {{ $file := (.Files.Get "config/varnish.vcl") }}
    {{ tpl $file . | indent 4 }}

varnish.vcl:


    # VCL version 5.0 is not supported so it should be 4.0 or 4.1 even though actually used Varnish version is 6
    vcl 4.1;

    import std;
    # The minimal Varnish version is 5.0
    # For SSL offloading, pass the following header in your proxy server or load balancer: 'X-Forwarded-Proto: https'

    backend default {
      #.host = "{{ default "google.com" .Values.varnishBackendService }}";
      .host = "{{  .Values.varnishBackendService }}";
      .port = "{{  .Values.varnishBackendServicePort }}";
      #.port = "{{ default "80" .Values.varnishBackendServicePort }}";
      .first_byte_timeout = 60s;
      .connect_timeout = 300s ;
      .probe = {
            .url = "/";
            .timeout = 1s;
            .interval = 5s;
            .window = 5;
            .threshold = 3;
        }
    }



    backend server2 {
        .host = "74.125.24.105:80";
        .probe = {
            .url = "/";
            .timeout = 1s;
            .interval = 5s;
            .window = 5;
            .threshold = 3;
        }
    }

    import directors;

    sub vcl_init {
        new vdir = directors.round_robin();
        vdir.add_backend(default);
        vdir.add_backend(server2);
    }

    #sub vcl_recv {
     #   if (req.url ~ "/healthcheck"){
      #       error 200 "imok";
       #      set req.http.Connection = "close";
        # }
    #}
1
Have you tried to run this deployment without probes? Maybe there is some issue underlying inside the pod. Maybe this answer will be helpful as a reference: Stackoverflow answer.Dawid Kruk
@DawidKruk Yea I did tried without Probe , it was failing as the vcl response was not properly done due to syntax error..Debasis Singh

1 Answers

2
votes

The fact that Kubernetes returns an HTTP 503 error for both the readiness & the liveliness probes means that there's probably something wrong with the connection to your backend.

Interestingly, that's besides the point. Those probes aren't there to perform an end-to-end test of your HTTP flow. The probes are only there to verify if the service they are monitoring is responding.

That's why you can just return a synthetic HTTP response when capturing requests that point to /healthcheck.

Here's the VCL code to do it:

sub vcl_recv {
  if(req.url == "/healthcheck") {
    return(synth(200,"OK"));
  }
}

That doesn't explain the fact why you're getting an HTTP 503 error, but at least, the probes will work.