
I want to install a simple Helm chart (BinderHub) on GKE, but my current reference code (mostly from the docs: https://github.com/hashicorp/learn-terraform-provision-gke-cluster) doesn't seem to work. Manual installation with the helm install/upgrade commands works fine.
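
For reference, the manual installation that works is along these lines (a sketch: the chart repository, version, and values files are the ones referenced in the Terraform config below; the local repo alias "jupyterhub" is just a placeholder name):

$ helm repo add jupyterhub https://jupyterhub.github.io/helm-chart/
$ helm repo update
$ helm upgrade --install binderhub jupyterhub/binderhub \
    --version=0.2.0-n499.h81660eb \
    -f config.yaml \
    -f secret.yaml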

gke.tf:

terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "3.56.0"
    }
  }

  required_version = "~> 0.14"
}

variable "gke_username" {
  default     = ""
  description = "gke username"
}

variable "gke_password" {
  default     = ""
  description = "gke password"
}

variable "gke_num_nodes" {
  default     = 2
  description = "number of gke nodes"
}

data "google_client_config" "default" {}

data "google_container_cluster" "cluster" {
  name     = "cluster"
  location = "us-central1"
}

# https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster
# GKE cluster
resource "google_container_cluster" "primary" {
  name     = "${var.project_id}-gke"
  location = var.region


  remove_default_node_pool = true
  initial_node_count       = 1

  network    = google_compute_network.vpc.name
  subnetwork = google_compute_subnetwork.subnet.name

  master_auth {
    username = var.gke_username
    password = var.gke_password

    client_certificate_config {
      issue_client_certificate = false
    }
  }

}


# https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_node_pool
# Separately Managed Node Pool
resource "google_container_node_pool" "primary_nodes" {
  name       = "${google_container_cluster.primary.name}-node-pool"
  location   = var.region
  cluster    = google_container_cluster.primary.name
  node_count = var.gke_num_nodes

  autoscaling {
    min_node_count = 1
    max_node_count = 2
  }

  node_config {
    oauth_scopes = [
      "https://www.googleapis.com/auth/logging.write",
      "https://www.googleapis.com/auth/monitoring",
    ]

    labels = {
      env = var.project_id
    }

    # preemptible  = true
    machine_type = "n1-standard-1"
    tags         = ["gke-node", "${var.project_id}-gke"]
    metadata = {
      disable-legacy-endpoints = "true"
    }
  }
  /*management {
    auto_repair  = true
    auto_upgrade = true
  }*/
}

 provider "kubernetes" {
   load_config_file = "false"

   host     = google_container_cluster.primary.endpoint
   //username = var.gke_username
   //password = var.gke_password

   client_certificate     = google_container_cluster.primary.master_auth.0.client_certificate
   client_key             = google_container_cluster.primary.master_auth.0.client_key
   cluster_ca_certificate = google_container_cluster.primary.master_auth.0.cluster_ca_certificate
}

helm.tf:

variable "helm_version" {
  default = "v2.0.2"
}

provider "helm" {

  kubernetes {
    #load_config_file = false
    #config_path = "~/.kube/config"
    host     = google_container_cluster.primary.endpoint
    token    = data.google_client_config.default.access_token
    cluster_ca_certificate = base64decode(google_container_cluster.primary.master_auth.0.cluster_ca_certificate)

  }
}

resource "helm_release" "binderhub" {
  name       = "binderhub"
  namespace  = "kube-system"
  repository = "https://jupyterhub.github.io/helm-chart/"
  chart      = "binderhub"
  version    = "0.2.0-n499.h81660eb"
  values = [
    file("${path.module}/config.yaml"),
    file("${path.module}/secret.yaml")
  ]
}

Output of terraform apply:

google_compute_network.vpc: Refreshing state... [id=projects/odp-test-3ffa/global/networks/odp-test-3ffa-vpc]
google_compute_subnetwork.subnet: Refreshing state... [id=projects/odp-test-3ffa/regions/us-central1/subnetworks/odp-test-3ffa-subnet]
google_container_cluster.primary: Refreshing state... [id=projects/odp-test-3ffa/locations/us-central1/clusters/odp-test-3ffa-gke]
google_container_node_pool.primary_nodes: Refreshing state... [id=projects/odp-test-3ffa/locations/us-central1/clusters/odp-test-3ffa-gke/nodePools/odp-test-3ffa-gke-node-pool]
helm_release.binderhub: Refreshing state... [id=binderhub]
Apply complete! Resources: 0 added, 0 changed, 0 destroyed.

$ terraform state list

...
google_container_cluster.primary
google_container_node_pool.primary_nodes
helm_release.binderhub

$ helm list 

NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
(empty)

$ kubectl --namespace=default get nodes

NAME STATUS ROLES AGE VERSION
gke-XXX-3ffa-gk-XXXX-4r90 Ready 28m v1.17.14-gke.1600
gke-XXX-3ffa-gk-XXXX-r30b Ready 28m v1.17.14-gke.1600
gke-XXX-3ffa-gk-XXXX-2j7s Ready 28m v1.17.14-gke.1600
...

Hello, Martin, I have not seen anything wrong in your configuration files. Could you please check this similar issue with Helm list not showing anything? - Pit

1 Answer


Thanks to @baconglobber at https://github.com/helm/helm/issues/9388.

It turns out that what we wanted was to install binderhub (the Helm package) in the "default" namespace, as opposed to "kube-system". A bare helm list only shows releases in the current namespace, which is why the release installed into kube-system never showed up. Something along these lines:

resource "helm_release" "binderhub" {
  name       = "binderhub"
  namespace  = "default"
  repository = "https://jupyterhub.github.io/helm-chart/"
  chart      = "binderhub"
  version    = var.binderhub_ver
  values = [
    file("${path.module}/config.yaml"),
    file("${path.module}/secret.yaml")
  ]
}
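
Note that version = var.binderhub_ver assumes a variable defined elsewhere, e.g. with the chart version from the question ("0.2.0-n499.h81660eb") as its default. To verify the release after terraform apply, remember that Helm 3's bare helm list only looks at the current namespace (which is why the output appeared empty earlier); a quick check:

$ helm list --namespace default
$ helm list --all-namespaces   # or scan every namespace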