
When creating a new AKS cluster with azurerm_kubernetes_cluster, a new resource group named MC_%RESOURCE_GROUP_NAME%_%CLUSTER_NAME%_%LOCATION% is created automatically, and it contains all the networking resources for the cluster.

My goal is to use a single VNet that holds all of these resources (the AKS networking resources) as well as additional resources such as VMs and Azure Cache.

As I see it, there are two options to resolve this:

  1. Use terraform output and create the new resources inside the VNet that was created automatically. This is not ideal, since I have zero control over how that networking is created (a rough sketch of what this would look like follows this list).
  2. Create my own VNet and attach the AKS cluster to it.
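
For completeness, a minimal sketch of what option 1 could look like, assuming the cluster resource is named azurerm_kubernetes_cluster.aks (the names here are placeholders, not my actual config):

output "aks_node_resource_group" {
  # name of the automatically created MC_* resource group
  value = azurerm_kubernetes_cluster.aks.node_resource_group
}

data "azurerm_resource_group" "aks_nodes" {
  # read the group back so its name/location can be reused for other resources
  name = azurerm_kubernetes_cluster.aks.node_resource_group
}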

Currently, I'm pursuing option 2, but with no luck so far.

Any thoughts, recommendations, and examples would be appreciated.


1 Answer


It doesn't really work like that. As you saw, Azure will create a brand new resource group with a bunch of AKS-related resources in it. But the cluster itself will go in whatever resource group you want it to be in. Then, using the Azure network plugin, your pods can go onto the VNet you specify and have access to the other resources in your VNet.

Here's a sample of how I built it when developing for my own needs; it should be a good starting point for you.

resource "azurerm_resource_group" "rg" {
    name            = var.rgname
    location        = var.location
    tags            = var.default-tags
}

resource "azurerm_virtual_network" "vnet" {
    name                        = "vnet"
    location                    = azurerm_resource_group.rg.location
    resource_group_name         = azurerm_resource_group.rg.name
    address_space               = [var.ipspace]
    tags                        = var.default-tags
}

resource "azurerm_subnet" "vmsubnet" {
    name                        = "vmsubnet"
    resource_group_name         = azurerm_resource_group.rg.name
    virtual_network_name        = azurerm_virtual_network.vnet.name
    address_prefix              = var.vmsubnet
}

resource "azurerm_subnet" "akspodssubnet" {
    name                        = "akspodssubnet"
    resource_group_name         = azurerm_resource_group.rg.name
    virtual_network_name        = azurerm_virtual_network.vnet.name
    address_prefix              = var.akspodssubnet
}


resource "azurerm_kubernetes_cluster" "k8s" {
  name                = "K8Scluster"
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name # the RG where the cluster object itself goes
  dns_prefix          = "k8s"
  node_resource_group = "K8S${azurerm_resource_group.rg.name}"  #  all the k8s' entities must be in fdifferent RG than where the cluster object itself is
  api_server_authorized_ip_ranges = ["REDACTED"]
  #enable_pod_security_policy      = true
  kubernetes_version  = "1.15.7"

  default_node_pool {
    name                  = "default"
    type                  = "AvailabilitySet"
    vm_size               = var.vmsize # Standard_DC2s_v2 Standard_B1ms
    enable_node_public_ip = false
    enable_auto_scaling   = false
    os_disk_size_gb       = 30
    node_count            = 1
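    # placing the node pool in the custom subnet is what ties the cluster into the VNet above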
    vnet_subnet_id        = azurerm_subnet.akspodssubnet.id
  }

  addon_profile {
    kube_dashboard { enabled = true }
  }

  network_profile {
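    # azure CNI: pods get IPs directly from the VNet subnet instead of an overlay network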
    network_plugin    = "azure"
    network_policy    = "azure"
    load_balancer_sku = "standard"
    service_cidr      = var.aksservicecidr
    docker_bridge_cidr = var.dockercidrip
    dns_service_ip    = var.aksdns
  }

  linux_profile {
    admin_username = var.sudouser
    ssh_key { key_data = var.sshpubkey }
  }

  service_principal {
    client_id     = var.client_id
    client_secret = var.client_secret
  }

  tags  = var.default-tags
}

output "client_certificate" {
  value = azurerm_kubernetes_cluster.k8s.kube_config.0.client_certificate
}

output "kube_config" {
  value = azurerm_kubernetes_cluster.k8s.kube_config_raw
}
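
If you then want VMs or an Azure Cache instance on the same VNet, you just point them at the subnets defined above. A rough sketch, untested and with placeholder names, of a NIC for a VM in the vmsubnet:

resource "azurerm_network_interface" "vmnic" {
  name                = "vmnic"
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name

  ip_configuration {
    name                          = "internal"
    subnet_id                     = azurerm_subnet.vmsubnet.id
    private_ip_address_allocation = "Dynamic"
  }
}

Azure Cache for Redis can be wired in the same way through its subnet_id argument, though note that VNet injection is Premium-tier only and the cache needs its own dedicated subnet.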