
I'm trying to figure out the proper way to use Ansible with Vagrant. By default, Vagrant seems to isolate Ansible execution per box: it runs the playbook after each box comes up, applying only the parts relevant to that single box in the loop. I find this VERY counterproductive, and I have tried tricking Vagrant into executing the playbook across all of the hosts AFTER all of them have booted, but it seems that Ansible, when started from Vagrant, never sees more than a single box at a time.

Edit: these are the versions I am working with:

Vagrant: 2.2.6, Ansible: 2.5.1, VirtualBox: 6.1

The playbook (with hosts.ini) executes without issues by itself when I run it stand-alone with the ansible-playbook executable after the hosts come up, so the problem must be in my Vagrantfile. I just cannot figure it out.
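For completeness, running it stand-alone looks roughly like this (assuming the command is run from the directory that holds ansible.cfg, hosts.ini and playbook.yml):

# run the playbook against the inventory defined in hosts.ini
ansible-playbook -i hosts.ini playbook.yml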

This is the Vagrantfile:

# -*- mode: ruby -*-
# vi: set ft=ruby :

IMAGE_NAME = "ubuntu/bionic64"

Vagrant.configure("2") do |config|
    config.ssh.insert_key = false
    config.vm.box = IMAGE_NAME

    # Virtualbox configuration
    config.vm.provider "virtualbox" do |v|
        v.memory = 4096
        v.cpus = 2
        #v.linked_clone = true
    end

    # master and node definition
    boxes = [
        { :name => "k8s-master", :ip => "192.168.50.10" },
        { :name => "k8s-node-1", :ip => "192.168.50.11" }
    ]

    boxes.each do |opts|
        config.vm.define opts[:name] do |config|
            config.vm.hostname = opts[:name]
            config.vm.network :private_network, ip: opts[:ip]

            if opts[:name] == "k8s-node-1"
                config.vm.provision "ansible_local" do |ansible|
                    ansible.compatibility_mode = "2.0"
                    ansible.limit = "all"
                    ansible.config_file = "ansible.cfg"
                    ansible.become = true
                    ansible.playbook = "playbook.yml"
                    ansible.groups = {
                        "masters" => ["k8s-master"],
                        "nodes" => ["k8s-node-1"]
                    }
                end
            end
        end
    end
end

ansible.cfg

[defaults]
connection = smart
timeout = 60
deprecation_warnings = False
host_key_checking = False
inventory = hosts.ini

[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes

hosts.ini

[masters]
k8s-master ansible_host=192.168.50.10 ansible_user=vagrant

[nodes]
k8s-node-1 ansible_host=192.168.50.11 ansible_user=vagrant

[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_ssh_user=vagrant
ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key

playbook.yml

- hosts: all
  become: yes
  tasks:
    - name: Update apt cache.
      apt: update_cache=yes cache_valid_time=3600
      when: ansible_os_family == 'Debian'

    - name: Ensure swap is disabled.
      mount:
        name: swap
        fstype: swap
        state: absent

    - name: Disable swap.
      command: swapoff -a
      when: ansible_swaptotal_mb > 0

    - name: create the 'mobile' user
      user: name=mobile append=yes state=present createhome=yes shell=/bin/bash

    - name: allow 'mobile' to have passwordless sudo
      lineinfile:
        dest: /etc/sudoers
        line: 'mobile ALL=(ALL) NOPASSWD: ALL'
        validate: 'visudo -cf %s'

    - name: set up authorized keys for the mobile user
      authorized_key:
        user: mobile
        key: "{{ lookup('pipe','cat ssh_keys/*.pub') }}"
        state: present
        exclusive: yes

- hosts: all
  become: yes
  tasks:
   - name: install Docker
     apt:
       name: docker.io
       state: present
       update_cache: true

   - name: install APT Transport HTTPS
     apt:
       name: apt-transport-https
       state: present

   - name: add Kubernetes apt-key
     apt_key:
       url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
       state: present

   - name: add Kubernetes' APT repository
     apt_repository:
      repo: deb http://apt.kubernetes.io/ kubernetes-xenial main
      state: present
      filename: 'kubernetes'

   - name: install kubelet
     apt:
       name: kubelet=1.17.0-00
       state: present
       update_cache: true

   - name: install kubeadm
     apt:
       name: kubeadm=1.17.0-00
       state: present

- hosts: masters
  become: yes
  tasks:
   - name: install kubectl
     apt:
       name: kubectl=1.17.0-00
       state: present
       force: yes

- hosts: k8s-master
  become: yes
  tasks:
    - name: check docker status
      systemd:
        state: started
        name: docker

    - name: initialize the cluster
      shell: kubeadm init --apiserver-advertise-address 192.168.50.10 --pod-network-cidr=10.244.0.0/16 >> cluster_initialized.txt
      args:
        chdir: $HOME
        creates: cluster_initialized.txt

    - name: create .kube directory
      become: yes
      become_user: mobile
      file:
        path: $HOME/.kube
        state: directory
        mode: 0755

    - name: copy admin.conf to user's kube config
      copy:
        src: /etc/kubernetes/admin.conf
        dest: /home/mobile/.kube/config
        remote_src: yes
        owner: mobile

    - name: install Pod network
      become: yes
      become_user: mobile
      shell: kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml >> pod_network_setup.txt
      args:
        chdir: $HOME
        creates: pod_network_setup.txt

- hosts: k8s-master
  become: yes
  gather_facts: false
  tasks:
    - name: get join command
      shell: kubeadm token create --print-join-command 2>/dev/null
      register: join_command_raw

    - name: set join command
      set_fact:
        join_command: "{{ join_command_raw.stdout_lines[0] }}"

- hosts: nodes
  become: yes
  tasks:
    - name: check docker status
      systemd:
        state: started
        name: docker

    - name: join cluster
      shell: "{{ hostvars['k8s-master'].join_command }} >> node_joined.txt"
      args:
        chdir: $HOME
        creates: node_joined.txt

The moment the playbook tries to execute against k8s-master, it fails like this:

fatal: [k8s-master]: UNREACHABLE! => {"changed": false, "msg": "Failed to connect to the host via ssh: ssh: Could not resolve hostname k8s-master: Temporary failure in name resolution", "unreachable": true}

The host is up. SSH works.

Who can help me sort this out?

Thanks!

Can you tell which Ansible version Vagrant uses? Earlier versions used ansible_ssh_host instead of ansible_host. Can you try that? – Dawid Kruk
Thanks for calling out the missing information, Dawid. I added it to the question. Unfortunately, I do not think there is an issue with either hosts.ini or the playbook, as those execute without issues on their own. It's only when they are executed from within Vagrant that just the last host in the loop can be reached. If I added more boxes and looped over them, it would only connect to the last one. It's some sort of isolation thing in Ansible that Vagrant does that is REALLY counterproductive. I changed it to ansible_ssh_host and the issue remains. – Tobias
I found the error and a solution. Need some time to write it down. Will keep you posted. – Dawid Kruk
Awesome, let me know if you need more information or want me to try anything. – Tobias

1 Answer


I have managed to use Ansible inside Vagrant.

Here is what I did to make it work:

Steps to reproduce:

  • Install Vagrant and VirtualBox
  • Create all the necessary files and directories
    • ansible.cfg
    • playbook.yml
    • hosts
    • insecure_private_key
    • Vagrantfile
  • Test

Install Vagrant and VirtualBox

Follow the installation guides on the official Vagrant and VirtualBox sites.

Create all the necessary files and directories

This example is based on the original poster's files.

Create vagrant and ansible directories to store all the configuration files. The structure could look like this:

  • vagrant - directory
    • Vagrantfile - the main Vagrant configuration file
  • ansible - directory
    • ansible.cfg - Ansible configuration file
    • playbook.yml - playbook with the steps for Ansible to execute
    • hosts - inventory file with information about the hosts
    • insecure_private_key - private key for the created machines

The ansible folder is a separate directory that will be synced to k8s-node-1.

By default, Vagrant shares the vagrant folder with permissions 777, which gives the owner, group and others full access to everything inside it.

Logging in to the virtual machine manually and running the ansible-playbook command inside that shared directory produces permission-related errors: SSH rejects a private key with such open permissions, and Ansible refuses to use a world-writable ansible.cfg, rendering both files useless there.
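You can see this from the host with a quick check (a sketch; the exact listing depends on the Vagrant/VirtualBox versions in use):

# The default /vagrant share is world-writable; SSH rejects a private key with
# permissions this open, and Ansible will not trust a config file stored there:
vagrant ssh k8s-node-1 -c "ls -ld /vagrant"
# expect something like: drwxrwxrwx 1 vagrant vagrant ... /vagrant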

Ansible.cfg

ansible.cfg is the Ansible configuration file. Here is the example used:

[defaults]
connection = smart
timeout = 60
deprecation_warnings = False
host_key_checking = False
[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes

Create ansible.cfg inside the ansible directory.

Playbook.yml

The example playbook.yml below lists the steps for Ansible to execute. It checks the connections and tests whether the groups are configured correctly:

- name: Check all connections 
  hosts: all 
  tasks:
    - name: Ping
      ping: 

- name: Check specific connection to masters 
  hosts: masters 
  tasks:
    - name: Ping
      ping: 

- name: Check specific connection to nodes 
  hosts: nodes 
  tasks:
    - name: Ping
      ping: 

Create playbook.yml inside the ansible directory.

Insecure_private_key

To successfully connect to the virtual machines you will need the insecure_private_key. Vagrant creates it on your physical machine (for example after invoking $ vagrant init inside the vagrant directory) and stores it in HOME_DIRECTORY/.vagrant.d. Copy it to the ansible folder.
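Copying it can be as simple as the following (the source path is Vagrant's default location on the host; adjust if yours differs):

# copy Vagrant's insecure key next to the other Ansible files
cp ~/.vagrant.d/insecure_private_key ./ansible/insecure_private_key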

Hosts

The hosts file below passes the information about the hosts to Ansible:

[masters]
k8s-master ansible_host=192.168.50.10 ansible_user=vagrant

[nodes]
k8s-node-1 ansible_host=192.168.50.11 ansible_user=vagrant

[all:vars]
ansible_python_interpreter=/usr/bin/python3
ansible_ssh_user=vagrant
ansible_ssh_private_key_file=/ansible/insecure_private_key

Create the hosts file inside the ansible directory.

Pay particular attention to: ansible_ssh_private_key_file=/ansible/insecure_private_key

This line tells Ansible to use the key mentioned earlier.
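As a quick sanity check (the IP and user come from the hosts file above), the key can be tested by hand from inside k8s-node-1 once the folder is synced:

# verify that the synced key lets the provisioning box reach the master
ssh -i /ansible/insecure_private_key vagrant@192.168.50.10 hostname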

Vagrantfile

The Vagrantfile is the main configuration file:

# -*- mode: ruby -*-
# vi: set ft=ruby :

IMAGE_NAME = "ubuntu/bionic64"

Vagrant.configure("2") do |config|
    config.ssh.insert_key = false
    config.vm.box = IMAGE_NAME

    # Virtualbox configuration
    config.vm.provider "virtualbox" do |v|
        v.memory = 4096
        v.cpus = 2
        #v.linked_clone = true
    end

    # master and node definition
    boxes = [
        { :name => "k8s-master", :ip => "192.168.50.10" },
        { :name => "k8s-node-1", :ip => "192.168.50.11" }
    ]

    boxes.each do |opts|
        config.vm.define opts[:name] do |config|
            config.vm.hostname = opts[:name]
            config.vm.network :private_network, ip: opts[:ip]

            if opts[:name] == "k8s-node-1"
                config.vm.synced_folder "../ansible", "/ansible", :mount_options => ["dmode=700", "fmode=700"]

                config.vm.provision "ansible_local" do |ansible|
                    ansible.compatibility_mode = "2.0"
                    ansible.limit = "all"
                    ansible.config_file = "/ansible/ansible.cfg"
                    ansible.become = true
                    ansible.playbook = "/ansible/playbook.yml"
                    ansible.inventory_path = "/ansible/hosts" 
                end
            end
        end
    end
end

Pay particular attention to:

config.vm.synced_folder "../ansible", "/ansible", :mount_options => ["dmode=700", "fmode=700"]

config.vm.synced_folder will sync the ansible directory, with all the files inside it, to k8s-node-1.

It sets permissions so that only the owner (the vagrant user) has full access.

ansible.inventory_path =  "/ansible/hosts"

ansible.inventory_path tells Vagrant to provide the hosts file to Ansible.
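While iterating on the playbook you do not have to recreate the boxes; the provisioner can be re-run on its own and the synced folder checked by hand (standard Vagrant CLI, shown as a sketch):

# re-run only the provisioning step on the box that triggers Ansible
vagrant provision k8s-node-1

# confirm the synced folder came through with owner-only permissions (dmode/fmode 700)
vagrant ssh k8s-node-1 -c "ls -ld /ansible"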

Test

To test it, run the following command from the vagrant directory: $ vagrant up

The Ansible-related part of the output should look like this:

==> k8s-node-1: Running provisioner: ansible_local...
    k8s-node-1: Installing Ansible...
    k8s-node-1: Running ansible-playbook...

PLAY [Check all connections] ***************************************************

TASK [Gathering Facts] *********************************************************
ok: [k8s-master]
ok: [k8s-node-1]

TASK [Ping] ********************************************************************
ok: [k8s-master]
ok: [k8s-node-1]

PLAY [Check specific connection to masters] ************************************

TASK [Gathering Facts] *********************************************************
ok: [k8s-master]

TASK [Ping] ********************************************************************
ok: [k8s-master]

PLAY [Check specific connection to nodes] **************************************

TASK [Gathering Facts] *********************************************************
ok: [k8s-node-1]

TASK [Ping] ********************************************************************
ok: [k8s-node-1]

PLAY RECAP *********************************************************************
k8s-master                 : ok=4    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
k8s-node-1                 : ok=4    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0