I need to use the c5 instance type in AWS and found that it breaks my Terraform automation, unlike t2 instances, where each boot provides consistent device names under /dev. On a t2 instance, this:

resource "aws_volume_attachment" "ebs_att" {
  device_name = "/dev/xvdb"
  volume_id   = aws_ebs_volume.dev_build_data_dir.id
  instance_id = aws_instance.dev_build.id
  skip_destroy = true
}

creates exactly that /dev/xvd(x) device on the EC2 instance, which is easy to automate.

However, on c5 instances with multiple attached volumes, the /dev/xvd(x) device names Terraform specifies are translated to /dev/nvme(x)n1 devices, and the mapping is not consistent across hosts or reboots. In my case I have multiple volumes I am trying to mount.

From the server side I don't see anything I can use for automation:

[root@ip-192-168-1-110 ~]# ls /dev/disk/by-id/
nvme-Amazon_Elastic_Block_Store_vol027ba08**********
nvme-Amazon_Elastic_Block_Store_vol085cd16**********
nvme-Amazon_Elastic_Block_Store_vol0a3b21f**********
nvme-Amazon_Elastic_Block_Store_vol0a3b21f**********-part1
nvme-Amazon_Elastic_Block_Store_vol0d2b239**********
nvme-nvme.1d0f-766f6c3032376261303865343732643164303632-416d617a6f6e20456c617374696320426c6f636b2053746f7265-00000001
nvme-nvme.1d0f-766f6c3038356364313630333864616665663835-416d617a6f6e20456c617374696320426c6f636b2053746f7265-00000001
nvme-nvme.1d0f-766f6c3061336232316661386434623862313138-416d617a6f6e20456c617374696320426c6f636b2053746f7265-00000001
nvme-nvme.1d0f-766f6c3061336232316661386434623862313138-416d617a6f6e20456c617374696320426c6f636b2053746f7265-00000001-part1
nvme-nvme.1d0f-766f6c3064326232333935336235656637356130-416d617a6f6e20456c617374696320426c6f636b2053746f7265-00000001
[root@ip-192-168-1-110 ~]# ls /dev/disk/by-
by-id/   by-path/ by-uuid/
[root@ip-192-168-1-110 ~]# ls /dev/disk/by-uuid/
1f140d37-9663-48db-b32d-6cf5859b958f  388a99ed-9486-4a46-aeb6-06eaf6c47675
387f3e88-33ce-4eb9-8a2d-6c9a9233a15e  a6581468-80a8-4c54-bc5c-55842807ead6
[root@ip-192-168-1-110 ~]# ll /dev/disk/by-uuid/
total 0
lrwxrwxrwx 1 root root 13 Nov  9 20:10 1f140d37-9663-48db-b32d-6cf5859b958f -> ../../nvme3n1
lrwxrwxrwx 1 root root 13 Nov  9 20:10 387f3e88-33ce-4eb9-8a2d-6c9a9233a15e -> ../../nvme1n1
lrwxrwxrwx 1 root root 15 Nov  9 20:10 388a99ed-9486-4a46-aeb6-06eaf6c47675 -> ../../nvme0n1p1
lrwxrwxrwx 1 root root 13 Nov  9 20:13 a6581468-80a8-4c54-bc5c-55842807ead6 -> ../../nvme2n1
[root@ip-192-168-1-110 ~]# lsblk
NAME        MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
nvme0n1     259:0    0    8G  0 disk
└─nvme0n1p1 259:1    0    8G  0 part /
nvme1n1     259:2    0  150G  0 disk
nvme2n1     259:3    0   50G  0 disk
nvme3n1     259:4    0    8G  0 disk
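
For what it's worth, the vol… names in /dev/disk/by-id/ above come from the NVMe controller serial number: on Nitro-based instances, EBS reports the volume ID, minus its dash, as the serial of each NVMe device. A minimal sketch of mapping devices back to volume IDs (assuming util-linux's lsblk is available, as it is on Amazon Linux):

#!/bin/bash
# Print the EBS volume ID backing each NVMe disk. The SERIAL column
# carries the volume ID without the dash (e.g. vol027ba08...).
for dev in /dev/nvme?n1; do
  printf '%s -> %s\n' "$dev" "$(lsblk -dno SERIAL "$dev")"
done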

Terraform code (for one volume resource):

resource "aws_volume_attachment" "ebs_att" {
  device_name = "/dev/xvdb"
  volume_id   = aws_ebs_volume.dev_build_data_dir.id
  instance_id = aws_instance.dev_build.id
  skip_destroy = true
}

resource "aws_ebs_volume" "dev_build_data_dir" {
  availability_zone = var.AvailabilityZone
  size              = var.EbsDataVolumeSize
}

User data script (for one volume resource):

#!/bin/bash
# Format the volume only if it does not already carry an XFS filesystem.
if ! file -s /dev/nvme1n1 | grep -q 'XFS'; then
  mkfs -t xfs /dev/nvme1n1
fi

# Create the mount point if it does not exist.
mkdir -p /opt/home/user/data

# Look up the filesystem UUID directly instead of grepping blkid output,
# which is fragile if other quoted fields (e.g. LABEL) come first.
volume_uuid_nvme1n1=$(blkid -s UUID -o value /dev/nvme1n1)

# Persist the mount in /etc/fstab, then mount and fix ownership.
if ! grep -q "$volume_uuid_nvme1n1" /etc/fstab; then
  echo "UUID=$volume_uuid_nvme1n1  /opt/home/user/data  xfs  defaults,nofail  0  2" >> /etc/fstab
fi
if grep -q "$volume_uuid_nvme1n1" /etc/fstab; then
  mount -a
  chown -R user:user /opt/home/user/data
fi
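
One timing caveat, which is an assumption about this setup rather than something shown above: aws_volume_attachment is applied as a separate step from the instance, so the user data script can start running before the volume is actually attached. A small guard before the mkfs check, using /dev/nvme1n1 from the script above as the placeholder device:

# Wait up to ~2.5 minutes for the attached volume's block device to
# appear; user data may run before the attachment completes.
for i in $(seq 1 30); do
  if [ -b /dev/nvme1n1 ]; then
    break
  fi
  sleep 5
done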

Using Terraform, does anyone have a solution that will let me programmatically identify these volumes and mount them?

1 Answer

Figured it out: I didn't realize that I could also mount by ID. What I ended up doing was grabbing the volume ID, passing it into the script as a variable, and building the stable device name nvme-Amazon_Elastic_Block_Store_vol${data_ebs_id:4} from it (${data_ebs_id:4} is a bash substring that strips the leading "vol-", since the by-id name omits the dash). Then I add a line like this to /etc/fstab:

/dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol${data_ebs_id:4}  /opt/home/user/data  xfs  defaults,nofail  0  2
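
To make that concrete, here is a minimal sketch of the resulting user data, assuming Terraform renders the volume ID into the data_ebs_id variable (the value below is a hypothetical placeholder, and the mount point is the one from the question):

#!/bin/bash
# Placeholder: in practice Terraform supplies this value, e.g. rendered
# from aws_ebs_volume.dev_build_data_dir.id via a template.
data_ebs_id="vol-0123456789abcdef0"

# The by-id symlink drops the dash from the volume ID, so "vol-0123..."
# appears as "vol0123...". ${data_ebs_id:4} strips the leading "vol-".
device="/dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol${data_ebs_id:4}"

# Format only if the underlying disk has no XFS filesystem yet.
if ! file -s "$(readlink -f "$device")" | grep -q 'XFS'; then
  mkfs -t xfs "$device"
fi

mkdir -p /opt/home/user/data

# Persist the mount via the stable by-id path; nofail keeps the boot
# from hanging if the volume is ever missing.
if ! grep -q "$device" /etc/fstab; then
  echo "$device  /opt/home/user/data  xfs  defaults,nofail  0  2" >> /etc/fstab
fi
mount -a
chown -R user:user /opt/home/user/data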