Using Terraform to move a Linode to a different region and upgrade the OS release
I have my Linode in the Mumbai, IN (ap-west) region and I want to move it to the eu-west region. Since I have already imported the Linode into Terraform, I was thinking of using Terraform again to move it to eu-west.
This is the IaC I have put together.
terraform {
  required_providers {
    linode = {
      source = "linode/linode"
    }
  }
  required_version = ">= 0.13"
}
variable "token" {
  type      = string
  sensitive = true
}
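# Assumed addition: the connection block below references var.domain_record_ip
# (the IP my DNS record points at), declared here so the snippet is self-contained.
variable "domain_record_ip" {
  type = string
}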
provider "linode" {
token = var.token
}
resource "linode_instance" "primary_instance" {
region = "eu-west"
type = "g6-nanode-1"
connection {
type = "ssh"
host = var.domain_record_ip
user = "linode_username"
private_key = "/home/localuser/.ssh/id_rsa"
timeout = "4m"
}
provisioner "remote-exec" {
inline = [
"sleep 25",
"sudo apt-get update",
"sudo apt-get -y install haproxy"
]
}
provisioner "file" {
source = "./haproxy.cfg"
destination = "/etc/haproxy/haproxy.cfg"
}
config {
kernel = "linode/grub2"
label = "Restore 257841 - My Ubuntu 20.10 Disk Profile"
memory_limit = 0
root_device = "/dev/sda"
run_level = "default"
virt_mode = "paravirt"
devices {
sda {
disk_id = 63023983
disk_label = "Restore 257841 - Ubuntu 20.10 Disk"
volume_id = 0
}
sdb {
disk_id = 63023984
disk_label = "Restore 257841 - 512 MB Swap Image"
volume_id = 0
}
}
helpers {
devtmpfs_automount = true
distro = true
modules_dep = true
network = true
updatedb_disabled = true
}
}
disk {
authorized_keys = ["ssh-rsa AASecretIntentionallyRemoved"]
authorized_users = ["linode_user"]
filesystem = "ext4"
image = "linode/ubuntu20.04"
label = "Restore 257841 - Ubuntu 20.10 Disk"
read_only = false
size = 25088
stackscript_id = 0
}
disk {
filesystem = "swap"
label = "Restore 257841 - 512 MB Swap Image"
read_only = false
size = 512
stackscript_id = 0
}
}
In the above code, the first thing I changed was region = "eu-west".
Now if I run terraform plan, it shows that the Linode in the Mumbai, IN region will be destroyed and a new one created in the eu-west region. However, here is the problem and why I have not applied the code yet: my Linode has HAProxy running, and I want it to run on the new Linode that gets created.
So in the above code I have added a connection block to reach the new Linode using the SSH key and the provided authorized_keys, then a remote-exec provisioner to install HAProxy, and a file provisioner to upload the haproxy.cfg that is currently in use on the Linode.
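One thing I am unsure about: the SSH user in the connection block is not root, so the file provisioner may not be able to write directly into /etc/haproxy. The fallback I have in mind is a rough sketch like the following, assuming the user has passwordless sudo: upload to /tmp, then move the file into place with a second remote-exec.
provisioner "file" {
  source      = "./haproxy.cfg"
  destination = "/tmp/haproxy.cfg"
}
provisioner "remote-exec" {
  inline = [
    # move the uploaded config into place and restart haproxy with elevated rights
    "sudo mv /tmp/haproxy.cfg /etc/haproxy/haproxy.cfg",
    "sudo systemctl restart haproxy"
  ]
}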
Another thing I want to take care of with this move is that the Ubuntu release on my Linode is deprecated, so I also want to move to Ubuntu 20.10.
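For reference, the root disk definition I am aiming for would look roughly like this (linode/ubuntu20.10 is my assumption of the correct image slug; I have not applied this yet):
disk {
  authorized_keys  = ["ssh-rsa AASecretIntentionallyRemoved"]
  authorized_users = ["linode_user"]
  filesystem       = "ext4"
  # assumed image slug for the newer release
  image            = "linode/ubuntu20.10"
  label            = "Restore 257841 - Ubuntu 20.10 Disk"
  read_only        = false
  size             = 25088
  stackscript_id   = 0
}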
After the above changes, when I run terraform plan I do not see the disk section updated, nor any additions or modifications such as the OS release change or HAProxy being installed. Here is the result of terraform plan:
      - private_ip         = false -> null
      + private_ip_address = (known after apply)
      ~ region             = "ap-west" -> "eu-west" # forces replacement
      ~ specs              = [
          - {
              - disk     = 25600
              - memory   = 1024
              - transfer = 1000
              - vcpus    = 1
            },
        ] -> (known after apply)
      ~ status             = "running" -> (known after apply)
      ~ swap_size          = 512 -> (known after apply)
      - tags               = [] -> null
        # (2 unchanged attributes hidden)

      ~ alerts {
          ~ cpu            = 90 -> (known after apply)
          ~ io             = 10000 -> (known after apply)
          ~ network_in     = 10 -> (known after apply)
          ~ network_out    = 10 -> (known after apply)
          ~ transfer_quota = 80 -> (known after apply)
        }

      ~ config {
          ~ label = "Restore 257841 - My Ubuntu 20.10 Disk Profile" -> "Restore 257841 - My Ubuntu 20.10 Disk Profile"
            # (5 unchanged attributes hidden)

          ~ devices {
              ~ sda {
                  ~ disk_label = "Restore 257841 - Ubuntu 20.10 Disk" -> "Restore 257841 - Ubuntu 20.10 Disk"
                    # (2 unchanged attributes hidden)
                }
              + sdc {
                  + disk_id    = (known after apply)
                  + disk_label = (known after apply)
                  + volume_id  = (known after apply)
                }
              + sdd {
                  + disk_id    = (known after apply)
                  + disk_label = (known after apply)
                  + volume_id  = (known after apply)
                }
              + sde {
                  + disk_id    = (known after apply)
                  + disk_label = (known after apply)
                  + volume_id  = (known after apply)
                }
              + sdf {
                  + disk_id    = (known after apply)
                  + disk_label = (known after apply)
                  + volume_id  = (known after apply)
                }
              + sdg {
                  + disk_id    = (known after apply)
                  + disk_label = (known after apply)
                  + volume_id  = (known after apply)
                }
              + sdh {
                  + disk_id    = (known after apply)
                  + disk_label = (known after apply)
                  + volume_id  = (known after apply)
                }
                # (1 unchanged block hidden)
            }
            # (1 unchanged block hidden)
        }

      ~ disk {
          - authorized_keys  = [] -> null
          - authorized_users = [] -> null
          ~ id               = 63023983 -> (known after apply)
          + image            = (known after apply)
          ~ label            = "Restore 257841 - Ubuntu 20.10 Disk" -> "Restore 257841 - Ubuntu 20.04 Disk"
          ~ stackscript_data = (sensitive value)
          ~ stackscript_id   = 0 -> (known after apply)
            # (3 unchanged attributes hidden)
        }
      ~ disk {
          - authorized_keys  = [] -> null
          - authorized_users = [] -> null
          ~ id               = 63023984 -> (known after apply)
          + image            = (known after apply)
          ~ stackscript_data = (sensitive value)
          ~ stackscript_id   = 0 -> (known after apply)
            # (4 unchanged attributes hidden)
        }

      - timeouts {}
    }
Plan: 1 to add, 0 to change, 1 to destroy.
Why are the disks not showing these updates:
authorized_keys = ["ssh-rsa AASecretIntentionallyRemoved"]
authorized_users = ["linode_user"]
filesystem = "ext4"
image = "linode/ubuntu20.10"
And why doesn't the plan pick up the connection block or the other provisioners?
Anyone with knowledge of Terraform, please help.