compute node with terragrunt

now the same but with terragrunt

cd ~/dev/cloud-terraform/

use terragrunt inputs instead of TF_VAR_* shell exports

mv -i main.tf main.tf.terraform
vi main.tf

resource "yandex_compute_disk" "boot" {
  name     = var.name
  type     = "network-ssd"
  zone     = "ru-central1-d"
  size     = 5
  image_id = var.image_id
}

resource "yandex_compute_instance" "this" {
  name  = var.name
  platform_id = "standard-v4a"
  zone  = "ru-central1-d"

  resources {
cores    = 2
memory  = 1
core_fraction = 20
  }

  boot_disk {
disk_id = yandex_compute_disk.boot.id
  }

  # you need a public ip unless you have access to the internal subnet
  network_interface {
subnet_id = var.zone_subnet_id
nat       = true
  }

  # username does not matter, as it depends on the image
  metadata = {
ssh-keys       = "debian:${var.pubkey}"
serial-port-enable = 1
  }

  scheduling_policy {
preemptible = true
  }

  allow_stopping_for_update = true
}

diff -u main.tf.terraform main.tf

vi terragrunt.hcl

inputs = {
  name           = "testvm"
  image_id       = "fd8miiisblcuktpjr6sc" # latest debian-12 as of jan 2026
  zone_subnet_id = "..."                  # depends on the chosen zone
  pubkey         = "openssh format pubkey here"
}
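
if you do not know the ids, yc can look them up (standard-images is the folder with the public images; the grep pattern is just an example):

yc compute image list --folder-id standard-images | grep debian-12
yc vpc subnet list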

vi variables.tf

variable "name" { type = string }
variable "image_id" { type = string }
variable "zone_subnet_id" { type = string }
variable "pubkey" { type = string }

ready to go

rm -rf .terraform* # drop the plugin dir and lock file left over from the plain terraform run
terragrunt init

yc config profile list
yc config profile activate test

export YC_TOKEN=`yc iam create-token`
export YC_CLOUD_ID=`yc config get cloud-id`
export YC_FOLDER_ID=`yc config get folder-id`

echo $YC_TOKEN
echo $YC_CLOUD_ID
echo $YC_FOLDER_ID
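
these three variables are read by the yandex provider itself, so the provider block (kept out of this guide's listings) needs no credentials inline; a minimal sketch:

provider "yandex" {
  zone = "ru-central1-d"
}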

terragrunt plan
# add --terragrunt-debug to have terragrunt write the resolved inputs to a debug tfvars file

terragrunt apply
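
to ssh in without fishing the address out of the state, an output helps; a sketch, assuming the usual nat_ip_address attribute on the first network interface:

output "external_ip" {
  value = yandex_compute_instance.this.network_interface[0].nat_ip_address
}

ssh debian@$(terragrunt output -raw external_ip)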
