umorpha-boxes/bin/vultr-install

210 lines
6.8 KiB
Bash
Executable File

#!/usr/bin/env bash
# Copyright (C) 2023 Umorpha Systems
# SPDX-License-Identifier: AGPL-3.0-or-later

# Strict mode: -e exit on unhandled failure; -u error on unset variables;
# -E make ERR traps fire inside functions; pipefail fail a pipeline if any
# stage fails.
set -euE -o pipefail
# Print the help/usage text to stdout.
#
# NB: the here-doc delimiter is deliberately unquoted so that ${0##*/}
# expands to the program name; every other '$' in the body is therefore
# escaped.  (Previously the ${nproc}/${ram}/... placeholders in the plan
# table were unescaped, so under `set -u` calling usage aborted with
# "nproc: unbound variable".)
usage() {
	cat <<EOF
Usage: ${0##*/} --name=HOSTNAME --plan=PLAN_ID --region=REGION_ID
   or: ${0##*/} --help

Create/wipe a Vultr instance and install an image onto it.

For selecting the appropriate plan ID, determine which type you want
from the following table, then list the plans of that type with
\`vultr-cli plans list --type=TYPE\`

| type  | WebUI label                                 | ID format                                   |
|-------+---------------------------------------------+---------------------------------------------|
| vc2   | Cloud Compute : Regular Performance         | vc2-\${nproc}c-\${ram}gb[-sc1]                |
| vhf   | Cloud Compute : High Frequency              | vhf-\${nproc}c-\${ram}gb[-sc1]                |
| vhp   | Cloud Compute : High Performance            | vhp-\${nproc}c-\${ram}gb-{amd,intel}[-sc1]    |
| voc-g | Optimized Cloud Compute : General Purpose   | voc-g-\${nproc}c-\${ram}gb-\${disk}s-amd[-sc1] |
| voc-c | Optimized Cloud Compute : CPU Optimized     | voc-c-\${nproc}c-\${ram}gb-\${disk}s-amd[-sc1] |
| voc-m | Optimized Cloud Compute : Memory Optimized  | voc-m-\${nproc}c-\${ram}gb-\${disk}s-amd[-sc1] |
| voc-s | Optimized Cloud Compute : Storage Optimized | voc-s-\${nproc}c-\${ram}gb-\${disk}s-amd[-sc1] |
| vcg   | Cloud GPU                                   | vcg-\${gpu}-\${nproc}c-\${ram}g-\${vram}vram     |

(The "-sc1" suffix is for the "sao" region (São Paulo, Brazil), which has
separate plan types because it's more expensive there.)

For selecting the appropriate region ID, here is a list of the
regions in the US:

| ID  | Location       | HDD | NVMe |
|-----+----------------+-----+------|
| atl | Atlanta        | yes |      |
| dfw | Dallas         | yes |      |
| ewr | New Jersey     | yes | yes  |
| hnl | Honolulu       | yes |      |
| lax | Los Angeles    | yes | yes  |
| mia | Miami          | yes |      |
| ord | Chicago        | yes |      |
| sea | Seattle        | yes |      |
| sjc | Silicon Valley | yes |      |
EOF
}
readonly os_iso=159 # From `vultr-cli os list`
# Create (or re-provision) the Vultr instance labeled $1 on plan $2 in
# region $3, and install the matching rootfs image onto it by booting an
# Arch Linux ISO whose cloud-init user-data performs the install.
#
# Globals:   os_iso (read), VULTR_API_KEY (set + exported)
# Arguments: $1 - instance hostname/label (also selects images/NAME.rootfs.img)
#            $2 - Vultr plan ID
#            $3 - Vultr region ID
# Requires:  secrets/vultr-api-key.txt, bin/vultr-upload, bin/vultr-iso-id,
#            bin/vultr-api, bin/umorpha-install, vultr-cli, jq
run() {
local arg_name arg_plan arg_region
arg_name=$1
arg_plan=$2
arg_region=$3
# NOTE(review): the $(cat …) inside `export` masks a read failure from
# `set -e`; splitting the assignment from the export would surface it.
export VULTR_API_KEY=$(cat secrets/vultr-api-key.txt)
# Upload rootfs
printf >&2 ':: Uploading rootfs...\n'
local rootfs_url
rootfs_url=$(bin/vultr-upload "images/${arg_name%%.*}.rootfs.img" "s3://umorpha-images/${arg_name%%.*}.rootfs.img")
# Upload ISO
printf >&2 ':: Uploading bootstrap ISO...\n'
local iso_id
iso_id=$(bin/vultr-iso-id https://mirror.dal10.us.leaseweb.net/archlinux/iso/2023.11.01/archlinux-2023.11.01-x86_64.iso)
# Set up instance
# Look up an existing instance with this label; empty means "create new".
local instance_id
instance_id=$(bin/vultr-api "instances?label=${arg_name}" | jq -r '.instances[]|.id')
# First-boot script delivered as cloud-init user-data to the Arch ISO.
# Unescaped $(...)/${...} are expanded HERE (baking in the helper scripts
# as base64, the rootfs URL, and the API key); escaped \$ forms run ON THE
# INSTANCE.  Do not insert comment lines inside the string below — they
# would become part of the payload.
# NOTE(review): the payload declares #!/bin/sh but uses [[ ]] and pipefail
# (bashisms); this works when /bin/sh is bash, as on the Arch ISO — confirm.
local user_data
user_data="#!/bin/sh
set -euE -o pipefail
cd /tmp
while [[ \$(systemctl is-active pacman-init.service) != active ]]; do
echo 'Waiting for pacman to initialize...'
sleep 1
done
set -x
pacman -Sy --needed --noconfirm jq
echo '$(base64 bin/umorpha-install)' | base64 -d > umorpha-install
echo '$(base64 bin/vultr-api)' | base64 -d > vultr-api
chmod 755 umorpha-install vultr-api
# Grow /tmp.
# By default it's half the RAM (2G rn), but we need ~1.2G.
# Give it 1.5GiB.
mount -o remount,size=1572864k /tmp
curl ${rootfs_url} >./${arg_name%%.*}.rootfs.img
./umorpha-install /dev/vda ./${arg_name%%.*}.rootfs.img
export VULTR_API_KEY=${VULTR_API_KEY}
instance_id=\$(curl --fail-with-body --no-progress-meter http://169.254.169.254/v1/instance-v2-id)
./vultr-api instances/\${instance_id} -XPATCH --data '{\"user_data\":\"\"}'
./vultr-api instances/\${instance_id}/iso/detach -XPOST
reboot
"
if [[ -z "$instance_id" ]]; then
# No existing instance with this label: create one booted from the ISO.
printf >&2 ':: Creating instance...\n'
vultr-cli instance create \
--host="$arg_name" \
--label="$arg_name" \
\
--plan="$arg_plan" \
--region="$arg_region" \
--ipv6=true \
--auto-backup=true \
\
--os="$os_iso" \
--iso="$iso_id" \
\
--userdata="$user_data"
else
# Existing instance: patch it in place, re-attach the ISO, and reboot it
# so the user-data installer runs again (wiping/reinstalling the machine).
printf >&2 ':: Updating instance %s...\n' "$instance_id"
# Determine what we need to patch.
instance_data="$(bin/vultr-api instances/$instance_id)"
declare -A patch
# The API expects user_data base64-encoded; region/plan only if changed.
patch[user_data]="$(base64 --wrap=0 <<<"$user_data")"
if [[ "$(jq -r .instance.region <<<"$instance_data")" != "$arg_region" ]]; then
patch[region]="$arg_region"
fi
if [[ "$(jq -r .instance.plan <<<"$instance_data")" != "$arg_plan" ]]; then
patch[plan]="$arg_plan"
fi
# Format that patch as JSON.
# NOTE(review): values are interpolated unescaped into the JSON; safe for
# the base64/ID values used here, but not for arbitrary strings.
data='{'
for k in "${!patch[@]}"; do
data+="\"$k\":\"${patch[$k]}\","
done
data="${data%,}}"
# Apply.
printf >&2 ' -> Halting instance...\n'
bin/vultr-api instances/$instance_id/halt -XPOST
printf >&2 ' -> Updating instance...\n'
bin/vultr-api instances/$instance_id -XPATCH --data "$data" | jq
printf >&2 ' -> Attaching ISO...\n'
resp=$(bin/vultr-api instances/$instance_id/iso/attach -XPOST --data "{\"iso_id\":\"${iso_id}\"}")
resp_state=$(jq -r .iso_status.state <<<"$resp")
resp_id=$(jq -r .iso_status.iso_id <<<"$resp")
# Poll until the ISO reports as mounted before booting the instance.
while ! [[ "$resp_state" == isomounted && "$resp_id" == "$iso_id" ]]; do
printf >&2 ' instance iso: state=%q id=%q\n' "$resp_state" "$resp_id"
sleep 1
resp=$(bin/vultr-api instances/$instance_id/iso)
resp_state=$(jq -r .iso_status.state <<<"$resp")
resp_id=$(jq -r .iso_status.iso_id <<<"$resp")
done
printf >&2 ' instance iso: state=%q id=%q\n' "$resp_state" "$resp_id"
printf >&2 ' -> Starting instance...\n'
bin/vultr-api instances/$instance_id/start -XPOST
fi
}
# Parse command-line flags and dispatch to `usage` or `run`.
# Exits 2 (after pointing at --help) on any usage error.
main() {
	local arg_name=
	local arg_plan=
	local arg_region=
	local arg_mode=run

	local args
	if ! args="$(getopt -n "${0##*/}" -o '' -l 'help,name:,plan:,region:' -- "$@")"; then
		arg_mode=error
	else
		eval "set -- $args"
		while true; do
			case "$1" in
				--help) shift; arg_mode=help;;
				--name) shift; arg_name=$1; shift;;
				--plan) shift; arg_plan=$1; shift;;
				--region) shift; arg_region=$1; shift;;
				--) shift; break;;
			esac
		done
		# BUG FIX: was `[[ $@ -gt 0 ]]` (a string test on the joined args),
		# and the printf was missing the program-name argument for its first
		# %s as well as the trailing newline.
		if (( $# > 0 )); then
			printf >&2 '%s: error: unexpected positional arguments: %s\n' "${0##*/}" "${*@Q}"
			arg_mode=error
		fi
		# Only demand the required flags when we are actually going to run;
		# `--help` must work without them (previously it tripped all three
		# errors and exited 2 instead of printing help).
		if [[ "$arg_mode" == run ]]; then
			if [[ -z "$arg_name" ]]; then
				printf >&2 '%s: error: --name=HOSTNAME is required\n' "${0##*/}"
				arg_mode=error
			fi
			if [[ -z "$arg_plan" ]]; then
				printf >&2 '%s: error: --plan=PLAN_ID is required\n' "${0##*/}"
				arg_mode=error
			fi
			if [[ -z "$arg_region" ]]; then
				printf >&2 '%s: error: --region=REGION_ID is required\n' "${0##*/}"
				arg_mode=error
			fi
		fi
	fi
	# Dispatch outside the getopt if/else so that a getopt parse failure also
	# reaches the error arm (previously it fell through and exited 0).
	case "$arg_mode" in
		error) printf >&2 "Try '%q --help' for more information\n" "${0##*/}"; exit 2;;
		help) usage;; # was `show_help`, which is not defined anywhere
		run) run "$arg_name" "$arg_plan" "$arg_region";;
	esac
}
main "$@"