fix: only support libvirt

This commit is contained in:
2024-09-29 17:32:01 +02:00
parent 5ac1db15c1
commit 9f07beb4cd
5 changed files with 12 additions and 424 deletions

20
.vscode/settings.json vendored
View File

@@ -1,11 +1,11 @@
{
"cSpell.words": [
"cacher",
"DATACENTER",
"datacenters",
"ESXI",
"govc",
"hyperv",
"pkrvars"
]
}
"cSpell.words": [
"cacher",
"DATACENTER",
"datacenters",
"ESXI",
"govc",
"pkrvars"
],
"makefile.configureOnOpen": false
}

View File

@@ -5,14 +5,12 @@ VAR_FILE :=
VAR_FILE_OPTION := $(addprefix -var-file=,$(VAR_FILE))
help:
@echo type make build-libvirt, make build-uefi-libvirt, make build-proxmox, make build-uefi-proxmox, make build-hyperv, or make build-vsphere
@echo type make build-libvirt, make build-uefi-libvirt, make build-proxmox, or make build-uefi-proxmox
build-libvirt: proxmox-ve-amd64-libvirt.box
build-uefi-libvirt: proxmox-ve-uefi-amd64-libvirt.box
build-proxmox: proxmox-ve-amd64-proxmox.box
build-uefi-proxmox: proxmox-ve-uefi-amd64-proxmox.box
build-hyperv: proxmox-ve-amd64-hyperv.box
build-vsphere: proxmox-ve-amd64-vsphere.box
proxmox-ve-amd64-libvirt.box: provisioners/*.sh proxmox-ve.pkr.hcl Vagrantfile.template $(VAR_FILE)
rm -f $@
@@ -72,41 +70,7 @@ proxmox-ve-uefi-amd64-proxmox.box: provisioners/*.sh proxmox-ve.pkr.hcl Vagrantf
PKR_VAR_vagrant_box=$@ \
packer build -only=proxmox-iso.proxmox-ve-uefi-amd64 -on-error=abort -timestamp-ui $(VAR_FILE_OPTION) proxmox-ve.pkr.hcl
proxmox-ve-amd64-hyperv.box: provisioners/*.sh proxmox-ve.pkr.hcl Vagrantfile.template $(VAR_FILE)
rm -f $@
mkdir -p tmp
CHECKPOINT_DISABLE=1 \
PACKER_LOG=1 \
PACKER_LOG_PATH=$@.init.log \
packer init proxmox-ve.pkr.hcl
PACKER_OUTPUT_BASE_DIR=$${PACKER_OUTPUT_BASE_DIR:-.} \
CHECKPOINT_DISABLE=1 \
PACKER_LOG=1 \
PACKER_LOG_PATH=$@.log \
PKR_VAR_vagrant_box=$@ \
packer build -only=hyperv-iso.proxmox-ve-amd64 -on-error=abort -timestamp-ui $(VAR_FILE_OPTION) proxmox-ve.pkr.hcl
@./box-metadata.sh hyperv proxmox-ve-amd64 $@
proxmox-ve-amd64-vsphere.box: provisioners/*.sh proxmox-ve-vsphere.pkr.hcl $(VAR_FILE)
rm -f $@
mkdir -p tmp
CHECKPOINT_DISABLE=1 \
PACKER_LOG=1 \
PACKER_LOG_PATH=$@.init.log \
packer init proxmox-ve-vsphere.pkr.hcl
PACKER_OUTPUT_BASE_DIR=$${PACKER_OUTPUT_BASE_DIR:-.} \
CHECKPOINT_DISABLE=1 \
PACKER_LOG=1 \
PACKER_LOG_PATH=$@.log \
packer build -only=vsphere-iso.proxmox-ve-amd64 -on-error=abort -timestamp-ui $(VAR_FILE_OPTION) proxmox-ve-vsphere.pkr.hcl
rm -rf tmp/$@-contents
mkdir -p tmp/$@-contents
echo '{"provider":"vsphere"}' >tmp/$@-contents/metadata.json
cp Vagrantfile.template tmp/$@-contents/Vagrantfile
tar cvf $@ -C tmp/$@-contents .
@./box-metadata.sh vsphere proxmox-ve-amd64 $@
clean:
rm -rf packer_cache $${PACKER_OUTPUT_BASE_DIR:-.}/output-proxmox-ve*
.PHONY: help build-libvirt build-uefi-libvirt build-proxmox build-uefi-proxmox build-hyperv build-vsphere clean
.PHONY: help build-libvirt build-uefi-libvirt build-proxmox build-uefi-proxmox clean

146
README.md
View File

@@ -55,152 +55,6 @@ make build-proxmox
**NB** There is no way to use the created template with vagrant (the [vagrant-proxmox plugin](https://github.com/telcat/vagrant-proxmox) is no longer compatible with recent vagrant versions). Instead, use packer or terraform.
## Hyper-V
Follow the [rgl/windows-vagrant Hyper-V Usage section](https://github.com/rgl/windows-vagrant#hyper-v-usage) to create the `Vagrant` vSwitch for use by the packer template and the example vagrant environment.
Create the `proxmox` vSwitch for use as the example vagrant environment's
second network interface:
```bash
PowerShell -NoLogo -NoProfile -ExecutionPolicy Bypass <<'EOF'
$switchName = 'proxmox'
$networkAdapterName = "vEthernet ($switchName)"
$networkAdapterIpAddress = '10.10.10.1'
$networkAdapterIpPrefixLength = 24
# create the vSwitch.
New-VMSwitch -Name $switchName -SwitchType Internal | Out-Null
# assign it an host IP address.
$networkAdapter = Get-NetAdapter $networkAdapterName
$networkAdapter | New-NetIPAddress `
-IPAddress $networkAdapterIpAddress `
-PrefixLength $networkAdapterIpPrefixLength `
| Out-Null
# remove all virtual switches from the windows firewall.
Set-NetFirewallProfile `
-DisabledInterfaceAliases (
Get-NetAdapter -name "vEthernet*" | Where-Object {$_.ifIndex}
).InterfaceAlias
EOF
```
Set the Hyper-V details:
```bash
cat >secrets-hyperv.sh <<'EOF'
# set this value when you need to set the VM Switch Name.
export HYPERV_SWITCH_NAME='Vagrant'
# set this value when you need to set the VM VLAN ID.
unset HYPERV_VLAN_ID
#export HYPERV_VLAN_ID=''
# set the credentials that the guest will use
# to connect to this host smb share.
# NB you should create a new local user named _vagrant_share
# and use that one here instead of your user credentials.
# NB it would be nice for this user to have its credentials
# automatically rotated, if you implement that feature,
# let me know!
export VAGRANT_SMB_USERNAME='_vagrant_share'
export VAGRANT_SMB_PASSWORD=''
# remove the virtual switch from the windows firewall.
# NB execute if the VM fails to obtain an IP address from DHCP.
PowerShell -Command 'Set-NetFirewallProfile -DisabledInterfaceAliases (Get-NetAdapter -name "vEthernet*" | Where-Object {$_.ifIndex}).InterfaceAlias'
EOF
source secrets-hyperv.sh
```
Create the base box:
```bash
make build-hyperv
```
Add the base box as suggested in the make output:
```bash
vagrant box add -f proxmox-ve-amd64 proxmox-ve-amd64-hyperv.box
```
Start the example vagrant environment with:
```bash
cd example
vagrant up --provider=hyperv
```
## VMware vSphere usage
Download [govc](https://github.com/vmware/govmomi/releases/latest) and place it inside your `/usr/local/bin` directory.
Set your VMware vSphere details and test the connection:
```bash
sudo apt-get install build-essential patch ruby-dev zlib1g-dev liblzma-dev
vagrant plugin install vagrant-vsphere
cat >secrets-vsphere.sh <<'EOF'
export GOVC_INSECURE='1'
export GOVC_HOST='vsphere.local'
export GOVC_URL="https://$GOVC_HOST/sdk"
export GOVC_USERNAME='administrator@vsphere.local'
export GOVC_PASSWORD='password'
export GOVC_DATACENTER='Datacenter'
export GOVC_CLUSTER='Cluster'
export GOVC_DATASTORE='Datastore'
export VSPHERE_OS_ISO="[$GOVC_DATASTORE] iso/proxmox-ve_8.2-2.iso"
export VSPHERE_ESXI_HOST='esxi.local'
export VSPHERE_TEMPLATE_FOLDER='test/templates'
# NB the VSPHERE_TEMPLATE_NAME last segment MUST match the
# builders.vm_name property inside the packer template.
export VSPHERE_TEMPLATE_NAME="$VSPHERE_TEMPLATE_FOLDER/proxmox-ve-amd64"
export VSPHERE_VM_FOLDER='test'
export VSPHERE_VM_NAME='proxmox-ve-example'
# NB for the nested VMs to access the network, this VLAN port group security
# policy MUST be configured to Accept:
# Promiscuous mode
# Forged transmits
export VSPHERE_VLAN='packer'
export VSPHERE_IP_WAIT_ADDRESS='0.0.0.0/0'
# set the credentials that the guest will use
# to connect to this host smb share.
# NB you should create a new local user named _vagrant_share
# and use that one here instead of your user credentials.
# NB it would be nice for this user to have its credentials
# automatically rotated, if you implement that feature,
# let me know!
export VAGRANT_SMB_USERNAME='_vagrant_share'
export VAGRANT_SMB_PASSWORD=''
EOF
source secrets-vsphere.sh
# see https://github.com/vmware/govmomi/blob/master/govc/USAGE.md
govc version
govc about
govc datacenter.info # list datacenters
govc find # find all managed objects
```
Download the Proxmox ISO (you can find the full iso URL in the [proxmox-ve.json](proxmox-ve.json) file) and place it inside the datastore as defined by the `iso_paths` property that is inside the [packer template](proxmox-ve-vsphere.json) file.
See the [example Vagrantfile](example/Vagrantfile) to see how you could use a cloud-init configuration to configure the VM.
Type `make build-vsphere` and follow the instructions.
Try the example guest:
```bash
source secrets-vsphere.sh
cd example
vagrant up --provider=vsphere --no-destroy-on-error --no-tty
vagrant ssh
exit
vagrant destroy -f
```
## Packer build performance options
To improve the build performance you can use the following options.

View File

@@ -1,170 +0,0 @@
packer {
required_plugins {
# see https://github.com/hashicorp/packer-plugin-vsphere
vsphere = {
version = "1.4.0"
source = "github.com/hashicorp/vsphere"
}
}
}
variable "cpus" {
type = number
default = 2
}
variable "memory" {
type = number
default = 2 * 1024
}
variable "disk_size" {
type = number
default = 20 * 1024
}
variable "vsphere_os_iso" {
type = string
default = env("VSPHERE_OS_ISO")
}
variable "vsphere_host" {
type = string
default = env("GOVC_HOST")
}
variable "vsphere_username" {
type = string
default = env("GOVC_USERNAME")
}
variable "vsphere_password" {
type = string
default = env("GOVC_PASSWORD")
}
variable "vsphere_esxi_host" {
type = string
default = env("VSPHERE_ESXI_HOST")
}
variable "vsphere_datacenter" {
type = string
default = env("GOVC_DATACENTER")
}
variable "vsphere_cluster" {
type = string
default = env("GOVC_CLUSTER")
}
variable "vsphere_datastore" {
type = string
default = env("GOVC_DATASTORE")
}
variable "vsphere_folder" {
type = string
default = env("VSPHERE_TEMPLATE_FOLDER")
}
variable "vsphere_network" {
type = string
default = env("VSPHERE_VLAN")
}
variable "vsphere_ip_wait_address" {
type = string
default = env("VSPHERE_IP_WAIT_ADDRESS")
}
variable "apt_cache_host" {
type = string
default = env("APT_CACHE_HOST")
}
variable "apt_cache_port" {
type = string
default = env("APT_CACHE_PORT")
}
variable "shell_provisioner_scripts" {
type = list(string)
default = [
"provisioners/apt_proxy.sh",
"provisioners/upgrade.sh",
"provisioners/network.sh",
"provisioners/localisation-pt.sh",
"provisioners/reboot.sh",
"provisioners/provision.sh",
]
}
source "vsphere-iso" "proxmox-ve-amd64" {
vm_name = "proxmox-ve-amd64"
guest_os_type = "debian12_64Guest"
NestedHV = true
CPUs = var.cpus
RAM = var.memory
storage {
disk_size = var.disk_size
disk_thin_provisioned = true
}
disk_controller_type = ["pvscsi"]
iso_paths = [
var.vsphere_os_iso,
]
vcenter_server = var.vsphere_host
username = var.vsphere_username
password = var.vsphere_password
insecure_connection = true
datacenter = var.vsphere_datacenter
cluster = var.vsphere_cluster
host = var.vsphere_esxi_host
folder = var.vsphere_folder
datastore = var.vsphere_datastore
network_adapters {
network = var.vsphere_network
network_card = "vmxnet3"
}
convert_to_template = true
ssh_username = "root"
ssh_password = "vagrant"
ssh_timeout = "60m"
cd_label = "proxmox-ais"
cd_files = ["answer.toml"]
boot_wait = "5s"
boot_command = [
# select Advanced Options.
"<end><enter>",
# select Install Proxmox VE (Automated).
"<down><down><down><enter>",
# wait for the shell prompt.
"<wait1m>",
# do the installation.
"proxmox-fetch-answer partition >/run/automatic-installer-answers<enter><wait>exit<enter>",
# wait for the installation to finish.
"<wait4m>",
# login.
"root<enter><wait5s>vagrant<enter><wait5s>",
# install the guest agent.
"apt-get update<enter><wait1m>",
"apt-get install -y open-vm-tools<enter><wait30s>",
]
shutdown_command = "poweroff"
}
build {
sources = [
"source.vsphere-iso.proxmox-ve-amd64",
]
provisioner "shell" {
expect_disconnect = true
environment_vars = [
"apt_cache_host=${var.apt_cache_host}",
"apt_cache_port=${var.apt_cache_port}",
]
scripts = var.shell_provisioner_scripts
}
}

View File

@@ -10,11 +10,6 @@ packer {
version = "1.1.8"
source = "github.com/hashicorp/proxmox"
}
# see https://github.com/hashicorp/packer-plugin-hyperv
hyperv = {
version = "1.1.3"
source = "github.com/hashicorp/hyperv"
}
# see https://github.com/hashicorp/packer-plugin-vagrant
vagrant = {
version = "1.1.5"
@@ -57,16 +52,6 @@ variable "proxmox_node" {
default = env("PROXMOX_NODE")
}
variable "hyperv_switch_name" {
type = string
default = env("HYPERV_SWITCH_NAME")
}
variable "hyperv_vlan_id" {
type = string
default = env("HYPERV_VLAN_ID")
}
variable "apt_cache_host" {
type = string
default = env("APT_CACHE_HOST")
@@ -301,56 +286,12 @@ source "proxmox-iso" "proxmox-ve-uefi-amd64" {
]
}
source "hyperv-iso" "proxmox-ve-amd64" {
temp_path = "tmp"
headless = true
generation = 2
enable_virtualization_extensions = true
enable_mac_spoofing = true
cpus = var.cpus
memory = var.memory
switch_name = var.hyperv_switch_name
vlan_id = var.hyperv_vlan_id
disk_size = var.disk_size
iso_url = var.iso_url
iso_checksum = var.iso_checksum
output_directory = "${var.output_base_dir}/output-{{build_name}}"
ssh_username = "root"
ssh_password = "vagrant"
ssh_timeout = "60m"
first_boot_device = "DVD"
boot_order = ["SCSI:0:0"]
cd_label = "proxmox-ais"
cd_files = ["answer.toml"]
boot_wait = "5s"
boot_command = [
# select Advanced Options.
"<end><enter>",
# select Install Proxmox VE (Automated).
"<down><down><down><enter>",
# wait for the shell prompt.
"<wait1m>",
# do the installation.
"proxmox-fetch-answer partition >/run/automatic-installer-answers<enter><wait>exit<enter>",
# wait for the installation to finish.
"<wait4m>",
# login.
"root<enter><wait5s>vagrant<enter><wait5s>",
# install the guest agent.
"rm -f /etc/apt/sources.list.d/{pve-enterprise,ceph}.list<enter>",
"apt-get update<enter><wait1m>",
"apt-get install -y hyperv-daemons<enter><wait30s>",
]
shutdown_command = "poweroff"
}
build {
sources = [
"source.qemu.proxmox-ve-amd64",
"source.qemu.proxmox-ve-uefi-amd64",
"source.proxmox-iso.proxmox-ve-amd64",
"source.proxmox-iso.proxmox-ve-uefi-amd64",
"source.hyperv-iso.proxmox-ve-amd64",
]
provisioner "shell" {
@@ -365,7 +306,6 @@ build {
post-processor "vagrant" {
only = [
"qemu.proxmox-ve-amd64",
"hyperv-iso.proxmox-ve-amd64",
]
output = var.vagrant_box
vagrantfile_template = "Vagrantfile.template"