PR updates

* README.md
  + update list of modern instance types
  + add caveat regarding linux-vanilla vs. linux-virt
* alpine-ami.yaml
  + build instance type is always t3.nano
  + block device where we build is always /dev/xvdf
  + add optional AMI encryption
  + always enable AMI SR-IOV flag (vanilla & virt both have the necessary driver)
  + no need to pass volume_name to make_ami.sh
* make_ami.sh
  + replace hard tabs with 4 spaces
  + always set up edge repositories
  + no need to add mkinitfs package, it's a dependency of linux-*
  + fix update of /etc/inittab
  + fix configuration of NTP
  + declare local vars in main()
  + device is always /dev/xvdf
* variables.json-default/example
  + improve comment for kernel_flavor
  + default add_repos is now empty
  + remove acct & e2fsprogs-extra from add_pkgs
  + add optional AMI encryption
  + remove sriov_enable, build_instance_type, and volume_name vars
This commit is contained in:
Jake Buchholz 2018-08-27 22:26:28 -07:00 committed by Mike Crute
parent 356105f23d
commit 95b7837c9f
5 changed files with 241 additions and 235 deletions

View File

@ -8,8 +8,8 @@ containing Alpine Linux. The AMI is designed to work with most EC2 features
such as Elastic Network Adapters and NVME EBS volumes by default. If anything
is missing please report a bug.
This image can be launched on any modern instance type. Including T2, M5, C5,
I3, R4, P2, P3, X1, X1e, D2. Other instances may also work but have not been
This image can be launched on any modern instance type, including T3, M5, C5,
I3, R5, P3, X1, X1e, D2, Z1d. Other instances may also work but have not been
tested. If you find an issue with instance support for any current generation
instance please file a bug against this project.
@ -50,10 +50,15 @@ its development and thus there are some sharp edges.
hardware so it seems unlikely that they will be supported going forward. Thus
this project does not support them.
- The linux-vanilla kernel and all the linux-firmware packages it installs are much
larger than is necessary for an AMI designed to run on EC2. Unfortunately,
the linux-virt kernel is currently missing NVMe support, which is required for
the newest generation of instance families.
- The aws-ena-driver-vanilla package is still in edge/testing, and requires the
matching linux-vanilla package from edge/main. When ENA is available in an
alpine version release, edge/testing and edge/main should no longer be
necessary.
alpine version release (ideally with a 'virt' kernel flavor), edge/testing
and edge/main should no longer be necessary.
- [cloud-init](https://cloudinit.readthedocs.io/en/latest/) is not currently
supported on Alpine Linux. Instead this image uses

View File

@ -1,6 +1,6 @@
variables:
# NOTE: Additional configuration is set via the `variables.json` file.
# NOTE: Configuration is done with a `variables.json` file.
# To use default values, simply `cp variables.json-default variables.json`.
# See `variables.json-example` for full configuration variable descriptions.
@ -17,11 +17,11 @@ builders:
vpc_id: "{{user `vpc`}}"
subnet_id: "{{user `subnet`}}"
security_group_id: "{{user `security_group`}}"
instance_type: "{{user `build_instance_type`}}"
instance_type: "t3.nano"
associate_public_ip_address: "{{user `public_ip`}}"
launch_block_device_mappings:
- volume_type: "gp2"
device_name: "{{user `volume_name`}}"
device_name: "/dev/xvdf"
delete_on_termination: "true"
volume_size: "{{user `volume_size`}}"
ssh_username: "ec2-user"
@ -42,13 +42,14 @@ builders:
ami_description: "{{user `ami_desc_prefix`}}{{user `alpine_release`}}-r{{user `ami_release`}}{{user `ami_desc_suffix`}}"
ami_virtualization_type: "hvm"
ami_root_device:
source_device_name: "{{user `volume_name`}}"
source_device_name: "/dev/xvdf"
device_name: "/dev/xvda"
delete_on_termination: "true"
volume_size: "{{user `volume_size`}}"
volume_type: "gp2"
encrypt_boot: "{{user `encrypt_ami`}}"
ena_support: "{{user `ena_enable`}}"
sriov_support: "{{user `sriov_enable`}}"
sriov_support: "true"
ami_groups: "{{user `ami_access`}}"
ami_regions: "{{user `deploy_regions`}}"
@ -56,4 +57,4 @@ builders:
provisioners:
- type: "shell"
script: "make_ami.sh"
execute_command: 'sudo sh -c "{{ .Vars }} {{ .Path }} {{user `volume_name`}} {{user `kernel_flavor`}} ''{{user `add_repos`}}'' ''{{user `add_pkgs`}}''"'
execute_command: 'sudo sh -c "{{ .Vars }} {{ .Path }} {{user `kernel_flavor`}} ''{{user `add_repos`}}'' ''{{user `add_pkgs`}}''"'

View File

@ -1,5 +1,5 @@
#!/bin/sh
# vim: set ts=4 noet:
# vim: set ts=4 et:
set -eu
@ -10,308 +10,309 @@ set -eu
: ${ALPINE_KEYS_SHA256:="f7832b848cedca482b145011cf516e82392f02a10713875cb09f39c7221c6f17"}
die() {
printf '\033[1;31mERROR:\033[0m %s\n' "$@" >&2 # bold red
exit 1
printf '\033[1;31mERROR:\033[0m %s\n' "$@" >&2 # bold red
exit 1
}
einfo() {
printf '\n\033[1;36m> %s\033[0m\n' "$@" >&2 # bold cyan
printf '\n\033[1;36m> %s\033[0m\n' "$@" >&2 # bold cyan
}
rc_add() {
local target="$1"; shift # target directory
local runlevel="$1"; shift # runlevel name
local services="$*" # names of services
local target="$1"; shift # target directory
local runlevel="$1"; shift # runlevel name
local services="$*" # names of services
local svc; for svc in $services; do
mkdir -p "$target"/etc/runlevels/$runlevel
ln -s /etc/init.d/$svc "$target"/etc/runlevels/$runlevel/$svc
echo " * service $svc added to runlevel $runlevel"
done
local svc; for svc in $services; do
mkdir -p "$target"/etc/runlevels/$runlevel
ln -s /etc/init.d/$svc "$target"/etc/runlevels/$runlevel/$svc
echo " * service $svc added to runlevel $runlevel"
done
}
wgets() (
local url="$1" # url to fetch
local sha256="$2" # expected SHA256 sum of output
local dest="$3" # output path and filename
local url="$1" # url to fetch
local sha256="$2" # expected SHA256 sum of output
local dest="$3" # output path and filename
wget -T 10 -q -O "$dest" "$url"
echo "$sha256 $dest" | sha256sum -c > /dev/null
wget -T 10 -q -O "$dest" "$url"
echo "$sha256 $dest" | sha256sum -c > /dev/null
)
validate_block_device() {
local dev="$1" # target directory
local dev="$1" # target directory
lsblk -P --fs "$dev" >/dev/null 2>&1 || \
die "'$dev' is not a valid block device"
lsblk -P --fs "$dev" >/dev/null 2>&1 || \
die "'$dev' is not a valid block device"
if lsblk -P --fs "$dev" | grep -vq 'FSTYPE=""'; then
die "Block device '$dev' is not blank"
fi
if lsblk -P --fs "$dev" | grep -vq 'FSTYPE=""'; then
die "Block device '$dev' is not blank"
fi
}
fetch_apk_tools() {
local store="$(mktemp -d)"
local tarball="$(basename $APK_TOOLS_URI)"
local store="$(mktemp -d)"
local tarball="$(basename $APK_TOOLS_URI)"
wgets "$APK_TOOLS_URI" "$APK_TOOLS_SHA256" "$store/$tarball"
tar -C "$store" -xf "$store/$tarball"
wgets "$APK_TOOLS_URI" "$APK_TOOLS_SHA256" "$store/$tarball"
tar -C "$store" -xf "$store/$tarball"
find "$store" -name apk
find "$store" -name apk
}
make_filesystem() {
local device="$1" # target device path
local target="$2" # mount target
local device="$1" # target device path
local target="$2" # mount target
mkfs.ext4 "$device"
e2label "$device" /
mount "$device" "$target"
mkfs.ext4 "$device"
e2label "$device" /
mount "$device" "$target"
}
setup_repositories() {
local target="$1" # target directory
local add_repos="$2" # extra repo lines, comma separated
local target="$1" # target directory
local add_repos="$2" # extra repo lines, comma separated
mkdir -p "$target"/etc/apk/keys
cat > "$target"/etc/apk/repositories <<-EOF
http://dl-cdn.alpinelinux.org/alpine/v$ALPINE_RELEASE/main
http://dl-cdn.alpinelinux.org/alpine/v$ALPINE_RELEASE/community
EOF
echo "$add_repos" | tr , "\012" >> "$target"/etc/apk/repositories
mkdir -p "$target"/etc/apk/keys
cat > "$target"/etc/apk/repositories <<EOF
http://dl-cdn.alpinelinux.org/alpine/v$ALPINE_RELEASE/main
http://dl-cdn.alpinelinux.org/alpine/v$ALPINE_RELEASE/community
@edge-main http://dl-cdn.alpinelinux.org/alpine/edge/main
@edge-community http://dl-cdn.alpinelinux.org/alpine/edge/community
@edge-testing http://dl-cdn.alpinelinux.org/alpine/edge/testing
EOF
echo "$add_repos" | tr , "\012" >> "$target"/etc/apk/repositories
}
fetch_keys() {
local target="$1"
local tmp="$(mktemp -d)"
local target="$1"
local tmp="$(mktemp -d)"
wgets "$ALPINE_KEYS" "$ALPINE_KEYS_SHA256" "$tmp/alpine-keys.apk"
tar -C "$target" -xvf "$tmp"/alpine-keys.apk etc/apk/keys
rm -rf "$tmp"
wgets "$ALPINE_KEYS" "$ALPINE_KEYS_SHA256" "$tmp/alpine-keys.apk"
tar -C "$target" -xvf "$tmp"/alpine-keys.apk etc/apk/keys
rm -rf "$tmp"
}
setup_chroot() {
local target="$1"
local target="$1"
mount -t proc none "$target"/proc
mount --bind /dev "$target"/dev
mount --bind /sys "$target"/sys
mount -t proc none "$target"/proc
mount --bind /dev "$target"/dev
mount --bind /sys "$target"/sys
# Don't want to ship this but it's needed for bootstrap. Will be removed in
# the cleanup stage.
install -Dm644 /etc/resolv.conf "$target"/etc/resolv.conf
# Don't want to ship this but it's needed for bootstrap. Will be removed in
# the cleanup stage.
install -Dm644 /etc/resolv.conf "$target"/etc/resolv.conf
}
install_core_packages() {
local target="$1" # target directory
local flavor="$2" # kernel flavor
local add_pkgs="$3" # extra packages, space separated
local target="$1" # target directory
local flavor="$2" # kernel flavor
local add_pkgs="$3" # extra packages, space separated
# Most from: https://git.alpinelinux.org/cgit/alpine-iso/tree/alpine-virt.packages
#
# linux-$flavor - linux kernel flavor to install
# e2fsprogs - required by init scripts to maintain ext4 volumes
# mkinitfs - required to build custom initfs
# sudo - to allow alpine user to become root, disallow root SSH logins
# tiny-ec2-bootstrap - to bootstrap system from EC2 metadata
chroot "$target" apk --no-cache add \
linux-"$flavor" \
alpine-mirrors \
chrony \
e2fsprogs \
mkinitfs \
openssh \
sudo \
tiny-ec2-bootstrap \
tzdata \
$add_pkgs
# Most from: https://git.alpinelinux.org/cgit/alpine-iso/tree/alpine-virt.packages
#
# sudo - to allow alpine user to become root, disallow root SSH logins
# tiny-ec2-bootstrap - to bootstrap system from EC2 metadata
#
chroot "$target" apk --no-cache add \
linux-"$flavor" \
alpine-mirrors \
chrony \
e2fsprogs \
openssh \
sudo \
tiny-ec2-bootstrap \
tzdata \
$add_pkgs
chroot "$target" apk --no-cache add --no-scripts syslinux
chroot "$target" apk --no-cache add --no-scripts syslinux
# Disable starting getty for physical ttys because they're all inaccessible
# anyhow. With this configuration boot messages will still display in the
# EC2 console.
sed -Ei '/^tty\d/s/^/#/' "$target"/etc/inittab
# Disable starting getty for physical ttys because they're all inaccessible
# anyhow. With this configuration boot messages will still display in the
# EC2 console.
sed -Ei '/^tty[0-9]/s/^/#/' \
"$target"/etc/inittab
# Make it a little more obvious who is logged in by adding username to the
# prompt
sed -i "s/^export PS1='/&\\\\u@/" "$target"/etc/profile
# Make it a little more obvious who is logged in by adding username to the
# prompt
sed -i "s/^export PS1='/&\\\\u@/" "$target"/etc/profile
}
create_initfs() {
local target="$1"
local target="$1"
# Create ENA feature for mkinitfs
echo "kernel/drivers/net/ethernet/amazon" > \
"$target"/etc/mkinitfs/features.d/ena.modules
# Create ENA feature for mkinitfs
echo "kernel/drivers/net/ethernet/amazon" > \
"$target"/etc/mkinitfs/features.d/ena.modules
# Enable ENA and NVME features these don't hurt for any instance and are
# hard requirements of the 5 series and i3 series of instances
sed -Ei 's/^features="([^"]+)"/features="\1 nvme ena"/' \
"$target"/etc/mkinitfs/mkinitfs.conf
# Enable ENA and NVME features these don't hurt for any instance and are
# hard requirements of the 5 series and i3 series of instances
sed -Ei 's/^features="([^"]+)"/features="\1 nvme ena"/' \
"$target"/etc/mkinitfs/mkinitfs.conf
chroot "$target" /sbin/mkinitfs $(basename $(find "$target"/lib/modules/* -maxdepth 0))
chroot "$target" /sbin/mkinitfs $(basename $(find "$target"/lib/modules/* -maxdepth 0))
}
setup_extlinux() {
local target="$1"
local target="$1"
# Must use disk labels instead of UUID or devices paths so that this works
# across instance families. UUID works for many instances but breaks on the
# NVME ones because EBS volumes are hidden behind NVME devices.
#
# Enable ext4 because the root device is formatted ext4
#
# Shorten timeout because EC2 has no way to interact with instance console
#
# ttyS0 is the target for EC2s "Get System Log" feature whereas tty0 is the
# target for EC2s "Get Instance Screenshot" feature. Enabling the serial
# port early in extlinux gives the most complete output in the system log.
sed -Ei -e "s|^[# ]*(root)=.*|\1=LABEL=/|" \
-e "s|^[# ]*(default_kernel_opts)=.*|\1=\"console=ttyS0 console=tty0\"|" \
-e "s|^[# ]*(serial_port)=.*|\1=ttyS0|" \
-e "s|^[# ]*(modules)=.*|\1=sd-mod,usb-storage,ext4|" \
-e "s|^[# ]*(default)=.*|\1=hardened|" \
-e "s|^[# ]*(timeout)=.*|\1=1|" \
"$target"/etc/update-extlinux.conf
# Must use disk labels instead of UUID or devices paths so that this works
# across instance families. UUID works for many instances but breaks on the
# NVME ones because EBS volumes are hidden behind NVME devices.
#
# Enable ext4 because the root device is formatted ext4
#
# Shorten timeout because EC2 has no way to interact with instance console
#
# ttyS0 is the target for EC2s "Get System Log" feature whereas tty0 is the
# target for EC2s "Get Instance Screenshot" feature. Enabling the serial
# port early in extlinux gives the most complete output in the system log.
sed -Ei -e "s|^[# ]*(root)=.*|\1=LABEL=/|" \
-e "s|^[# ]*(default_kernel_opts)=.*|\1=\"console=ttyS0 console=tty0\"|" \
-e "s|^[# ]*(serial_port)=.*|\1=ttyS0|" \
-e "s|^[# ]*(modules)=.*|\1=sd-mod,usb-storage,ext4|" \
-e "s|^[# ]*(default)=.*|\1=hardened|" \
-e "s|^[# ]*(timeout)=.*|\1=1|" \
"$target"/etc/update-extlinux.conf
}
install_extlinux() {
local target="$1"
local target="$1"
chroot "$target" /sbin/extlinux --install /boot
chroot "$target" /sbin/update-extlinux --warn-only
chroot "$target" /sbin/extlinux --install /boot
chroot "$target" /sbin/update-extlinux --warn-only
}
setup_fstab() {
local target="$1"
local target="$1"
cat > "$target"/etc/fstab <<-EOF
# <fs> <mountpoint> <type> <opts> <dump/pass>
LABEL=/ / ext4 defaults,noatime 1 1
EOF
cat > "$target"/etc/fstab <<EOF
# <fs> <mountpoint> <type> <opts> <dump/pass>
LABEL=/ / ext4 defaults,noatime 1 1
EOF
}
setup_networking() {
local target="$1"
local target="$1"
cat > "$target"/etc/network/interfaces <<-EOF
auto lo
iface lo inet loopback
cat > "$target"/etc/network/interfaces <<EOF
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet dhcp
EOF
auto eth0
iface eth0 inet dhcp
EOF
}
enable_services() {
local target="$1"
local target="$1"
rc_add "$target" default sshd chronyd networking tiny-ec2-bootstrap
rc_add "$target" sysinit devfs dmesg mdev hwdrivers
rc_add "$target" boot modules hwclock swap hostname sysctl bootmisc syslog acpid
rc_add "$target" shutdown killprocs savecache mount-ro
rc_add "$target" default sshd chronyd networking tiny-ec2-bootstrap
rc_add "$target" sysinit devfs dmesg mdev hwdrivers
rc_add "$target" boot modules hwclock swap hostname sysctl bootmisc syslog acpid
rc_add "$target" shutdown killprocs savecache mount-ro
}
create_alpine_user() {
local target="$1"
local target="$1"
# Allow members of the wheel group to sudo without a password. By default
# this will only be the alpine user. This allows us to ship an AMI that is
# accessible via SSH using the user's configured SSH keys (thanks to
# tiny-ec2-bootstrap) but does not allow remote root access which is the
# best-practice.
sed -i '/%wheel .* NOPASSWD: .*/s/^# //' "$target"/etc/sudoers
# Allow members of the wheel group to sudo without a password. By default
# this will only be the alpine user. This allows us to ship an AMI that is
# accessible via SSH using the user's configured SSH keys (thanks to
# tiny-ec2-bootstrap) but does not allow remote root access which is the
# best-practice.
sed -i '/%wheel .* NOPASSWD: .*/s/^# //' "$target"/etc/sudoers
# There is no real standard ec2 username across AMIs, Amazon uses ec2-user
# for their Amazon Linux AMIs but Ubuntu uses ubuntu, Fedora uses fedora,
# etc... (see: https://alestic.com/2014/01/ec2-ssh-username/). So our user
# and group are alpine because this is Alpine Linux. On instance bootstrap
# the user can create whatever users they want and delete this one.
chroot "$target" /usr/sbin/addgroup alpine
chroot "$target" /usr/sbin/adduser -h /home/alpine -s /bin/sh -G alpine -D alpine
chroot "$target" /usr/sbin/addgroup alpine wheel
chroot "$target" /usr/bin/passwd -u alpine
# There is no real standard ec2 username across AMIs, Amazon uses ec2-user
# for their Amazon Linux AMIs but Ubuntu uses ubuntu, Fedora uses fedora,
# etc... (see: https://alestic.com/2014/01/ec2-ssh-username/). So our user
# and group are alpine because this is Alpine Linux. On instance bootstrap
# the user can create whatever users they want and delete this one.
chroot "$target" /usr/sbin/addgroup alpine
chroot "$target" /usr/sbin/adduser -h /home/alpine -s /bin/sh -G alpine -D alpine
chroot "$target" /usr/sbin/addgroup alpine wheel
chroot "$target" /usr/bin/passwd -u alpine
}
configure_ntp() {
local target="$1"
local target="$1"
# EC2 provides an instance-local NTP service synchronized with GPS and
# atomic clocks in-region. Prefer this over external NTP hosts when running
# in EC2.
#
# See: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html
sed -i 's/^server .*/server 169.254.169.123/' "$target"/etc/chrony/chrony.conf
# EC2 provides an instance-local NTP service synchronized with GPS and
# atomic clocks in-region. Prefer this over external NTP hosts when running
# in EC2.
#
# See: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html
sed -i 's/^pool .*/server 169.254.169.123 iburst/' "$target"/etc/chrony/chrony.conf
}
cleanup() {
local target="$1"
local target="$1"
# Sweep cruft out of the image that doesn't need to ship or will be
# re-generated when the image boots
rm -f \
"$target"/var/cache/apk/* \
"$target"/etc/resolv.conf \
"$target"/root/.ash_history \
"$target"/etc/*-
# Sweep cruft out of the image that doesn't need to ship or will be
# re-generated when the image boots
rm -f \
"$target"/var/cache/apk/* \
"$target"/etc/resolv.conf \
"$target"/root/.ash_history \
"$target"/etc/*-
umount \
"$target"/dev \
"$target"/proc \
"$target"/sys
umount \
"$target"/dev \
"$target"/proc \
"$target"/sys
umount "$target"
umount "$target"
}
main() {
[ "$#" -ne 4 ] && { echo "usage: $0 <block-device> <kernel-flavor> '<repo>[,<repo>]' '<pkg>[ <pkg>]'"; exit 1; }
[ "$#" -ne 3 ] && { echo "usage: $0 <kernel-flavor> '<repo>[,<repo>]' '<pkg>[ <pkg>]'"; exit 1; }
device="$1"
flavor="$2"
add_repos="$3"
add_pkgs="$4"
local flavor="$1"
local add_repos="$2"
local add_pkgs="$3"
target="/mnt/target"
local device="/dev/xvdf"
local target="/mnt/target"
validate_block_device "$device"
validate_block_device "$device"
[ -d "$target" ] || mkdir "$target"
[ -d "$target" ] || mkdir "$target"
einfo "Fetching static APK tools"
apk="$(fetch_apk_tools)"
einfo "Fetching static APK tools"
apk="$(fetch_apk_tools)"
einfo "Creating root filesystem"
make_filesystem "$device" "$target"
einfo "Creating root filesystem"
make_filesystem "$device" "$target"
setup_repositories "$target" "$add_repos"
setup_repositories "$target" "$add_repos"
einfo "Fetching Alpine signing keys"
fetch_keys "$target"
einfo "Fetching Alpine signing keys"
fetch_keys "$target"
einfo "Installing base system"
$apk add --root "$target" --update-cache --initdb alpine-base
einfo "Installing base system"
$apk add --root "$target" --update-cache --initdb alpine-base
setup_chroot "$target"
setup_chroot "$target"
einfo "Installing core packages"
install_core_packages "$target" "$flavor" "$add_pkgs"
einfo "Installing core packages"
install_core_packages "$target" "$flavor" "$add_pkgs"
einfo "Configuring and enabling boot loader"
create_initfs "$target"
setup_extlinux "$target"
install_extlinux "$target"
einfo "Configuring and enabling boot loader"
create_initfs "$target"
setup_extlinux "$target"
install_extlinux "$target"
einfo "Configuring system"
setup_fstab "$target"
setup_networking "$target"
enable_services "$target"
create_alpine_user "$target"
configure_ntp "$target"
einfo "Configuring system"
setup_fstab "$target"
setup_networking "$target"
enable_services "$target"
create_alpine_user "$target"
configure_ntp "$target"
einfo "All done, cleaning up"
cleanup "$target"
einfo "All done, cleaning up"
cleanup "$target"
}
main "$@"

View File

@ -5,18 +5,16 @@
"ami_desc_prefix": "Alpine Linux ",
"ami_desc_suffix": " Release with EC2 Optimizations",
"kernel_flavor": "vanilla@edge-main",
"add_repos": "@edge-main http://dl-cdn.alpinelinux.org/alpine/edge/main,@edge-testing http://dl-cdn.alpinelinux.org/alpine/edge/testing",
"add_pkgs": "acct aws-ena-driver-vanilla@edge-testing e2fsprogs-extra",
"add_repos": "",
"add_pkgs": "aws-ena-driver-vanilla@edge-testing",
"ena_enable": "true",
"sriov_enable": "false",
"volume_size": "1",
"encrypt_ami": "false",
"ami_access": "all",
"deploy_regions": "us-east-1,us-east-2,us-west-1,us-west-2,ca-central-1,eu-central-1,eu-west-1,eu-west-2,eu-west-3,ap-northeast-1,ap-northeast-2,ap-southeast-1,ap-southeast-2,ap-south-1,sa-east-1",
"vpc": "",
"subnet": "",
"security_group": "",
"public_ip": "false",
"build_instance_type": "t2.nano",
"volume_name": "/dev/xvdf"
"public_ip": "false"
}

View File

@ -1,4 +1,5 @@
# NOTE: This file is not valid JSON.
# *** NOTE: This file is not valid JSON! ***
{
### Build Options ###
@ -13,31 +14,37 @@
"ami_desc_prefix": "Alpine Linux ",
"ami_desc_suffix": " Release with EC2 Optimizations",
# Kernel "flavor" to install. 'virt' is a slim choice, but doesn't currently
# include NVME support and there is no matching 'aws-ena-driver' package.
# Kernel "flavor" to install.
#
# 'virt' is the slim choice, but doesn't currently include NVMe support and
# there is no matching 'aws-ena-driver' package. When these features are
# available, this kernel flavor will be the default (if not hardcoded).
#
# 'vanilla' installs a lot of unneeded stuff (for an AMI), but does support
# NVME; however, there is no matching ENA driver in the main repo. In order
# to support NVME and ENA, we need to use 'vanilla@edge-main', which matches
# NVMe; however, there is no matching ENA driver in the main repo. In order
# to support NVMe and ENA, we need to use 'vanilla@edge-main', which matches
# the 'aws-ena-driver@edge-testing' package.
#
"kernel_flavor": "vanilla@edge-main",
# Comma separated list of lines to add to /etc/apk/repositories. We need
# edge/main and edge/testing for simultaneous NVME and ENA support.
"add_repos": "@edge-main http://dl-cdn.alpinelinux.org/alpine/edge/main,@edge-testing http://dl-cdn.alpinelinux.org/alpine/edge/testing",
# Comma separated list of custom lines to add to /etc/apk/repositories.
# @edge-main, @edge-community, and @edge-testing repos have been predefined.
"add_repos": "",
# Space separated list of additional packages to add to the AMI.
# acct - system accounting utilities (sa, etc.)
# aws-ena-driver-vanilla - Enhanced Network Adapter kernel module
# e2fsprogs-extra - ec2-tiny-bootstrap's currently undeclared dependency (resize2fs)
"add_pkgs": "acct aws-ena-driver-vanilla@edge-testing e2fsprogs-extra",
# aws-ena-driver-vanilla - ENA driver (until we have a 'virt' flavor)
"add_pkgs": "aws-ena-driver-vanilla@edge-testing",
# Enable ENA/SRIOV support on the AMI.
# Enable ENA support on the AMI.
# When ENA is available for the 'virt' kernel, this will always be on.
"ena_enable": "true",
"sriov_enable": "false",
# Size of the AMI image (in GiB).
"volume_size": "1",
# Encrypt the AMI?
"encrypt_ami": "false",
# Comma separated list of groups that should have access to the AMI. However,
# only two values are currently supported: 'all' for public, '' for private.
"ami_access": "all",
@ -62,12 +69,6 @@
# Assign a public IP to the builder instance. Set to 'true' if you need
# to initiate the build from somewhere that wouldn't normally be able to
# access the builder instance's private network.
"public_ip": "false",
"public_ip": "false"
# Instance type to use for building.
"build_instance_type": "t2.nano",
# Don't override this without a good reason, and if you do just make sure it
# gets passed all the way through to the make_ami script.
"volume_name": "/dev/xvdf"
}