diff --git a/alpine-cloud-images/.flake8 b/alpine-cloud-images/.flake8
new file mode 100644
index 0000000..c1ba363
--- /dev/null
+++ b/alpine-cloud-images/.flake8
@@ -0,0 +1,2 @@
+[flake8]
+ignore = E265,E266,E402,E501
\ No newline at end of file
diff --git a/alpine-cloud-images/.gitignore b/alpine-cloud-images/.gitignore
new file mode 100644
index 0000000..fd1027e
--- /dev/null
+++ b/alpine-cloud-images/.gitignore
@@ -0,0 +1,5 @@
+*~
+*.bak
+*.swp
+.vscode/
+/work/
diff --git a/alpine-cloud-images/CONFIGURATION.md b/alpine-cloud-images/CONFIGURATION.md
new file mode 100644
index 0000000..f3b9ccf
--- /dev/null
+++ b/alpine-cloud-images/CONFIGURATION.md
@@ -0,0 +1,318 @@
+# Configuration
+
+All the configuration for building image variants is defined by multiple
+config files; the base configs for official Alpine Linux cloud images are in
+the [`configs/`](configs/) directory.
+
+We use [HOCON](https://github.com/lightbend/config/blob/main/HOCON.md) for
+configuration -- this primarily facilitates importing deeper configs from
+other files, but also allows the extension/concatenation of arrays and maps
+(which can be a useful feature for customization), and inline comments.
+
+----
+## Resolving Work Environment Configs and Scripts
+
+If `work/configs/` and `work/scripts/` don't exist, the `build` script will
+install the contents of the base [`configs/`](configs/) and [`scripts/`](scripts/)
+directories, and overlay additional `configs/` and `scripts/` subdirectories
+from `--custom` directories (if any).
+
+Files cannot be installed over existing files, with one exception -- the
+[`configs/images.conf`](configs/images.conf) same-directory symlink.  Because
+the `build` script _always_ loads `work/configs/images.conf`, this is the hook
+for "rolling your own" custom Alpine Linux cloud images.
+
+The base [`configs/images.conf`](configs/images.conf) symlinks to
+[`alpine.conf`](configs/alpine.conf), but this can be overridden using a
+`--custom` directory containing a new `configs/images.conf` same-directory
+symlink pointing to its custom top-level config.
+
+For example, the configs and scripts in the [`overlays/testing/`](overlays/testing/)
+directory can be resolved in a _clean_ work environment with...
+```
+./build configs --custom overlays/testing
+```
+This results in the `work/configs/images.conf` symlink pointing to
+`work/configs/alpine-testing.conf` instead of `work/configs/alpine.conf`.
+
+If multiple directories are specified with `--custom`, they are applied in
+the order given.
+
+----
+## Top-Level Config File
+
+Examples of top-level config files are [`configs/alpine.conf`](configs/alpine.conf)
+and [`overlays/testing/configs/alpine-testing.conf`](overlays/testing/configs/alpine-testing.conf).
+
+There are three main blocks that need to exist in (or be `import`ed into) the
+top-level HOCON configuration; they are merged in this exact order:
+
+### `Default`
+
+All image variant configs start with this block's contents as a starting point.
+Arrays and maps can be appended by configs in `Dimensions` and `Mandatory`
+blocks.
+
+### `Dimensions`
+
+The sub-blocks in `Dimensions` define the "dimensions" a variant config is
+composed of, and the different config values possible for that dimension.
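+
+For example, a pared-down `Dimensions` block -- a hypothetical sketch for
+illustration (`...` elided), not the full official config -- might look like...
+```
+Dimensions {
+  version { "3.15" { ... }  edge { ... } }
+  arch    { x86_64 { ... }  aarch64 { ... } }
+  cloud   { aws { ... } }
+}
+```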
+The default [`alpine.conf`](configs/alpine.conf) defines the following
+dimensional configs:
+
+* `version` - Alpine Linux _x_._y_ (plus `edge`) versions
+* `arch` - machine architectures, `x86_64` or `aarch64`
+* `firmware` - supports launching via legacy BIOS or UEFI
+* `bootstrap` - the system/scripts responsible for setting up an instance
+  during its initial launch
+* `cloud` - for specific cloud platforms
+
+The specific dimensional configs for an image variant are merged in the order
+that the dimensions are listed.
+
+### `Mandatory`
+
+After a variant's dimensional configs have been applied, this is the last block
+that's merged into the image variant configuration.  This block is the ultimate
+enforcer of any non-overridable configuration across all variants, and can
+also provide the last element to array config items.
+
+----
+## Dimensional Config Directives
+
+Because a full cross-product across all dimensional configs may produce image
+variants that are not viable (e.g. `aarch64` simply does not support legacy
+`bios`), or may require further adjustments (e.g. the `aws` `aarch64` images
+require an additional kernel module from `3.15` forward, which isn't available
+in earlier versions), we have two special directives which may appear in
+dimensional configs.
+
+### `EXCLUDE` array
+
+This directive provides an array of dimensional config keys which are
+incompatible with the current dimensional config.  For example,
+[`configs/arch/aarch64.conf`](configs/arch/aarch64.conf) specifies...
+```
+# aarch64 is UEFI only
+EXCLUDE = [bios]
+```
+...which indicates that any image variant that includes both `aarch64` (the
+current dimensional config) and `bios` configuration should be skipped.
+
+### `WHEN` block
+
+This directive conditionally merges additional configuration ***IF*** the
+image variant also includes a specific dimensional config key (or keys).  In
+order to handle more complex situations, `WHEN` blocks may be nested.  For
+example, [`configs/cloud/aws.conf`](configs/cloud/aws.conf) has...
+```
+WHEN {
+  aarch64 {
+    # new AWS aarch64 default...
+    kernel_modules.gpio_pl061 = true
+    initfs_features.gpio_pl061 = true
+    WHEN {
+      "3.14 3.13 3.12" {
+        # ...but not supported for older versions
+        kernel_modules.gpio_pl061 = false
+        initfs_features.gpio_pl061 = false
+      }
+    }
+  }
+}
+```
+This configures AWS `aarch64` images to use the `gpio_pl061` kernel module in
+order to cleanly shutdown/reboot instances from the web console, CLI, or SDK.
+However, this module is unavailable on older Alpine versions.
+
+Spaces in `WHEN` block keys serve as an "OR" operator; nested `WHEN` blocks
+function as "AND" operators.
+
+----
+## Config Settings
+
+**Scalar** values can be simply overridden in later configs.
+
+**Array** and **map** settings in later configs are merged with the previous
+values, _or entirely reset if they're first set to `null`_, for example...
+```
+some_array = [ thing ]
+# [...]
+some_array = null
+some_array = [ other_thing ]
+```
+
+The following settings are covered mostly in order of appearance, as we walk
+through [`configs/alpine.conf`](configs/alpine.conf) and the deeper configs it
+imports...
+
+### `project` string
+
+This is a unique identifier for the whole collection of images being built.
+For the official Alpine Linux cloud images, this is set to
+`https://alpinelinux.org/cloud`.
+
+When building custom images, you **MUST** override **AT LEAST** this setting to
+avoid image import and publishing collisions.
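+
+For example, a custom top-level config in a hypothetical `--custom` overlay
+(the `overlays/example/` path and project URL are illustrative) might start
+with...
+```
+# overlays/example/configs/my-images.conf
+include required("alpine.conf")
+
+# override the project identifier, as required above
+project = "https://example.com/my-cloud-images"
+```
+...following the same pattern as
+[`overlays/testing/configs/alpine-testing.conf`](overlays/testing/configs/alpine-testing.conf).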
+
+### `name` array
+
+The ultimate contents of this array contribute to the overall naming of the
+resultant image.  Almost all dimensional configs will add to the `name` array,
+with two notable exceptions: the **version** configs' contribution to this
+array is determined when `work/images.yaml` is resolved, and is set to the
+current Alpine Linux release (_x.y.z_, or _YYYYMMDD_ for edge); and because
+**cloud** images are isolated from each other, including the cloud name would
+be redundant.
+
+### `description` array
+
+Similar to the `name` array, the elements of this array contribute to the final
+image description.  However, for the official Alpine configs, only the
+**version** dimension adds to this array, via the same mechanism that sets the
+release for the `name` array.
+
+### `motd` map
+
+This setting controls the contents of what ultimately gets written into the
+variant image's `/etc/motd` file.  Later configs can add additional messages,
+replace existing contents, or remove them entirely (by setting the value to
+`null`).
+
+The `motd.version_notes` and `motd.release_notes` settings have slightly
+different behavior:
+* if the Alpine release (_x.y.z_) ends with `.0`, `release_notes` is dropped
+  to avoid redundancy
+* edge versions are technically not released, so both of these notes are
+  dropped from `/etc/motd`
+* otherwise, `version_notes` and `release_notes` are concatenated together as
+  `release_notes` to avoid a blank line between them
+
+### `scripts` array
+
+These are the scripts that will be executed by Packer, in order, to do various
+setup tasks inside a variant's image.  The `work/scripts/` directory contains
+all scripts, including those that may have been added via `build --custom`.
+
+### `script_dirs` array
+
+Directories (under `work/scripts/`) that contain additional data that the
+`scripts` will need.  Packer will copy these to the VM responsible for setting
+up the variant image.
+
+### `size` string
+
+The size of the image disk; by default we use `1G` (1 GiB).  This disk may (or
+may not) be further partitioned, based on other factors.
+
+### `login` string
+
+The image's primary login user, set to `alpine`.
+
+### `local_format` string
+
+The local VM's disk image format, set to `qcow2`.
+
+### `repos` map
+
+Defines the contents of the image's `/etc/apk/repositories` file.  The map's
+key is the URL of the repo, and the value determines how that URL will be
+represented in the `repositories` file...
+| value | result |
+|-|-|
+| `null` | make no reference to this repo |
+| `false` | this repo is commented out (disabled) |
+| `true` | this repo is enabled for use |
+| _tag_ | enable this repo with `@`_`tag`_ |
+
+### `packages` map
+
+Defines what APK packages to add/delete.  The map's key is the package
+name, and the value determines whether (or not) to install/uninstall the
+package...
+| value | result |
+|-|-|
+| `null` | don't add or delete |
+| `false` | explicitly delete |
+| `true` | add from default repos |
+| _tag_ | add from `@`_`tag`_ repo |
+| `--no-scripts` | add with `--no-scripts` option |
+| `--no-scripts` _tag_ | add from `@`_`tag`_ repo, with `--no-scripts` option |
+
+### `services` map of maps
+
+Defines what services are enabled/disabled at various runlevels.  The first
+map's key is the runlevel, the second key is the service.  The service value
+determines whether (or not) to enable/disable the service at that runlevel...
+| value | result |
+|-|-|
+| `null` | don't enable or disable |
+| `false` | explicitly disable |
+| `true` | explicitly enable |
+
+### `kernel_modules` map
+
+Defines what kernel modules are specified in the boot loader.  The key is the
+kernel module, and the value determines whether or not it's in the final
+list...
+| value | result |
+|-|-|
+| `null` | skip |
+| `false` | skip |
+| `true` | include |
+
+### `kernel_options` map
+
+Defines what kernel options are specified on the kernel command line.  The
+keys are the kernel options, and the values determine whether or not they're
+in the final list...
+| value | result |
+|-|-|
+| `null` | skip |
+| `false` | skip |
+| `true` | include |
+
+### `initfs_features` map
+
+Defines what initfs features are included when making the image's initramfs
+file.  The keys are the initfs features, and the values determine whether or
+not they're included in the final list...
+| value | result |
+|-|-|
+| `null` | skip |
+| `false` | skip |
+| `true` | include |
+
+### `builder` string
+
+The Packer builder that's used to build images.  This is set to `qemu`.
+
+### `qemu.machine_type` string
+
+The QEMU machine type to use when building local images.  For x86_64 this is
+set to `null`; for aarch64 we use `virt`.
+
+### `qemu.args` list of lists
+
+Additional QEMU arguments.  For x86_64 this is set to `null`, but aarch64
+requires several additional arguments to start an operational VM.
+
+### `qemu.firmware` string
+
+The path to the QEMU firmware (installed in `work/firmware/`).  This is only
+used when creating UEFI images.
+
+### `bootloader` string
+
+The bootloader to use, currently `extlinux` or `grub-efi`.
+
+### `access` map
+
+When images are published, this determines who has access to those images.
+The key is the cloud account (or `PUBLIC`), and the value is whether or not
+access is granted, `true` or `false`/`null`.
+
+### `regions` map
+
+Determines where images should be published.  The key is the region
+identifier (or `ALL`), and the value is whether or not to publish to that
+region, `true` or `false`/`null`.
\ No newline at end of file
diff --git a/alpine-cloud-images/LICENSE.txt b/alpine-cloud-images/LICENSE.txt
new file mode 100644
index 0000000..817eab1
--- /dev/null
+++ b/alpine-cloud-images/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2017-2022 Jake Buchholz Göktürk, Michael Crute
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/alpine-cloud-images/README.md b/alpine-cloud-images/README.md
new file mode 100644
index 0000000..595f8d2
--- /dev/null
+++ b/alpine-cloud-images/README.md
@@ -0,0 +1,183 @@
+# Alpine Linux Cloud Image Builder
+
+This repository contains the code and configs for the build system used to
+create official Alpine Linux images for various cloud providers, in various
+configurations.  This build system is flexible, enabling others to build their
+own customized images.
+
+----
+## Pre-Built Official Cloud Images
+
+To get started with official pre-built Alpine Linux cloud images, visit
+https://alpinelinux.org/cloud.  Currently, we build official images for the
+following cloud platforms...
+* AWS
+
+...we are working on also publishing official images to other major cloud
+providers.
+
+Each published image's name contains the Alpine release, architecture,
+firmware, bootstrap, and image revision.  These details (and more) are also
+tagged on the images...
+
+| Tag | Description / Values |
+|-----|----------------------|
+| name | `alpine-`_`release`_`-`_`arch`_`-`_`firmware`_`-`_`bootstrap`_`-r`_`revision`_ |
+| project | `https://alpinelinux.org/cloud` |
+| image_key | _`release`_`-`_`arch`_`-`_`firmware`_`-`_`bootstrap`_`-`_`cloud`_ |
+| version | Alpine version (_`x.y`_ or `edge`) |
+| release | Alpine release (_`x.y.z`_ or _`YYYYMMDD`_ for edge) |
+| arch | architecture (`aarch64` or `x86_64`) |
+| firmware | boot mode (`bios` or `uefi`) |
+| bootstrap | initial bootstrap system (`tiny` = Tiny Cloud) |
+| cloud | provider short name (`aws`) |
+| revision | image revision number |
+| imported | image import timestamp |
+| import_id | imported image id |
+| import_region | imported image region |
+| published | image publication timestamp |
+| description | image description |
+
+Although AWS does not allow cross-account filtering by tags, the image name can
+still be used to filter images.  For example, to get a list of available Alpine
+3.x aarch64 images in AWS eu-west-2...
+```
+aws ec2 describe-images \
+    --region eu-west-2 \
+    --owners 538276064493 \
+    --filters \
+        Name=name,Values='alpine-3.*-aarch64-*' \
+        Name=state,Values=available \
+    --output text \
+    --query 'reverse(sort_by(Images, &CreationDate))[].[ImageId,Name,CreationDate]'
+```
+To get just the most recent matching image, use...
+```
+    --query 'max_by(Images, &CreationDate).[ImageId,Name,CreationDate]'
+```
+
+----
+## Build System
+
+The build system consists of a number of components:
+
+* the primary `build` script
+* the `configs/` directory, defining the set of images to be built
+* the `scripts/` directory, containing scripts and related data used to set up
+  image contents during provisioning
+* the Packer `alpine.pkr.hcl`, which orchestrates the build, import, and
+  publishing of images
+* the `cloud_helper.py` script that Packer runs in order to do cloud-specific
+  import and publish operations
+
+### Build Requirements
+* [Python](https://python.org) (3.9.7 is known to work)
+* [Packer](https://packer.io) (1.7.6 is known to work)
+* [QEMU](https://www.qemu.org) (6.1.0 is known to work)
+* cloud provider account(s)
+
+### Cloud Credentials
+
+By default, the build system relies on the cloud providers' Python API
+libraries to find and use the necessary credentials, usually via configuration
+under the user's home directory (e.g. `~/.aws/`, `~/.oci/`, etc.) or via
+environment variables (e.g. `AWS_...`, `OCI_...`, etc.)
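+
+For example, with AWS this might be as simple as the following (the profile
+name is hypothetical; `AWS_PROFILE` and `AWS_DEFAULT_REGION` are standard AWS
+SDK variables, and other providers use their own)...
+```
+export AWS_PROFILE=alpine-builder    # profile defined in ~/.aws/credentials
+export AWS_DEFAULT_REGION=us-west-2
+```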
+
+The credentials' user/role needs sufficient permission to query, import, and
+publish images -- the exact details will vary from cloud to cloud.  _It is
+recommended that only the minimum required permissions are granted._
+
+_We manage the credentials for publishing official Alpine images with an
+"identity broker" service, and retrieve those credentials via the
+`--use-broker` argument of the `build` script._
+
+### The `build` Script
+
+```
+usage: build [-h] [--debug] [--clean] [--custom DIR [DIR ...]]
+             [--skip KEY [KEY ...]] [--only KEY [KEY ...]] [--revise] [--use-broker]
+             [--no-color] [--parallel N] [--vars FILE [FILE ...]]
+             {configs,state,local,import,publish}
+
+positional arguments:  (build up to and including this step)
+  configs   resolve image build configuration
+  state     refresh current image build state
+  local     build images locally
+  import    import local images to cloud provider default region
+  publish   set image permissions and publish to cloud regions
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --debug               enable debug output
+  --clean               start with a clean work environment
+  --custom DIR [DIR ...]
+                        overlay custom directory in work environment
+  --skip KEY [KEY ...]  skip variants with dimension key(s)
+  --only KEY [KEY ...]  only variants with dimension key(s)
+  --revise              remove existing local/imported image, or bump
+                        revision and rebuild if published
+  --use-broker          use the identity broker to get credentials
+  --no-color            turn off Packer color output
+  --parallel N          build N images in parallel (default: 1)
+  --vars FILE [FILE ...]
+                        supply Packer with -vars-file(s)
+```
+
+The `build` script will automatically create a `work/` directory containing a
+Python virtual environment if one does not already exist.  This directory also
+hosts other data related to building images.  The `--clean` argument will
+remove everything in the `work/` directory except for things related to the
+Python virtual environment.
+
+If `work/configs/` or `work/scripts/` directories do not yet exist, they will
+be populated with the base configuration and scripts from `configs/` and/or
+`scripts/` directories.  If any custom overlay directories are specified with
+the `--custom` argument, their `configs/` and `scripts/` subdirectories are
+also added to `work/configs/` and `work/scripts/`.
+
+The "build step" positional argument determines the last step the `build`
+script should execute -- all steps before this targeted step may also be
+executed.  That is, `build local` will first execute the `configs` step (if
+necessary) and then the `state` step (always) before proceeding to the `local`
+step.
+
+The `configs` step resolves configuration for all buildable images, and writes
+it to `work/images.yaml`, if it does not already exist.
+
+The `state` step always checks the current state of the image builds,
+determines what actions need to be taken, and updates `work/images.yaml`.  A
+subset of image builds can be targeted by using the `--skip` and `--only`
+arguments.  The `--revise` argument indicates that any _unpublished_ local
+or imported images should be removed and rebuilt; as _published_ images can't
+be removed, `--revise` instead increments the _`revision`_ value to rebuild
+new images.
+
+`local`, `import`, and `publish` steps are orchestrated by Packer.  By default,
+each image will be processed serially; providing the `--parallel` argument with
+a value greater than 1 will parallelize operations.
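+
+For example, a hypothetical invocation that locally builds only the Alpine
+3.15 x86_64 image variants, four at a time, would be (the dimension keys shown
+are illustrative)...
+```
+./build local --only 3.15 x86_64 --parallel 4
+```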
+The degree to which you can parallelize `local` image builds will depend on
+the local build hardware -- as QEMU virtual machines are launched for each
+image being built.  Image `import` and `publish` steps are much more
+lightweight, and can support higher parallelism.
+
+The `local` step builds local images with QEMU, for those images that have not
+already been built locally or imported.
+
+The `import` step imports the local images into the cloud providers' default
+regions, unless they've already been imported.  At this point the images are
+not available publicly, allowing for additional testing prior to publishing.
+
+The `publish` step copies the image from the default region to other regions,
+if they haven't already been copied there.  This step will always update
+image permissions, descriptions, tags, and deprecation date (if applicable)
+in all regions where the image has been published.
+
+### The `cloud_helper.py` Script
+
+This script is meant to be called only by Packer from its `post-processor`
+block for image `import` and `publish` steps.
+
+----
+## Build Configuration
+
+For more in-depth information about how the build system configuration works,
+how to create custom config overlays, and details about individual config
+settings, see [CONFIGURATION.md](CONFIGURATION.md).
diff --git a/alpine-cloud-images/alpine.pkr.hcl b/alpine-cloud-images/alpine.pkr.hcl
new file mode 100644
index 0000000..5cd4c99
--- /dev/null
+++ b/alpine-cloud-images/alpine.pkr.hcl
@@ -0,0 +1,195 @@
+# Alpine Cloud Images Packer Configuration
+
+### Variables
+
+# include debug output from provisioning/post-processing scripts
+variable "DEBUG" {
+  default = 0
+}
+# indicates cloud_helper.py should be run with --use-broker
+variable "USE_BROKER" {
+  default = 0
+}
+
+# tuneable QEMU VM parameters, based on performance of the local machine;
+# overridable via build script --vars parameter referencing a Packer
+# ".vars.hcl" file containing alternate settings
+variable "qemu" {
+  default = {
+    boot_wait = {
+      aarch64 = "1m"
+      x86_64  = "1m"
+    }
+    cmd_wait    = "5s"
+    ssh_timeout = "1m"
+    memory      = 1024  # MiB
+  }
+}
+
+### Local Data
+
+locals {
+  debug_arg  = var.DEBUG == 0 ? "" : "--debug"
+  broker_arg = var.USE_BROKER == 0 ? "" : "--use-broker"
"" : "--use-broker" + + # randomly generated password + password = uuidv4() + + # resolve actionable build configs + configs = { for b, cfg in yamldecode(file("work/images.yaml")): + b => cfg if contains(keys(cfg), "actions") + } +} + +### Build Sources + +# Don't build +source null alpine { + communicator = "none" +} + +# Common to all QEMU builds +source qemu alpine { + # qemu machine + headless = true + memory = var.qemu.memory + net_device = "virtio-net" + disk_interface = "virtio" + + # build environment + boot_command = [ + "root", + "setup-interfaces", + "ifup eth0", + "setup-sshd -c openssh", + "echo PermitRootLogin yes >> /etc/ssh/sshd_config", + "service sshd restart", + "echo 'root:${local.password}' | chpasswd", + ] + ssh_username = "root" + ssh_password = local.password + ssh_timeout = var.qemu.ssh_timeout + shutdown_command = "poweroff" +} + +build { + name = "alpine" + + ## Builders + + # QEMU builder + dynamic "source" { + for_each = { for b, c in local.configs: + b => c if contains(c.actions, "build") && c.builder == "qemu" + } + iterator = B + labels = ["qemu.alpine"] # links us to the base source + + content { + name = B.key + + # qemu machine + qemu_binary = "qemu-system-${B.value.arch}" + qemuargs = B.value.qemu.args + machine_type = B.value.qemu.machine_type + firmware = B.value.qemu.firmware + + # build environment + iso_url = B.value.qemu.iso_url + iso_checksum = "file:${B.value.qemu.iso_url}.sha512" + boot_wait = var.qemu.boot_wait[B.value.arch] + + # results + output_directory = "work/images/${B.value.cloud}/${B.value.image_key}" + disk_size = B.value.size + format = B.value.local_format + vm_name = "image.${B.value.local_format}" + } + } + + # Null builder (don't build, but we might import and/or publish) + dynamic "source" { + for_each = { for b, c in local.configs: + b => c if !contains(c.actions, "build") + } + iterator = B + labels = ["null.alpine"] + content { + name = B.key + } + } + + ## build provisioners + + # install setup files + dynamic "provisioner" { + for_each = { for b, c in local.configs: + b => c if contains(c.actions, "build") + } + iterator = B + labels = ["file"] + content { + only = [ "${B.value.builder}.${B.key}" ] # configs specific to one build + + sources = [ for d in B.value.script_dirs: "work/scripts/${d}" ] + destination = "/tmp/" + } + } + + # run setup scripts + dynamic "provisioner" { + for_each = { for b, c in local.configs: + b => c if contains(c.actions, "build") + } + iterator = B + labels = ["shell"] + content { + only = [ "${B.value.builder}.${B.key}" ] # configs specific to one build + + scripts = [ for s in B.value.scripts: "work/scripts/${s}" ] + use_env_var_file = true + environment_vars = [ + "DEBUG=${var.DEBUG}", + "ARCH=${B.value.arch}", + "BOOTLOADER=${B.value.bootloader}", + "BOOTSTRAP=${B.value.bootstrap}", + "BUILD_NAME=${B.value.name}", + "BUILD_REVISION=${B.value.revision}", + "CLOUD=${B.value.cloud}", + "END_OF_LIFE=${B.value.end_of_life}", + "FIRMWARE=${B.value.firmware}", + "IMAGE_LOGIN=${B.value.login}", + "INITFS_FEATURES=${B.value.initfs_features}", + "KERNEL_MODULES=${B.value.kernel_modules}", + "KERNEL_OPTIONS=${B.value.kernel_options}", + "MOTD=${B.value.motd}", + "NTP_SERVER=${B.value.ntp_server}", + "PACKAGES_ADD=${B.value.packages.add}", + "PACKAGES_DEL=${B.value.packages.del}", + "PACKAGES_NOSCRIPTS=${B.value.packages.noscripts}", + "RELEASE=${B.value.release}", + "REPOS=${B.value.repos}", + "SERVICES_ENABLE=${B.value.services.enable}", + "SERVICES_DISABLE=${B.value.services.disable}", + 
"VERSION=${B.value.version}", + ] + } + } + + ## build post-processor + + # import and/or publish cloud images + dynamic "post-processor" { + for_each = { for b, c in local.configs: + b => c if contains(c.actions, "import") || contains(c.actions, "publish") + } + iterator = B + labels = ["shell-local"] + content { + only = [ "${B.value.builder}.${B.key}", "null.${B.key}" ] + inline = [ for action in ["import", "publish"]: + "./cloud_helper.py ${action} ${local.debug_arg} ${local.broker_arg} ${B.key}" if contains(B.value.actions, action) + ] + } + } +} diff --git a/alpine-cloud-images/alpine.py b/alpine-cloud-images/alpine.py new file mode 100644 index 0000000..8fa4a6a --- /dev/null +++ b/alpine-cloud-images/alpine.py @@ -0,0 +1,104 @@ +# vim: ts=4 et: + +import json +import re +from datetime import datetime, timedelta +from urllib.request import urlopen + + +class Alpine(): + + DEFAULT_RELEASES_URL = 'https://alpinelinux.org/releases.json' + DEFAULT_CDN_URL = 'https://dl-cdn.alpinelinux.org/alpine' + DEFAULT_WEB_TIMEOUT = 5 + + def __init__(self, releases_url=None, cdn_url=None, web_timeout=None): + self.now = datetime.utcnow() + self.release_today = self.now.strftime('%Y%m%d') + self.eol_tomorrow = (self.now + timedelta(days=1)).strftime('%F') + self.latest = None + self.versions = {} + self.releases_url = releases_url or self.DEFAULT_RELEASES_URL + self.web_timeout = web_timeout or self.DEFAULT_WEB_TIMEOUT + self.cdn_url = cdn_url or self.DEFAULT_CDN_URL + + # get all Alpine versions, and their EOL and latest release + res = urlopen(self.releases_url, timeout=self.web_timeout) + r = json.load(res) + branches = sorted( + r['release_branches'], reverse=True, + key=lambda x: x.get('branch_date', '0000-00-00') + ) + for b in branches: + ver = b['rel_branch'].lstrip('v') + if not self.latest: + self.latest = ver + + rel = None + if releases := b.get('releases', None): + rel = sorted( + releases, reverse=True, key=lambda x: x['date'] + )[0]['version'] + elif ver == 'edge': + # edge "releases" is today's YYYYMMDD + rel = self.release_today + + self.versions[ver] = { + 'version': ver, + 'release': rel, + 'end_of_life': b.get('eol_date', self.eol_tomorrow), + 'arches': b.get('arches'), + } + + def _ver(self, ver=None): + if not ver or ver == 'latest' or ver == 'latest-stable': + ver = self.latest + + return ver + + def repo_url(self, repo, arch, ver=None): + ver = self._ver(ver) + if ver != 'edge': + ver = 'v' + ver + + return f"{self.cdn_url}/{ver}/{repo}/{arch}" + + def virt_iso_url(self, arch, ver=None): + ver = self._ver(ver) + rel = self.versions[ver]['release'] + return f"{self.cdn_url}/v{ver}/releases/{arch}/alpine-virt-{rel}-{arch}.iso" + + def version_info(self, ver=None): + ver = self._ver(ver) + if ver not in self.versions: + # perhaps a release candidate? + apk_ver = self.apk_version('main', 'x86_64', 'alpine-base', ver=ver) + rel = apk_ver.split('-')[0] + ver = '.'.join(rel.split('.')[:2]) + self.versions[ver] = { + 'version': ver, + 'release': rel, + 'end_of_life': self.eol_tomorrow, + 'arches': self.versions['edge']['arches'], # reasonable assumption + } + + return self.versions[ver] + + # TODO? 
maybe implement apk_info() to read from APKINDEX, but for now
+    # this apk_version() seems faster and gets what we need
+
+    def apk_version(self, repo, arch, apk, ver=None):
+        ver = self._ver(ver)
+        repo_url = self.repo_url(repo, arch, ver=ver)
+        apks_re = re.compile(f'"{apk}-(\\d.*)\\.apk"')
+        res = urlopen(repo_url, timeout=self.web_timeout)
+        for line in map(lambda x: x.decode('utf8'), res):
+            # skip index lines that aren't links to APK files
+            # (reconstructed -- this condition was garbled in the diff)
+            if not line.startswith('<a href="'):
+                continue
+            if match := apks_re.search(line):
+                return match.group(1)
+
+    # NOTE: requires 'vmimport' role with read/write of <s3_bucket>.* and its objects
+    def import_image(self, ic):
+        log = logging.getLogger('import')
+        image_path = ic.local_path
+        image_aws = ic.local_dir / 'image.vhd'
+        name = ic.image_name
+        description = ic.image_description
+
+        # convert QCOW2 to VHD
+        log.info('Converting %s to VHD format', image_path)
+        p = Popen(self.CONVERT_CMD + (image_path, image_aws), stdout=PIPE, stderr=PIPE, encoding='utf8')
+        out, err = p.communicate()
+        if p.returncode:
+            log.error('Unable to convert %s to VHD format (%s)', image_path, p.returncode)
+            log.error('EXIT: %d', p.returncode)
+            log.error('STDOUT:\n%s', out)
+            log.error('STDERR:\n%s', err)
+            raise RuntimeError
+
+        session = self.session()
+        s3r = session.resource('s3')
+        ec2c = session.client('ec2')
+        ec2r = session.resource('ec2')
+
+        bucket_name = 'alpine-cloud-images.' + hashlib.sha1(os.urandom(40)).hexdigest()
+        s3_key = name + '.vhd'
+
+        bucket = s3r.Bucket(bucket_name)
+        log.info('Creating S3 bucket %s', bucket.name)
+        bucket.create(
+            CreateBucketConfiguration={'LocationConstraint': ec2c.meta.region_name}
+        )
+        bucket.wait_until_exists()
+        s3_url = f"s3://{bucket.name}/{s3_key}"
+
+        try:
+            log.info('Uploading %s to %s', image_aws, s3_url)
+            bucket.upload_file(str(image_aws), s3_key)
+
+            # import snapshot from S3
+            log.info('Importing EC2 snapshot from %s', s3_url)
+            ss_import = ec2c.import_snapshot(
+                DiskContainer={
+                    'Description': description,     # https://github.com/boto/boto3/issues/2286
+                    'Format': 'VHD',
+                    'Url': s3_url
+                }
+                # NOTE: TagSpecifications -- doesn't work with ResourceType: snapshot?
+            )
+            ss_task_id = ss_import['ImportTaskId']
+            while True:
+                ss_task = ec2c.describe_import_snapshot_tasks(
+                    ImportTaskIds=[ss_task_id]
+                )
+                task_detail = ss_task['ImportSnapshotTasks'][0]['SnapshotTaskDetail']
+                if task_detail['Status'] not in ['pending', 'active', 'completed']:
+                    msg = f"Bad EC2 snapshot import: {task_detail['Status']} - {task_detail['StatusMessage']}"
+                    log.error(msg)
+                    raise RuntimeError(msg)
+
+                if task_detail['Status'] == 'completed':
+                    snapshot_id = task_detail['SnapshotId']
+                    break
+
+                time.sleep(15)
+        except Exception:
+            log.error('Unable to import snapshot from S3:', exc_info=True)
+            raise
+        finally:
+            # always cleanup S3, even if there was an exception raised
+            log.info('Cleaning up %s', s3_url)
+            bucket.Object(s3_key).delete()
+            bucket.delete()
+
+        # tag snapshot
+        snapshot = ec2r.Snapshot(snapshot_id)
+        try:
+            log.info('Tagging EC2 snapshot %s', snapshot_id)
+            tags = ic.tags
+            tags.Name = tags.name   # because AWS is special
+            snapshot.create_tags(Tags=tags.as_list())
+        except Exception:
+            log.error('Unable to tag snapshot:', exc_info=True)
+            log.info('Removing snapshot')
+            snapshot.delete()
+            raise
+
+        # register AMI
+        try:
+            log.info('Registering EC2 AMI from snapshot %s', snapshot_id)
+            img = ec2c.register_image(
+                Architecture=self.ARCH[ic.arch],
+                BlockDeviceMappings=[{
+                    'DeviceName': '/dev/xvda',
+                    'Ebs': {'SnapshotId': snapshot_id}
+                }],
+                Description=description,
+                EnaSupport=True,
+                Name=ic.image_name,
+                RootDeviceName='/dev/xvda',
+                SriovNetSupport='simple',
+                VirtualizationType='hvm',
+                BootMode=self.BOOT_MODE[ic.firmware],
+            )
+        except Exception:
+            log.error('Unable to register image:', exc_info=True)
+            log.info('Removing snapshot')
+            snapshot.delete()
+            raise
+
+        image_id = img['ImageId']
+        image = ec2r.Image(image_id)
+
+        try:
+            # tag image (adds imported tag)
+            log.info('Tagging EC2 AMI %s', image_id)
+            tags.imported = datetime.utcnow().isoformat()
+            tags.import_id = image_id
+            tags.import_region = ec2c.meta.region_name
+            image.create_tags(Tags=tags.as_list())
+        except Exception:
+            log.error('Unable to tag image:', exc_info=True)
+            log.info('Removing image and snapshot')
+            # EC2 Image resources are deregistered, not deleted
+            image.deregister()
+            snapshot.delete()
+            raise
+
+        return self._image_info(image)
+
+    # remove an (unpublished) image
+    def remove_image(self, image_id):
+        log = logging.getLogger('build')
+        ec2r = self.session().resource('ec2')
+        image = ec2r.Image(image_id)
+        snapshot_id = image.block_device_mappings[0]['Ebs']['SnapshotId']
+        snapshot = ec2r.Snapshot(snapshot_id)
+        log.info('Deregistering %s', image_id)
+        image.deregister()
+        log.info('Deleting %s', snapshot_id)
+        snapshot.delete()
+
+    # publish an image
+    def publish_image(self, ic):
+        log = logging.getLogger('publish')
+        source_image = self.latest_build_image(
+            ic.project,
+            ic.image_key,
+        )
+        if not source_image:
+            log.error('No source image for %s', ic.image_key)
+            raise RuntimeError('Missing source image')
+
+        source_id = source_image.import_id
+        source_region = source_image.import_region
+        log.info('Publishing source: %s/%s', source_region, source_id)
+        source = self.session().resource('ec2').Image(source_id)
+
+        # we may be updating tags, get them from image config
+        tags = ic.tags
+
+        # sort out published image access permissions
+        perms = {'groups': [], 'users': []}
+        if ic.access.get('PUBLIC', None):
+            perms['groups'] = ['all']
+        else:
+            for k, v in ic.access.items():
+                if v:
+                    log.debug('users: %s', k)
+                    perms['users'].append(str(k))
+
+        log.debug('perms: %s', perms)
+
+        # resolve destination regions
+        regions = 
self.regions + if ic.regions.pop('ALL', None): + log.info('Publishing to ALL available regions') + else: + # clear ALL out of the way if it's still there + ic.regions.pop('ALL', None) + regions = {r: regions[r] for r in ic.regions} + + publishing = {} + for r in regions.keys(): + if not regions[r]: + log.warning('Skipping unsubscribed AWS region %s', r) + continue + + images = self._get_images_with_tags( + region=r, + project=ic.project, + image_key=ic.image_key, + tags={'revision': ic.revision} + ) + if images: + image = images[0] + log.info('%s: Already exists as %s', r, image.id) + else: + ec2c = self.session(r).client('ec2') + try: + res = ec2c.copy_image( + Description=source.description, + Name=source.name, + SourceImageId=source_id, + SourceRegion=source_region, + ) + except Exception: + log.warning('Skipping %s, unable to copy image:', r, exc_info=True) + continue + + image_id = res['ImageId'] + log.info('%s: Publishing to %s', r, image_id) + image = self.session(r).resource('ec2').Image(image_id) + + publishing[r] = image + + artifacts = {} + copy_wait = 180 + while len(artifacts) < len(publishing): + for r, image in publishing.items(): + if r not in artifacts: + image.reload() + if image.state == 'available': + # tag image + log.info('%s: Adding tags to %s', r, image.id) + image_tags = Tags(from_list=image.tags) + fresh = False + if 'published' not in image_tags: + fresh = True + + if fresh: + tags.published = datetime.utcnow().isoformat() + + image.create_tags(Tags=tags.as_list()) + + # tag image's snapshot, too + snapshot = self.session(r).resource('ec2').Snapshot( + image.block_device_mappings[0]['Ebs']['SnapshotId'] + ) + snapshot.create_tags(Tags=tags.as_list()) + + # update image description to match description in tags + log.info('%s: Updating description to %s', r, tags.description) + image.modify_attribute( + Description={'Value': tags.description}, + ) + + # apply launch perms + log.info('%s: Applying launch perms to %s', r, image.id) + image.reset_attribute(Attribute='launchPermission') + image.modify_attribute( + Attribute='launchPermission', + OperationType='add', + UserGroups=perms['groups'], + UserIds=perms['users'], + ) + + # set up AMI deprecation + ec2c = image.meta.client + log.info('%s: Setting EOL deprecation time on %s', r, image.id) + ec2c.enable_image_deprecation( + ImageId=image.id, + DeprecateAt=f"{tags.end_of_life}T23:59:59Z" + ) + + artifacts[r] = image.id + + if image.state == 'failed': + log.error('%s: %s - %s - %s', r, image.id, image.state, image.state_reason) + artifacts[r] = None + + remaining = len(publishing) - len(artifacts) + if remaining > 0: + log.info('Waiting %ds for %d images to complete', copy_wait, remaining) + time.sleep(copy_wait) + copy_wait = 30 + + return artifacts + + +def register(cloud, cred_provider=None): + return AWSCloudAdapter(cloud, cred_provider) diff --git a/alpine-cloud-images/clouds/identity_broker_client.py b/alpine-cloud-images/clouds/identity_broker_client.py new file mode 100644 index 0000000..0465d82 --- /dev/null +++ b/alpine-cloud-images/clouds/identity_broker_client.py @@ -0,0 +1,135 @@ +# vim: ts=4 et: + +import json +import logging +import os +import sys +import time +import urllib.error + +from datetime import datetime +from email.utils import parsedate +from urllib.request import Request, urlopen + + +class IdentityBrokerClient: + """Client for identity broker + + Export IDENTITY_BROKER_ENDPOINT to override the default broker endpoint. + Export IDENTITY_BROKER_API_KEY to specify an API key for the broker. 
+ + See README_BROKER.md for more information and a spec. + """ + + _DEFAULT_ENDPOINT = 'https://aws-access.crute.us/api/account' + _DEFAULT_ACCOUNT = 'alpine-amis-user' + _LOGFORMAT = '%(name)s - %(levelname)s - %(message)s' + + def __init__(self, endpoint=None, key=None, account=None, debug=False): + # log to STDOUT so that it's not all red when executed by Packer + self._logger = logging.getLogger('identity-broker') + self._logger.setLevel(logging.DEBUG if debug else logging.INFO) + console = logging.StreamHandler(sys.stdout) + console.setFormatter(logging.Formatter(self._LOGFORMAT)) + self._logger.addHandler(console) + + self._endpoint = os.environ.get('IDENTITY_BROKER_ENDPOINT') or endpoint \ + or self._DEFAULT_ENDPOINT + self._key = os.environ.get('IDENTITY_BROKER_API_KEY') or key + self._account = account or self._DEFAULT_ACCOUNT + if not self._key: + raise Exception('No identity broker key found') + + self._headers = { + 'Accept': 'application/vnd.broker.v2+json', + 'X-API-Key': self._key + } + self._cache = {} + self._expires = {} + self._default_region = {} + + def _is_cache_valid(self, path): + if path not in self._cache: + return False + + # path is subject to expiry AND its time has passed + if self._expires[path] and self._expires[path] < datetime.utcnow(): + return False + + return True + + def _get(self, path): + self._logger.debug("request: %s", path) + if not self._is_cache_valid(path): + while True: # to handle rate limits + try: + res = urlopen(Request(path, headers=self._headers)) + except urllib.error.HTTPError as ex: + if ex.status == 401: + raise Exception('Expired or invalid identity broker token') + + if ex.status == 406: + raise Exception('Invalid or malformed identity broker token') + + # TODO: will this be entirely handled by the 401 above? 
+ if ex.headers.get('Location') == '/logout': + raise Exception('Identity broker token is expired') + + if ex.status == 429: + self._logger.warning( + 'Rate-limited by identity broker, sleeping 30 seconds') + time.sleep(30) + continue + + raise ex + + if res.status not in {200, 429}: + raise Exception(res.reason) + + # never expires without valid RFC 1123 Expires header + if expires := res.getheader('Expires'): + expires = parsedate(expires) + # convert RFC 1123 to datetime, if parsed successfully + expires = datetime(*expires[:6]) + + self._expires[path] = expires + self._cache[path] = json.load(res) + break + + self._logger.debug("response: %s", self._cache[path]) + return self._cache[path] + + def get_credentials_url(self, vendor): + accounts = self._get(self._endpoint) + if vendor not in accounts: + raise Exception(f'No {vendor} credentials found') + + for account in accounts[vendor]: + if account['short_name'] == self._account: + return account['credentials_url'] + + raise Exception('No account credentials found') + + def get_regions(self, vendor): + out = {} + + for region in self._get(self.get_credentials_url(vendor)): + if region['enabled']: + out[region['name']] = region['credentials_url'] + + if region['default']: + self._default_region[vendor] = region['name'] + + return out + + def get_default_region(self, vendor): + if vendor not in self._default_region: + self.get_regions(vendor) + + return self._default_region.get(vendor) + + def get_credentials(self, vendor, region=None): + if not region: + region = self.get_default_region(vendor) + + return self._get(self.get_regions(vendor)[region]) diff --git a/alpine-cloud-images/clouds/interfaces/__init__.py b/alpine-cloud-images/clouds/interfaces/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/alpine-cloud-images/clouds/interfaces/adapter.py b/alpine-cloud-images/clouds/interfaces/adapter.py new file mode 100644 index 0000000..77618b2 --- /dev/null +++ b/alpine-cloud-images/clouds/interfaces/adapter.py @@ -0,0 +1,40 @@ +# vim: ts=4 et: + +class CloudAdapterInterface: + + def __init__(self, cloud, cred_provider=None): + self._sdk = None + self._sessions = {} + self.cloud = cloud + self.cred_provider = cred_provider + self._default_region = None + + @property + def sdk(self): + raise NotImplementedError + + @property + def regions(self): + raise NotImplementedError + + @property + def default_region(self): + raise NotImplementedError + + def credentials(self, region=None): + raise NotImplementedError + + def session(self, region=None): + raise NotImplementedError + + def latest_build_image(self, project, image_key): + raise NotImplementedError + + def import_image(self, config): + raise NotImplementedError + + def remove_image(self, config, image_id): + raise NotImplementedError + + def publish_image(self, config): + raise NotImplementedError diff --git a/alpine-cloud-images/configs/alpine.conf b/alpine-cloud-images/configs/alpine.conf new file mode 100644 index 0000000..81606c6 --- /dev/null +++ b/alpine-cloud-images/configs/alpine.conf @@ -0,0 +1,82 @@ +# vim: ts=2 et: + +# NOTE: If you are using alpine-cloud-images to build public cloud images +# for something/someone other than Alpine Linux, you *MUST* override +# *AT LEAST* the 'project' setting with a unique identifier string value +# via a "config overlay" to avoid image import and publishing collisions. 
+
+project = "https://alpinelinux.org/cloud"
+
+# all build configs start with these
+Default {
+  project = ${project}
+
+  # image name/description components
+  name        = [ alpine ]
+  description = [ Alpine Linux ]
+
+  motd {
+    welcome = "Welcome to Alpine!"
+
+    wiki = "The Alpine Wiki contains a large amount of how-to guides and general\n"\
+           "information about administrating Alpine systems.\n"\
+           "See <https://wiki.alpinelinux.org/>."
+
+    version_notes = "Release Notes:\n"\
+                    "* <https://alpinelinux.org/posts/Alpine-{version}.0-released.html>"
+    release_notes = "* <https://git.alpinelinux.org/aports/log/?h=v{release}>"
+            # dict of dimension -> dimension_key
+            dim_map = dict(zip(dimensions, dim_keys))
+
+            # replace version with release, and make image_key from that
+            release = cfg.Dimensions.version[dim_map['version']].release
+            (rel_map := dim_map.copy())['version'] = release
+            image_key = '-'.join(rel_map.values())
+
+            image_config = ImageConfig(
+                config_key,
+                {
+                    'image_key': image_key,
+                    'release': release
+                } | dim_map
+            )
+
+            # merge in the Default config
+            image_config._merge(cfg.Default)
+            skip = False
+            # merge in each dimension key's configs
+            for dim, dim_key in dim_map.items():
+                dim_cfg = deepcopy(cfg.Dimensions[dim][dim_key])
+
+                exclude = dim_cfg.pop('EXCLUDE', None)
+                if exclude and set(exclude) & set(dim_keys):
+                    self.log.debug('%s SKIPPED, %s excludes %s', config_key, dim_key, exclude)
+                    skip = True
+                    break
+
+                if eol := dim_cfg.get('end_of_life', None):
+                    if self.now > datetime.fromisoformat(eol):
+                        self.log.warning('%s SKIPPED, %s end_of_life %s', config_key, dim_key, eol)
+                        skip = True
+                        break
+
+                image_config._merge(dim_cfg)
+
+            # now that we're done with ConfigTree/dim_cfg, remove " from dim_keys
+            dim_keys = set(k.replace('"', '') for k in dim_keys)
+
+            # WHEN blocks inside WHEN blocks are considered "and" operations
+            while (when := image_config._pop('WHEN', None)):
+                for when_keys, when_conf in when.items():
+                    # WHEN keys with spaces are considered "or" operations
+                    if len(set(when_keys.split(' ')) & dim_keys) > 0:
+                        image_config._merge(when_conf)
+
+            if skip is True:
+                continue
+
+            # merge in the Mandatory configs at the end
+            image_config._merge(cfg.Mandatory)
+
+            # clean stuff up
+            image_config._normalize()
+            image_config.qemu['iso_url'] = self.alpine.virt_iso_url(arch=image_config.arch)
+
+            # we've resolved everything, add tags attribute to config
+            self._configs[config_key] = image_config
+
+        self._save_yaml()
+
+    # set current version release
+    def _set_version_release(self, v, c):
+        info = self.alpine.version_info(v)
+        c.put('release', info['release'])
+        c.put('end_of_life', info['end_of_life'])
+
+        # release is also appended to name & description arrays
+        c.put('name', [c.release])
+        c.put('description', [c.release])
+
+    # update current config status
+    def refresh_state(self, step, only=[], skip=[], revise=False):
+        self.log.info('Refreshing State')
+        has_actions = False
+        for ic in self._configs.values():
+
+            # clear away any previous actions
+            if hasattr(ic, 'actions'):
+                delattr(ic, 'actions')
+
+            dim_keys = set(ic.config_key.split('-'))
+            if only and len(set(only) & dim_keys) != len(only):
+                self.log.debug("%s SKIPPED, doesn't match --only", ic.config_key)
+                continue
+
+            if skip and len(set(skip) & dim_keys) > 0:
+                self.log.debug('%s SKIPPED, matches --skip', ic.config_key)
+                continue
+
+            ic.refresh_state(step, revise)
+            if not has_actions and len(ic.actions):
+                has_actions = True
+
+        # re-save with updated actions
+        self._save_yaml()
+        return has_actions
+
+
+class ImageConfig():
+
+    def __init__(self, config_key, obj={}):
+        self.config_key = str(config_key)
+        tags = obj.pop('tags', None)
+        self.__dict__ |= self._deep_dict(obj)
+        # ensure tag values are str() when loading
+        if tags:
+            self.tags = tags
+
+    @property
+    def local_dir(self):
+        return Path('work/images') / self.cloud / self.image_key
+
+    @property
+    def local_path(self):
+        return self.local_dir / ('image.' + self.local_format)
+
+    @property
+    def published_yaml(self):
+        return self.local_dir / 'published.yaml'
+
+    @property
+    def image_name(self):
+        return self.name.format(**self.__dict__)
+
+    @property
+    def image_description(self):
+        return self.description.format(**self.__dict__)
+
+    @property
+    def tags(self):
+        # stuff that really ought to be there
+        t = {
+            'arch': self.arch,
+            'bootstrap': self.bootstrap,
+            'cloud': self.cloud,
+            'description': self.image_description,
+            'end_of_life': self.end_of_life,
+            'firmware': self.firmware,
+            'image_key': self.image_key,
+            'name': self.image_name,
+            'project': self.project,
+            'release': self.release,
+            'revision': self.revision,
+            'version': self.version
+        }
+        # stuff that might not be there yet
+        for k in ['imported', 'import_id', 'import_region', 'published']:
+            if self.__dict__.get(k, None):
+                t[k] = self.__dict__[k]
+        return Tags(t)
+
+    # recursively convert a ConfigTree object to a dict object
+    def _deep_dict(self, layer):
+        obj = deepcopy(layer)
+        if isinstance(layer, pyhocon.ConfigTree):
+            obj = dict(obj)
+
+        try:
+            for key, value in layer.items():
+                # some HOCON keys are quoted to preserve dots
+                if '"' in key:
+                    obj.pop(key)
+                    key = key.strip('"')
+
+                # version values were HOCON keys at one point, too
+                if key == 'version' and '"' in value:
+                    value = value.strip('"')
+
+                obj[key] = self._deep_dict(value)
+        except AttributeError:
+            pass
+
+        return obj
+
+    def _merge(self, obj={}):
+        mergedeep.merge(self.__dict__, self._deep_dict(obj), strategy=mergedeep.Strategy.ADDITIVE)
+
+    def _pop(self, attr, default=None):
+        return self.__dict__.pop(attr, default)
+
+    # make data ready for Packer ingestion
+    def _normalize(self):
+        # stringify arrays
+        self.name = '-'.join(self.name)
+        self.description = ' '.join(self.description)
+        self._resolve_motd()
+        self._stringify_repos()
+        self._stringify_packages()
+        self._stringify_services()
+        self._stringify_dict_keys('kernel_modules', ',')
+        self._stringify_dict_keys('kernel_options', ' ')
+        self._stringify_dict_keys('initfs_features', ' ')
+
+    def _resolve_motd(self):
+        # merge version/release notes, as appropriate
+        if self.motd.get('version_notes', None) and self.motd.get('release_notes', None):
+            if self.version == 'edge':
+                # edge is, by definition, not released
+                self.motd.pop('version_notes', None)
+                self.motd.pop('release_notes', None)
+
+            elif self.release == self.version + '.0':
+                # no point in showing the same URL twice
+                self.motd.pop('release_notes')
+
+            else:
+                # combine version and release notes
+                self.motd['release_notes'] = self.motd.pop('version_notes') + '\n' + \
+                    self.motd['release_notes']
+
+        # TODO: be rid of null values
+        self.motd = '\n\n'.join(self.motd.values()).format(**self.__dict__)
+
+    def _stringify_repos(self):
+        # stringify repos map
+        #   <repo>: <tag>   # @<tag> <repo> enabled
+        #   <repo>: false   # <repo> disabled (commented out)
+        #   <repo>: true    # <repo> enabled
+        #   <repo>: null    # skip <repo> entirely
+        # ...and interpolate {version}
+        self.repos = "\n".join(filter(None, (
+            f"@{v} {r}" if isinstance(v, str) else
+            f"#{r}" if v is False else
+            r if v is True else None
+            for r, v in self.repos.items()
+        ))).format(version=self.version)
+
+    def _stringify_packages(self):
+        # resolve/stringify packages map
+        #   <pkg>: true                 # add <pkg>
+        #   <pkg>: <tag>                # add <pkg>@<tag>
+        #   <pkg>: --no-scripts         # add --no-scripts <pkg>
+        #   <pkg>: --no-scripts <tag>   # add --no-scripts <pkg>@<tag>
+        #   <pkg>: false                # del <pkg>
+        #   <pkg>: null                 # skip explicit add/del of <pkg>
+        pkgs = {'add': '', 'del': '', 'noscripts': ''}
+        for p, v in self.packages.items():
+            k = 'add'
+            if isinstance(v, str):
+                if '--no-scripts' in v:
+                    k = 'noscripts'
+                    v = v.replace('--no-scripts', '')
+                v = v.strip()
+                if len(v):
+                    p += f"@{v}"
+            elif v is False:
+                k = 'del'
+            elif v is None:
+                continue
+
+            pkgs[k] = p if len(pkgs[k]) == 0 else pkgs[k] + ' ' + p
+
+        self.packages = pkgs
+
+    def _stringify_services(self):
+        # stringify services map
+        #   <level>:
+        #     <svc>: true   # enable <svc> at <level>
+        #     <svc>: false  # disable <svc> at <level>
+        #     <svc>: null   # skip explicit en/disable of <svc> at <level>
+        self.services = {
+            'enable': ' '.join(filter(lambda x: not x.endswith('='), (
+                '{}={}'.format(lvl, ','.join(filter(None, (
+                    s if v is True else None
+                    for s, v in svcs.items()
+                ))))
+                for lvl, svcs in self.services.items()
+            ))),
+            'disable': ' '.join(filter(lambda x: not x.endswith('='), (
+                '{}={}'.format(lvl, ','.join(filter(None, (
+                    s if v is False else None
+                    for s, v in svcs.items()
+                ))))
+                for lvl, svcs in self.services.items()
+            )))
+        }
+
+    def _stringify_dict_keys(self, d, sep):
+        self.__dict__[d] = sep.join(filter(None, (
+            m if v is True else None
+            for m, v in self.__dict__[d].items()
+        )))
+
+    def refresh_state(self, step, revise=False):
+        log = logging.getLogger('build')
+        actions = {}
+        revision = 0
+        remote_image = clouds.latest_build_image(self)
+        log.debug('\n%s', remote_image)
+        step_state = step == 'state'
+
+        # enable actions based on the specified step
+        if step in ['local', 'import', 'publish', 'state']:
+            actions['build'] = True
+
+        if step in ['import', 'publish', 'state']:
+            actions['import'] = True
+
+        if step in ['publish', 'state']:
+            # we will resolve publish destinations (if any) later
+            actions['publish'] = True
+
+        if revise:
+            if self.local_path.exists():
+                # remove previously built local image artifacts
+                log.warning('%s existing local image dir %s',
+                    'Would remove' if step_state else 'Removing',
+                    self.local_dir)
+                if not step_state:
+                    shutil.rmtree(self.local_dir)
+
+            if remote_image and remote_image.published:
+                log.warning('%s image revision for %s',
+                    'Would bump' if step_state else 'Bumping',
+                    self.image_key)
+                revision = int(remote_image.revision) + 1
+
+            elif remote_image and remote_image.imported:
+                # remove existing imported (but unpublished) image
+                log.warning('%s unpublished remote image %s',
+                    'Would remove' if step_state else 'Removing',
+                    remote_image.import_id)
+                if not step_state:
+                    clouds.remove_image(self, remote_image.import_id)
+
+            remote_image = None
+
+        elif remote_image:
+            if remote_image.imported:
+                # already imported, don't build/import again
+                log.debug('%s - already imported', self.image_key)
+                actions.pop('build', None)
+                actions.pop('import', None)
+
+            if remote_image.published:
+                # NOTE: re-publishing can update perms or push to new regions
+                log.debug('%s - already published', self.image_key)
+
+        if self.local_path.exists():
+            # local image's already built, don't rebuild
+            log.debug('%s - already locally built', self.image_key)
+            actions.pop('build', None)
+
+        # merge remote_image data into image state
+        if remote_image:
+            self.__dict__ |= dict(remote_image)
+
+        else:
+            self.__dict__ |= {
+                'revision': revision,
+                'imported': None,
+                'import_id': None,
+                'import_region': None,
+                'published': None,
+            }
+
+        # update artifacts, if we've got 'em
+        artifacts_yaml = self.local_dir / 'artifacts.yaml'
+        if artifacts_yaml.exists():
+            yaml = YAML()
+            self.artifacts = yaml.load(artifacts_yaml)
+ else: + self.artifacts = None + + self.actions = list(actions) + log.info('%s/%s = %s', self.cloud, self.image_name, self.actions) + + self.state_updated = datetime.utcnow().isoformat() + + +class DictObj(dict): + + def __getattr__(self, key): + return self[key] + + def __setattr__(self, key, value): + self[key] = value + + def __delattr__(self, key): + del self[key] + + +class Tags(DictObj): + + def __init__(self, d={}, from_list=None, key_name='Key', value_name='Value'): + for key, value in d.items(): + self.__setattr__(key, value) + + if from_list: + self.from_list(from_list, key_name, value_name) + + def __setattr__(self, key, value): + self[key] = str(value) + + def as_list(self, key_name='Key', value_name='Value'): + return [{key_name: k, value_name: v} for k, v in self.items()] + + def from_list(self, list=[], key_name='Key', value_name='Value'): + for tag in list: + self.__setattr__(tag[key_name], tag[value_name]) diff --git a/alpine-cloud-images/overlays/testing/configs/alpine-testing.conf b/alpine-cloud-images/overlays/testing/configs/alpine-testing.conf new file mode 100644 index 0000000..e82e372 --- /dev/null +++ b/alpine-cloud-images/overlays/testing/configs/alpine-testing.conf @@ -0,0 +1,38 @@ +# vim: ts=2 et: + +# Overlay for testing alpine-cloud-images + +# start with the production alpine config +include required("alpine.conf") + +# override specific things... + +project = alpine-cloud-images__test + +Default { + # unset before resetting + name = null + name = [ test ] + description = null + description = [ Alpine Test ] +} + +Dimensions { + cloud { + # just test in these regions + aws.regions { + us-west-2 = true + us-east-1 = true + } + # adapters need to be written + #oci { include required("testing/oci.conf") } + #gcp { include required("testing/gcp.conf") } + #azure { include required("testing/azure.conf") } + #generic + #nocloud + } +} + +# test in private, and only in regions specified above +Mandatory.access.PUBLIC = false +Mandatory.regions.ALL = false \ No newline at end of file diff --git a/alpine-cloud-images/overlays/testing/configs/images.conf b/alpine-cloud-images/overlays/testing/configs/images.conf new file mode 120000 index 0000000..cc0f93d --- /dev/null +++ b/alpine-cloud-images/overlays/testing/configs/images.conf @@ -0,0 +1 @@ +alpine-testing.conf \ No newline at end of file diff --git a/alpine-cloud-images/overlays/testing/configs/testing/oci.conf b/alpine-cloud-images/overlays/testing/configs/testing/oci.conf new file mode 100644 index 0000000..0e80256 --- /dev/null +++ b/alpine-cloud-images/overlays/testing/configs/testing/oci.conf @@ -0,0 +1,4 @@ +# vim: ts=2 et: +builder = qemu + +# TBD \ No newline at end of file diff --git a/alpine-cloud-images/scripts/cleanup b/alpine-cloud-images/scripts/cleanup new file mode 100644 index 0000000..81e579b --- /dev/null +++ b/alpine-cloud-images/scripts/cleanup @@ -0,0 +1,42 @@ +#!/bin/sh -eu +# vim: ts=4 et: + +[ -z "$DEBUG" ] || [ "$DEBUG" = 0 ] || set -x + +export \ + TARGET=/mnt + + +die() { + printf '\033[1;7;31m FATAL: %s \033[0m\n' "$@" >&2 # bold reversed red + exit 1 +} +einfo() { + printf '\n\033[1;7;36m> %s <\033[0m\n' "$@" >&2 # bold reversed cyan +} + +cleanup() { + # Sweep cruft out of the image that doesn't need to ship or will be + # re-generated when the image boots + rm -f \ + "$TARGET/var/cache/apk/"* \ + "$TARGET/etc/resolv.conf" \ + "$TARGET/root/.ash_history" \ + "$TARGET/etc/"*- + + # unmount extra EFI mount + if [ "$FIRMWARE" = uefi ]; then + umount "$TARGET/boot/efi" + fi + + umount \ 
+ "$TARGET/dev" \ + "$TARGET/proc" \ + "$TARGET/sys" + + umount "$TARGET" +} + +einfo "Cleaning up and unmounting image volume..." +cleanup +einfo "Done!" \ No newline at end of file diff --git a/alpine-cloud-images/scripts/setup b/alpine-cloud-images/scripts/setup new file mode 100755 index 0000000..3bd3726 --- /dev/null +++ b/alpine-cloud-images/scripts/setup @@ -0,0 +1,256 @@ +#!/bin/sh -eu +# vim: ts=4 et: + +[ -z "$DEBUG" ] || [ "$DEBUG" = 0 ] || set -x + +export \ + DEVICE=/dev/vda \ + TARGET=/mnt \ + SETUP=/tmp/setup.d + + +die() { + printf '\033[1;7;31m FATAL: %s \033[0m\n' "$@" >&2 # bold reversed red + exit 1 +} +einfo() { + printf '\n\033[1;7;36m> %s <\033[0m\n' "$@" >&2 # bold reversed cyan +} + +# set up the builder's environment +setup_builder() { + einfo "Setting up Builder Instance" + setup-apkrepos -1 # main repo via dl-cdn + # ODO? also uncomment community repo? + # Always use latest versions within the release, security patches etc. + apk upgrade -U --available --no-cache + apk --no-cache add \ + e2fsprogs \ + dosfstools \ + gettext \ + lsblk \ + parted +} + +make_filesystem() { + einfo "Making the Filesystem" + root_dev=$DEVICE + + # make sure we're using a blank block device + lsblk -P --fs "$DEVICE" >/dev/null 2>&1 || \ + die "'$DEVICE' is not a valid block device" + if lsblk -P --fs "$DEVICE" | grep -vq 'FSTYPE=""'; then + die "Block device '$DEVICE' is not blank" + fi + + if [ "$FIRMWARE" = uefi ]; then + # EFI partition isn't optimally aligned, but is rarely used after boot + parted "$DEVICE" -- \ + mklabel gpt \ + mkpart EFI fat32 512KiB 1MiB \ + mkpart / ext4 1MiB 100% \ + set 1 esp on \ + unit MiB print + + root_dev="${DEVICE}2" + mkfs.fat -n EFI "${DEVICE}1" + fi + + mkfs.ext4 -O ^64bit -L / "$root_dev" + mkdir -p "$TARGET" + mount -t ext4 "$root_dev" "$TARGET" + + if [ "$FIRMWARE" = uefi ]; then + mkdir -p "$TARGET/boot/efi" + mount -t vfat "${DEVICE}1" "$TARGET/boot/efi" + fi +} + +install_base() { + einfo "Installing Alpine Base" + mkdir -p "$TARGET/etc/apk" + echo "$REPOS" > "$TARGET/etc/apk/repositories" + cp -a /etc/apk/keys "$TARGET/etc/apk" + # shellcheck disable=SC2086 + apk --root "$TARGET" --initdb --no-cache add $PACKAGES_ADD + # shellcheck disable=SC2086 + [ -z "$PACKAGES_NOSCRIPTS" ] || \ + apk --root "$TARGET" --no-cache --no-scripts add $PACKAGES_NOSCRIPTS + # shellcheck disable=SC2086 + [ -z "$PACKAGES_DEL" ] || \ + apk --root "$TARGET" --no-cache del $PACKAGES_DEL +} + +setup_chroot() { + mount -t proc none "$TARGET/proc" + mount --bind /dev "$TARGET/dev" + mount --bind /sys "$TARGET/sys" + + # Needed for bootstrap, will be removed in the cleanup stage. + install -Dm644 /etc/resolv.conf "$TARGET/etc/resolv.conf" +} + +install_bootloader() { + einfo "Installing Bootloader" + + # create initfs + + # shellcheck disable=SC2046 + kernel=$(basename $(find "$TARGET/lib/modules/"* -maxdepth 0)) + + # ensure features can be found by mkinitfs + for FEATURE in $INITFS_FEATURES; do + # already taken care of? + [ -f "$TARGET/etc/mkinitfs/features.d/$FEATURE.modules" ] || \ + [ -f "$TARGET/etc/mkinitfs/features.d/$FEATURE.files" ] && continue + # find the kernel module directory + module=$(chroot "$TARGET" /sbin/modinfo -k "$kernel" -n "$FEATURE") + [ -z "$module" ] && die "initfs_feature '$FEATURE' kernel module not found" + # replace everything after .ko with a * + echo "$module" | cut -d/ -f5- | sed -e 's/\.ko.*/.ko*/' \ + > "$TARGET/etc/mkinitfs/features.d/$FEATURE.modules" + done + + # TODO? 
+    # TODO? this appends INITFS_FEATURES; we may want to allow removal someday?
+    sed -Ei "s/^features=\"([^\"]+)\"/features=\"\1 $INITFS_FEATURES\"/" \
+        "$TARGET/etc/mkinitfs/mkinitfs.conf"
+
+    chroot "$TARGET" /sbin/mkinitfs "$kernel"
+
+    if [ "$FIRMWARE" = uefi ]; then
+        install_grub_efi
+    else
+        install_extlinux
+    fi
+}
+
+install_extlinux() {
+    # Use disk labels instead of UUIDs or device paths so that this works
+    # across instance families.  UUID works for many instances, but breaks on
+    # the NVMe ones because EBS volumes are hidden behind NVMe devices.
+    #
+    # Shorten timeout (to 1/10s), eliminating delays at instance launch.
+    #
+    # ttyS0 is for the EC2 Console "Get system log" and "EC2 Serial Console"
+    # features, whereas tty0 is for the "Get Instance screenshot" feature.
+    # Enabling the port early in extlinux gives the most complete output in
+    # the log.
+    #
+    # TODO: review for other clouds -- this may need to be cloud-specific.
+    sed -Ei -e "s|^[# ]*(root)=.*|\1=LABEL=/|" \
+        -e "s|^[# ]*(default_kernel_opts)=.*|\1=\"$KERNEL_OPTIONS\"|" \
+        -e "s|^[# ]*(serial_port)=.*|\1=ttyS0|" \
+        -e "s|^[# ]*(modules)=.*|\1=$KERNEL_MODULES|" \
+        -e "s|^[# ]*(default)=.*|\1=virt|" \
+        -e "s|^[# ]*(timeout)=.*|\1=1|" \
+        "$TARGET/etc/update-extlinux.conf"
+
+    chroot "$TARGET" /sbin/extlinux --install /boot
+    # TODO: is this really necessary?  can we set all this stuff during --install?
+    chroot "$TARGET" /sbin/update-extlinux --warn-only
+}
+
+install_grub_efi() {
+    [ -d "/sys/firmware/efi" ] || die "/sys/firmware/efi does not exist"
+
+    case "$ARCH" in
+        x86_64)  grub_target=x86_64-efi ; fwa=x64  ;;
+        aarch64) grub_target=arm64-efi  ; fwa=aa64 ;;
+        *) die "ARCH=$ARCH is currently unsupported" ;;
+    esac
+
+    # disable nvram so grub doesn't call efibootmgr
+    chroot "$TARGET" /usr/sbin/grub-install --target="$grub_target" --efi-directory=/boot/efi \
+        --bootloader-id=alpine --boot-directory=/boot --no-nvram
+
+    # fallback mode -- firmware boots EFI/boot/boot*.efi when no NVRAM entry exists
+    install -D "$TARGET/boot/efi/EFI/alpine/grub$fwa.efi" "$TARGET/boot/efi/EFI/boot/boot$fwa.efi"
+
+    # install default grub config
+    envsubst < "$SETUP/grub.template" > "$SETUP/grub"
+    install -o root -g root -Dm644 -t "$TARGET/etc/default" \
+        "$SETUP/grub"
+
+    # generate/install new config
+    chroot "$TARGET" grub-mkconfig -o /boot/grub/grub.cfg
+}
+
+configure_system() {
+    einfo "Configuring System"
+
+    # default network configuration
+    install -o root -g root -Dm644 -t "$TARGET/etc/network" "$SETUP/interfaces"
+
+    # configure NTP server, if specified
+    [ -n "$NTP_SERVER" ] && \
+        sed -e 's/^pool /server /' -e "s/pool.ntp.org/$NTP_SERVER/g" \
+            -i "$TARGET/etc/chrony/chrony.conf"
+
+    # setup fstab
+    install -o root -g root -Dm644 -t "$TARGET/etc" "$SETUP/fstab"
+    # if we're using an EFI bootloader, add extra line for EFI partition
+    if [ "$FIRMWARE" = uefi ]; then
+        cat "$SETUP/fstab.grub-efi" >> "$TARGET/etc/fstab"
+    fi
+
+    # Disable getty for physical ttys, enable getty for serial ttyS0.
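+    # (see install_extlinux above -- ttyS0 is the serial console that the
+    # clouds' log/console features attach to)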
+    sed -Ei -e '/^tty[0-9]/s/^/#/' -e '/^#ttyS0:/s/^#//' "$TARGET/etc/inittab"
+
+    # setup sudo and/or doas
+    if grep -q '^sudo$' "$TARGET/etc/apk/world"; then
+        echo '%wheel ALL=(ALL) NOPASSWD: ALL' > "$TARGET/etc/sudoers.d/wheel"
+    fi
+    if grep -q '^doas$' "$TARGET/etc/apk/world"; then
+        echo 'permit nopass :wheel' > "$TARGET/etc/doas.d/wheel.conf"
+    fi
+
+    # explicitly lock the root account
+    chroot "$TARGET" /bin/sh -c "/bin/echo 'root:*' | /usr/sbin/chpasswd -e"
+    chroot "$TARGET" /usr/bin/passwd -l root
+
+    # set up image user
+    user="${IMAGE_LOGIN:-alpine}"
+    chroot "$TARGET" /usr/sbin/addgroup "$user"
+    chroot "$TARGET" /usr/sbin/adduser -h "/home/$user" -s /bin/sh -G "$user" -D "$user"
+    chroot "$TARGET" /usr/sbin/addgroup "$user" wheel
+    chroot "$TARGET" /bin/sh -c "echo '$user:*' | /usr/sbin/chpasswd -e"
+
+    # modify PS1s in /etc/profile to prepend the username
+    sed -Ei \
+        -e "s/(^PS1=')(\\$\\{HOSTNAME%)/\\1\\$\\USER@\\2/" \
+        -e "s/( PS1=')(\\\\h:)/\\1\\\\u@\\2/" \
+        -e "s/( PS1=')(%m:)/\\1%n@\\2/" \
+        "$TARGET"/etc/profile
+
+    # write /etc/motd
+    echo "$MOTD" > "$TARGET"/etc/motd
+
+    setup_services
+}
+
+# shellcheck disable=SC2046
+setup_services() {
+    for lvl_svcs in $SERVICES_ENABLE; do
+        rc add $(echo "$lvl_svcs" | tr '=,' ' ')
+    done
+    for lvl_svcs in $SERVICES_DISABLE; do
+        rc del $(echo "$lvl_svcs" | tr '=,' ' ')
+    done
+}
+
+rc() {
+    op="$1"        # add or del
+    runlevel="$2"  # runlevel name
+    shift 2
+    services="$*"  # names of services
+
+    for svc in $services; do
+        chroot "$TARGET" rc-update "$op" "$svc" "$runlevel"
+    done
+}
+
+setup_builder
+make_filesystem
+install_base
+setup_chroot
+install_bootloader
+configure_system
diff --git a/alpine-cloud-images/scripts/setup-cloudinit b/alpine-cloud-images/scripts/setup-cloudinit
new file mode 100755
index 0000000..01658a1
--- /dev/null
+++ b/alpine-cloud-images/scripts/setup-cloudinit
@@ -0,0 +1,36 @@
+#!/bin/sh -eu
+# vim: ts=4 et:
+
+[ -z "$DEBUG" ] || [ "$DEBUG" = 0 ] || set -x
+
+TARGET=/mnt
+
+einfo() {
+    printf '\n\033[1;7;36m> %s <\033[0m\n' "$@" >&2  # bold reversed cyan
+}
+
+einfo "Setting up cloud-init bootstrap components..."
+
+# This adds the init scripts at the correct boot phases
+chroot "$TARGET" /sbin/setup-cloud-init
+
+# cloud-init locks our user by default, which means 'alpine' can't log in via
+# SSH.  This seems like a bug in cloud-init that should be fixed, but we can
+# hack around it for now here.
+if [ -f "$TARGET"/etc/cloud/cloud.cfg ]; then
+    sed -i '/lock_passwd:/s/True/False/' "$TARGET"/etc/cloud/cloud.cfg
+fi
+
+# configure the image for a particular cloud datasource
+case "$CLOUD" in
+    aws)
+        DATASOURCE="Ec2"
+        ;;
+    *)
+        echo "Unsupported Cloud '$CLOUD'" >&2
+        exit 1
+        ;;
+esac
+
+printf '\n\n# Cloud-Init will use default configuration for this DataSource\n' >> "$TARGET"/etc/cloud/cloud.cfg
+printf 'datasource_list: ["%s"]\n' "$DATASOURCE" >> "$TARGET"/etc/cloud/cloud.cfg
\ No newline at end of file
diff --git a/alpine-cloud-images/scripts/setup-tiny b/alpine-cloud-images/scripts/setup-tiny
new file mode 100755
index 0000000..7081842
--- /dev/null
+++ b/alpine-cloud-images/scripts/setup-tiny
@@ -0,0 +1,15 @@
+#!/bin/sh -eu
+# vim: ts=4 et:
+
+[ -z "$DEBUG" ] || [ "$DEBUG" = 0 ] || set -x
+
+TARGET=/mnt
+
+einfo() {
+    printf '\n\033[1;7;36m> %s <\033[0m\n' "$@" >&2  # bold reversed cyan
+}
+
+einfo "Configuring Tiny Cloud..."
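+# tiny-cloud's managed user is set via CLOUD_USER in /etc/conf.d/tiny-cloud;
+# point it at this image's login user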
+sed -i.bak -Ee "s/^#?CLOUD_USER=.*/CLOUD_USER=$IMAGE_LOGIN/" \ + "$TARGET"/etc/conf.d/tiny-cloud +rm "$TARGET"/etc/conf.d/tiny-cloud.bak \ No newline at end of file diff --git a/alpine-cloud-images/scripts/setup.d/fstab b/alpine-cloud-images/scripts/setup.d/fstab new file mode 100644 index 0000000..3fb8864 --- /dev/null +++ b/alpine-cloud-images/scripts/setup.d/fstab @@ -0,0 +1,2 @@ +# +LABEL=/ / ext4 defaults,noatime 1 1 diff --git a/alpine-cloud-images/scripts/setup.d/fstab.grub-efi b/alpine-cloud-images/scripts/setup.d/fstab.grub-efi new file mode 100644 index 0000000..03d6e96 --- /dev/null +++ b/alpine-cloud-images/scripts/setup.d/fstab.grub-efi @@ -0,0 +1 @@ +LABEL=EFI /boot/efi vfat defaults,noatime,uid=0,gid=0,umask=077 0 0 diff --git a/alpine-cloud-images/scripts/setup.d/grub.template b/alpine-cloud-images/scripts/setup.d/grub.template new file mode 100644 index 0000000..b88f991 --- /dev/null +++ b/alpine-cloud-images/scripts/setup.d/grub.template @@ -0,0 +1,5 @@ +GRUB_CMDLINE_LINUX_DEFAULT="modules=$KERNEL_MODULES $KERNEL_OPTIONS" +GRUB_DISABLE_RECOVERY=true +GRUB_DISABLE_SUBMENU=y +GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1" +GRUB_TIMEOUT=0 diff --git a/alpine-cloud-images/scripts/setup.d/interfaces b/alpine-cloud-images/scripts/setup.d/interfaces new file mode 100644 index 0000000..864f3e3 --- /dev/null +++ b/alpine-cloud-images/scripts/setup.d/interfaces @@ -0,0 +1,7 @@ +# default alpine-cloud-images network configuration + +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet dhcp