2021-11-23 06:09:18 +00:00
|
|
|
# NOTE: not meant to be executed directly
|
|
|
|
# vim: ts=4 et:
|
|
|
|
|
|
|
|
import logging
|
2021-11-28 23:04:28 +00:00
|
|
|
import hashlib
|
2021-11-23 06:09:18 +00:00
|
|
|
import os
|
|
|
|
import time
|
|
|
|
|
|
|
|
from datetime import datetime
|
|
|
|
from subprocess import Popen, PIPE, run
|
|
|
|
|
|
|
|
from .interfaces.adapter import CloudAdapterInterface
|
2021-11-28 23:04:28 +00:00
|
|
|
from image_configs import Tags, DictObj
|
2021-11-23 06:09:18 +00:00
|
|
|
|
|
|
|
|
|
|
|
class AWSCloudAdapter(CloudAdapterInterface):
    """Cloud adapter for AWS EC2: imports, tags, publishes, and removes AMIs.

    Credentials come either from a cred_provider (see credentials()) or from
    the boto3 default discovery chain (env vars / ~/.aws config).
    """

    # cloud-agnostic tag keys surfaced by _image_info()
    IMAGE_INFO = [
        'revision', 'imported', 'import_id', 'import_region', 'published',
    ]
    # map cred_provider key names to boto3 Session kwarg names
    CRED_MAP = {
        'access_key': 'aws_access_key_id',
        'secret_key': 'aws_secret_access_key',
        'session_token': 'aws_session_token',
    }
    # qemu-img invocation prefix; source/destination paths are appended.
    # 'force_size=on' is required for EC2 to accept non-aligned VHDs.
    CONVERT_CMD = (
        'qemu-img', 'convert', '-f', 'qcow2', '-O', 'vpc', '-o', 'force_size=on'
    )
    # our arch names -> EC2 architecture identifiers
    ARCH = {
        'aarch64': 'arm64',
        'x86_64': 'x86_64',
    }
    # our firmware names -> EC2 boot modes
    BOOT_MODE = {
        'bios': 'legacy-bios',
        'uefi': 'uefi',
    }

    @property
    def sdk(self):
        """Lazily import (and if necessary pip-install) boto3."""
        # delayed import/install of SDK until we want to use it
        if not self._sdk:
            try:
                import boto3
            except ModuleNotFoundError:
                run(['work/bin/pip', 'install', '-U', 'boto3'])
                import boto3

            self._sdk = boto3

        return self._sdk

    def session(self, region=None):
        """Return a cached boto3 Session for region (None = default region)."""
        if region not in self._sessions:
            creds = {'region_name': region} | self.credentials(region)
            self._sessions[region] = self.sdk.session.Session(**creds)

        return self._sessions[region]

    @property
    def regions(self):
        """Map of region name -> subscribed/usable flag."""
        if self.cred_provider:
            return self.cred_provider.get_regions(self.cloud)

        # list of all subscribed regions
        return {r['RegionName']: True for r in self.session().client('ec2').describe_regions()['Regions']}

    @property
    def default_region(self):
        """Default region from the cred_provider, or None to defer to boto3."""
        if self.cred_provider:
            return self.cred_provider.get_default_region(self.cloud)

        # rely on our env or ~/.aws config for the default
        return None

    def credentials(self, region=None):
        """Return a dict of boto3 Session kwargs for the given region."""
        if not self.cred_provider:
            # use the cloud SDK's default credential discovery
            return {}

        creds = self.cred_provider.get_credentials(self.cloud, region)
        # return dict suitable to use for session()
        return {self.CRED_MAP[k]: v for k, v in creds.items() if k in self.CRED_MAP}

    def _get_images_with_tags(self, project, image_key, tags=None, region=None):
        """Return self-owned images matching the tag filters, newest first.

        tags is an optional dict of extra tag filters; project and image_key
        are always applied.
        """
        ec2r = self.session(region).resource('ec2')
        req = {'Owners': ['self'], 'Filters': []}
        # FIX: was a mutable default argument ({}) mutated in place with |=,
        # which leaked filter state across calls and clobbered callers' dicts;
        # copy defensively instead.
        tags = dict(tags) if tags else {}
        tags |= {
            'project': project,
            'image_key': image_key,
        }
        for k, v in tags.items():
            req['Filters'].append({'Name': f"tag:{k}", 'Values': [str(v)]})

        # most recent creation date first
        return sorted(
            ec2r.images.filter(**req), key=lambda k: k.creation_date, reverse=True)

    # necessary cloud-agnostic image info
    def _image_info(self, i):
        """Extract the IMAGE_INFO tag subset from an EC2 Image as a DictObj."""
        tags = Tags(from_list=i.tags)
        return DictObj({k: tags.get(k, None) for k in self.IMAGE_INFO})

    # get the latest imported image for a given build name
    def latest_build_image(self, project, image_key):
        """Return info for the most recent imported image, or None."""
        images = self._get_images_with_tags(
            project=project,
            image_key=image_key,
        )
        if images:
            # first one is the latest
            return self._image_info(images[0])

        return None

    # import an image
    # NOTE: requires 'vmimport' role with read/write of <s3_bucket>.* and its objects
    def import_image(self, ic):
        """Convert ic's local QCOW2 to VHD, stage via S3, import as an EC2
        snapshot, register an AMI, and tag everything.

        Returns _image_info() for the registered AMI.  Raises RuntimeError on
        conversion or snapshot-import failure; cleans up partial resources
        (S3 objects, snapshot, image) on any error.
        """
        log = logging.getLogger('import')
        image_path = ic.local_path
        image_aws = ic.local_dir / 'image.vhd'
        name = ic.image_name
        description = ic.image_description

        # convert QCOW2 to VHD
        log.info('Converting %s to VHD format', image_path)
        p = Popen(self.CONVERT_CMD + (image_path, image_aws), stdout=PIPE, stdin=PIPE, encoding='utf8')
        out, err = p.communicate()
        if p.returncode:
            log.error('Unable to convert %s to VHD format (%s)', image_path, p.returncode)
            log.error('EXIT: %d', p.returncode)
            log.error('STDOUT:\n%s', out)
            log.error('STDERR:\n%s', err)
            # FIX: carry a message instead of a bare RuntimeError
            raise RuntimeError(f"qemu-img convert failed ({p.returncode})")

        session = self.session()
        s3r = session.resource('s3')
        ec2c = session.client('ec2')
        ec2r = session.resource('ec2')

        # random bucket name avoids collisions between concurrent imports
        bucket_name = 'alpine-cloud-images.' + hashlib.sha1(os.urandom(40)).hexdigest()
        s3_key = name + '.vhd'

        bucket = s3r.Bucket(bucket_name)
        log.info('Creating S3 bucket %s', bucket.name)
        bucket.create(
            CreateBucketConfiguration={'LocationConstraint': ec2c.meta.region_name}
        )
        bucket.wait_until_exists()
        s3_url = f"s3://{bucket.name}/{s3_key}"

        try:
            log.info('Uploading %s to %s', image_aws, s3_url)
            bucket.upload_file(str(image_aws), s3_key)

            # import snapshot from S3
            log.info('Importing EC2 snapshot from %s', s3_url)
            _import_opts = {
                'DiskContainer': {
                    'Description': description,     # https://github.com/boto/boto3/issues/2286
                    'Format': 'VHD',
                    'Url': s3_url
                }
            }
            # NOTE: TagSpecifications -- doesn't work with ResourceType: snapshot?

            # For some reason the import_snapshot boto function cannot handle setting KmsKeyId to default / empty
            # so we need to set it conditionally
            if ic.encryption_key_id:
                _import_opts['Encrypted'] = True
                _import_opts['KmsKeyId'] = ic.encryption_key_id

            ss_import = ec2c.import_snapshot(**_import_opts)

            # poll the import task until it completes or fails
            ss_task_id = ss_import['ImportTaskId']
            while True:
                ss_task = ec2c.describe_import_snapshot_tasks(
                    ImportTaskIds=[ss_task_id]
                )
                task_detail = ss_task['ImportSnapshotTasks'][0]['SnapshotTaskDetail']
                if task_detail['Status'] not in ['pending', 'active', 'completed']:
                    msg = f"Bad EC2 snapshot import: {task_detail['Status']} - {task_detail['StatusMessage']}"
                    log.error(msg)
                    raise RuntimeError(msg)

                if task_detail['Status'] == 'completed':
                    snapshot_id = task_detail['SnapshotId']
                    break

                time.sleep(15)
        except Exception:
            log.error('Unable to import snapshot from S3:', exc_info=True)
            raise
        finally:
            # always cleanup S3, even if there was an exception raised
            log.info('Cleaning up %s', s3_url)
            bucket.Object(s3_key).delete()
            bucket.delete()

        # tag snapshot
        snapshot = ec2r.Snapshot(snapshot_id)
        try:
            log.info('Tagging EC2 snapshot %s', snapshot_id)
            tags = ic.tags
            tags.Name = tags.name   # because AWS is special
            snapshot.create_tags(Tags=tags.as_list())
        except Exception:
            log.error('Unable to tag snapshot:', exc_info=True)
            log.info('Removing snapshot')
            snapshot.delete()
            raise

        # register AMI
        try:
            log.info('Registering EC2 AMI from snapshot %s', snapshot_id)
            img = ec2c.register_image(
                Architecture=self.ARCH[ic.arch],
                BlockDeviceMappings=[{
                    'DeviceName': '/dev/xvda',
                    'Ebs': {'SnapshotId': snapshot_id}
                }],
                Description=description,
                EnaSupport=True,
                Name=ic.image_name,
                RootDeviceName='/dev/xvda',
                SriovNetSupport='simple',
                VirtualizationType='hvm',
                BootMode=self.BOOT_MODE[ic.firmware],
            )
        except Exception:
            log.error('Unable to register image:', exc_info=True)
            log.info('Removing snapshot')
            snapshot.delete()
            raise

        image_id = img['ImageId']
        image = ec2r.Image(image_id)

        try:
            # tag image (adds imported tag)
            log.info('Tagging EC2 AMI %s', image_id)
            # NOTE: naive UTC timestamps by project convention (tag values)
            tags.imported = datetime.utcnow().isoformat()
            tags.import_id = image_id
            tags.import_region = ec2c.meta.region_name
            image.create_tags(Tags=tags.as_list())
        except Exception:
            log.error('Unable to tag image:', exc_info=True)
            log.info('Removing image and snapshot')
            image.delete()
            snapshot.delete()
            raise

        return self._image_info(image)

    # remove an (unpublished) image
    def remove_image(self, image_id):
        """Deregister the AMI and delete its backing snapshot."""
        log = logging.getLogger('build')
        ec2r = self.session().resource('ec2')
        image = ec2r.Image(image_id)
        snapshot_id = image.block_device_mappings[0]['Ebs']['SnapshotId']
        snapshot = ec2r.Snapshot(snapshot_id)
        log.info('Deregistering %s', image_id)
        image.deregister()
        log.info('Deleting %s', snapshot_id)
        snapshot.delete()

    # publish an image
    def publish_image(self, ic):
        """Copy the latest imported AMI for ic to its target regions, tag the
        copies, apply launch permissions, and set EOL deprecation.

        Returns a dict of region -> published image id (None for failed
        copies).  Raises RuntimeError if no source image exists.
        """
        log = logging.getLogger('publish')
        source_image = self.latest_build_image(
            ic.project,
            ic.image_key,
        )
        if not source_image:
            log.error('No source image for %s', ic.image_key)
            # FIX: typo 'imamge' -> 'image'
            raise RuntimeError('Missing source image')

        source_id = source_image.import_id
        source_region = source_image.import_region
        log.info('Publishing source: %s/%s', source_region, source_id)
        source = self.session().resource('ec2').Image(source_id)

        # we may be updating tags, get them from image config
        tags = ic.tags

        # sort out published image access permissions
        perms = {'groups': [], 'users': []}
        if ic.access.get('PUBLIC', None):
            perms['groups'] = ['all']
        else:
            for k, v in ic.access.items():
                if v:
                    log.debug('users: %s', k)
                    perms['users'].append(str(k))

        log.debug('perms: %s', perms)

        # resolve destination regions
        regions = self.regions
        if ic.regions.pop('ALL', None):
            log.info('Publishing to ALL available regions')
        else:
            # clear ALL out of the way if it's still there
            ic.regions.pop('ALL', None)
            regions = {r: regions[r] for r in ic.regions}

        # kick off copies (or reuse existing images) per region
        publishing = {}
        for r, subscribed in regions.items():
            if not subscribed:
                log.warning('Skipping unsubscribed AWS region %s', r)
                continue

            images = self._get_images_with_tags(
                region=r,
                project=ic.project,
                image_key=ic.image_key,
                tags={'revision': ic.revision}
            )
            if images:
                image = images[0]
                log.info('%s: Already exists as %s', r, image.id)
            else:
                ec2c = self.session(r).client('ec2')
                try:
                    res = ec2c.copy_image(
                        Description=source.description,
                        Name=source.name,
                        SourceImageId=source_id,
                        SourceRegion=source_region,
                        Encrypted=bool(ic.encryption_key_id),
                        KmsKeyId=ic.encryption_key_id
                    )
                except Exception:
                    log.warning('Skipping %s, unable to copy image:', r, exc_info=True)
                    continue

                image_id = res['ImageId']
                log.info('%s: Publishing to %s', r, image_id)
                image = self.session(r).resource('ec2').Image(image_id)

            publishing[r] = image

        # poll copies until all are finished (available or failed)
        artifacts = {}
        copy_wait = 180     # first wait is long; subsequent polls are shorter
        while len(artifacts) < len(publishing):
            for r, image in publishing.items():
                if r not in artifacts:
                    image.reload()
                    if image.state == 'available':
                        # tag image
                        log.info('%s: Adding tags to %s', r, image.id)
                        image_tags = Tags(from_list=image.tags)
                        # only stamp 'published' the first time
                        fresh = 'published' not in image_tags
                        if fresh:
                            tags.published = datetime.utcnow().isoformat()

                        tags.Name = tags.name   # because AWS is special
                        image.create_tags(Tags=tags.as_list())

                        # tag image's snapshot, too
                        snapshot = self.session(r).resource('ec2').Snapshot(
                            image.block_device_mappings[0]['Ebs']['SnapshotId']
                        )
                        snapshot.create_tags(Tags=tags.as_list())

                        # update image description to match description in tags
                        log.info('%s: Updating description to %s', r, tags.description)
                        image.modify_attribute(
                            Description={'Value': tags.description},
                        )

                        # apply launch perms
                        if perms['groups'] or perms['users']:
                            log.info('%s: Applying launch perms to %s', r, image.id)
                            image.reset_attribute(Attribute='launchPermission')
                            image.modify_attribute(
                                Attribute='launchPermission',
                                OperationType='add',
                                UserGroups=perms['groups'],
                                UserIds=perms['users'],
                            )

                        # set up AMI deprecation
                        ec2c = image.meta.client
                        log.info('%s: Setting EOL deprecation time on %s', r, image.id)
                        try:
                            ec2c.enable_image_deprecation(
                                ImageId=image.id,
                                DeprecateAt=f"{tags.end_of_life}T23:59:59Z"
                            )
                        except Exception:
                            log.warning('Unable to set EOL Deprecation on %s image:', r, exc_info=True)

                        artifacts[r] = image.id

                    if image.state == 'failed':
                        log.error('%s: %s - %s - %s', r, image.id, image.state, image.state_reason)
                        artifacts[r] = None

            remaining = len(publishing) - len(artifacts)
            if remaining > 0:
                log.info('Waiting %ds for %d images to complete', copy_wait, remaining)
                time.sleep(copy_wait)
                copy_wait = 30

        return artifacts
|
2021-11-23 06:09:18 +00:00
|
|
|
|
|
|
|
|
|
|
|
def register(cloud, cred_provider=None):
    """Factory entry point: build the AWS adapter for the given cloud."""
    adapter = AWSCloudAdapter(cloud, cred_provider)
    return adapter
|